├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   ├── feature_request.md
│   │   └── user-story.md
│   └── images
│       ├── ACM-example.png
│       └── S3-models.png
├── .gitignore
├── CONTRIBUTING.md
├── DEVELOPMENT.md
├── LICENSE
├── Makefile
├── OWNERS
├── README.md
├── THIRD-PARTY-NOTICES.txt
├── acm
│   ├── odh-core
│   │   ├── acm-observability
│   │   │   ├── files
│   │   │   │   └── uwl_metrics_list.yaml
│   │   │   ├── grafana-dashboards
│   │   │   │   └── edge-inference-health.json
│   │   │   ├── kustomization.yaml
│   │   │   ├── multiclusterobservability.yaml
│   │   │   ├── namespace.yaml
│   │   │   └── secrets
│   │   │       └── thanos.yaml
│   │   ├── default
│   │   │   ├── kustomization.yaml
│   │   │   └── test-namespace.yaml
│   │   └── olm-operator-subscriptions
│   │       ├── kustomization.yaml
│   │       ├── namespace.yaml
│   │       ├── opendatahub-operator.yaml
│   │       ├── openshift-pipelines.yaml
│   │       └── operatorgroup.yaml
│   ├── odh-edge
│   │   ├── apps
│   │   │   ├── bike-rental-app
│   │   │   │   ├── bike-rental-inference-generator-cronjob.yaml
│   │   │   │   ├── files
│   │   │   │   │   └── test-data.json
│   │   │   │   └── kustomization.yaml
│   │   │   ├── telemetry
│   │   │   │   ├── kustomization.yaml
│   │   │   │   ├── namespace.yaml
│   │   │   │   ├── otel-collector.yaml
│   │   │   │   ├── role-binding.yaml
│   │   │   │   └── role.yaml
│   │   │   └── tensorflow-housing-app
│   │   │       ├── files
│   │   │       │   └── test-data.json
│   │   │       ├── housing-inference-generator.yaml
│   │   │       └── kustomization.yaml
│   │   └── base
│   │       ├── files
│   │       │   └── uwl_metrics_list.yaml
│   │       ├── kustomization.yaml
│   │       ├── model-deployment.yaml
│   │       ├── model-route.yaml
│   │       ├── model-service.yaml
│   │       └── namespace.yaml
│   └── registration
│       ├── kustomization.yaml
│       └── near-edge
│           ├── base
│           │   ├── kustomization.yaml
│           │   ├── nameReference.yaml
│           │   └── near-edge.yaml
│           ├── kustomization.yaml
│           ├── openshift-gitops-gitopscluster.yaml
│           ├── openshift-gitops-managedclustersetbinding.yaml
│           ├── openshift-gitops-placement.yaml
│           ├── overlays
│           │   ├── bike-rental-app
│           │   │   └── kustomization.yaml
│           │   ├── opentelemetry
│           │   │   └── kustomization.yaml
│           │   └── tensorflow-housing-app
│           │       └── kustomization.yaml
│           └── test
│               ├── bike-rental-app
│               │   └── kustomization.yaml
│               ├── kustomization.yaml
│               └── tensorflow-housing-app
│                   └── kustomization.yaml
├── byo-mgmt
│   └── registration
│       └── near-edge
│           ├── base
│           │   ├── argocd-application.yaml
│           │   └── kustomization.yaml
│           └── overlays
│               ├── bike-rental-app
│               │   └── kustomization.yaml
│               └── tensorflow-housing-app
│                   └── kustomization.yaml
├── cli
│   ├── Makefile
│   ├── cmd
│   │   └── main.go
│   ├── cobra.yaml
│   ├── examples
│   │   └── params.yaml.sample
│   ├── go.mod
│   ├── go.sum
│   └── pkg
│       ├── commands
│       │   ├── common
│       │   │   ├── cmd.go
│       │   │   ├── errors.go
│       │   │   └── styles.go
│       │   ├── flags
│       │   │   └── flags.go
│       │   ├── images
│       │   │   ├── build.go
│       │   │   ├── describe.go
│       │   │   ├── images.go
│       │   │   ├── msgs.go
│       │   │   └── update.go
│       │   ├── models
│       │   │   ├── add.go
│       │   │   └── models.go
│       │   └── root.go
│       ├── edgeclient
│       │   ├── client.go
│       │   └── types.go
│       ├── httptest
│       │   └── server.go
│       ├── modelregistry
│       │   ├── client.go
│       │   ├── client_test.go
│       │   └── errors.go
│       └── pipelines
│           └── params.go
├── docs
│   ├── byo-mgmt-gitops.md
│   ├── cli.md
│   ├── glossary.md
│   ├── images
│   │   ├── edge-architecture.png
│   │   └── poc-interaction-diagram.png
│   └── preparing_the_infrastructure.md
├── examples
│   ├── README.md
│   ├── containerfiles
│   │   ├── Containerfile.ab-jq
│   │   ├── Containerfile.openvino.mlserver.mlflow
│   │   └── Containerfile.seldonio.mlserver.mlflow
│   ├── model-upload
│   │   ├── Makefile
│   │   ├── local-model-pvc-template.yaml
│   │   └── local-model-to-pvc-pod-template.yaml
│   ├── models
│   │   ├── bike-rentals-auto-ml
│   │   │   ├── MLmodel
│   │   │   ├── conda.yaml
│   │   │   ├── dataset.csv
│   │   │   ├── model.pkl
│   │   │   ├── python_env.yaml
│   │   │   ├── requirements.txt
│   │   │   └── test_data_generator.py
│   │   ├── lightgbm-iris
│   │   │   ├── README.md
│   │   │   ├── iris-lightgbm.bst
│   │   │   ├── model-settings.json
│   │   │   └── settings.json
│   │   ├── lightgbm-mushrooms
│   │   │   ├── README.md
│   │   │   ├── model-settings.json
│   │   │   ├── mushroom-lightgbm.bst
│   │   │   └── settings.json
│   │   ├── onnx-mnist
│   │   │   ├── 1
│   │   │   │   ├── mnist.onnx
│   │   │   │   └── schema
│   │   │   │       └── schema.json
│   │   │   └── README.md
│   │   ├── tensorflow-facedetection
│   │   │   ├── 1
│   │   │   │   ├── face-detection-retail-0004.bin
│   │   │   │   └── face-detection-retail-0004.xml
│   │   │   └── README.md
│   │   └── tensorflow-housing
│   │       ├── MLmodel
│   │       ├── README.md
│   │       ├── conda.yaml
│   │       ├── convert_csv_to_json.py
│   │       ├── dataset.csv
│   │       ├── python_env.yaml
│   │       ├── requirements.txt
│   │       └── tf2model
│   │           ├── fingerprint.pb
│   │           ├── saved_model.pb
│   │           └── variables
│   │               ├── variables.data-00000-of-00001
│   │               └── variables.index
│   └── tekton
│       ├── aiedge-e2e
│       │   ├── example-pipelineruns
│       │   │   ├── git-fetch.tensorflow-housing.pipelinerun.yaml
│       │   │   └── s3-fetch.bike-rentals.pipelinerun.yaml
│       │   ├── templates
│       │   │   ├── credentials-git.secret.yaml.template
│       │   │   ├── credentials-image-registry.secret.yaml.template
│       │   │   ├── credentials-s3.secret.yaml.template
│       │   │   └── self-signed-cert.configmap.yaml.template
│       │   └── test-data
│       │       ├── bike-rentals-test-data-cm.yaml
│       │       ├── kustomization.yaml
│       │       └── tensorflow-housing-test-data-cm.yaml
│       └── gitops-update-pipeline
│           ├── example-pipelineruns
│           │   ├── gitops-update-pipelinerun-bike-rentals.yaml
│           │   ├── gitops-update-pipelinerun-json.yaml
│           │   └── gitops-update-pipelinerun-tensorflow-housing.yaml
│           ├── templates
│           │   └── example-git-credentials-secret.yaml.template
│           └── test
│               └── json
│                   └── my-deployment.json
├── gitea
│   ├── README.md
│   ├── operator
│   │   ├── catalogsource.yaml
│   │   ├── kustomization.yaml
│   │   ├── namespace.yaml
│   │   ├── operatorgroup.yaml
│   │   └── subscription.yaml
│   └── server
│       ├── gitea.yaml
│       └── kustomization.yaml
├── manifests
│   ├── README.md
│   ├── kustomization.yaml
│   ├── pipelines
│   │   ├── git-fetch-pipeline.yaml
│   │   ├── gitops-update-pipeline.yaml
│   │   ├── kustomization.yaml
│   │   └── s3-fetch-pipeline.yaml
│   └── tasks
│       ├── check-model-and-containerfile-exists
│       │   ├── README.md
│       │   └── check-model-and-containerfile-exists.yaml
│       ├── copy-model-from-pvc
│       │   ├── README.md
│       │   └── copy-model-from-pvc.yaml
│       ├── kserve-download-model
│       │   ├── README.md
│       │   └── kserve-download-model.yaml
│       ├── kustomization.yaml
│       ├── move-model-to-root-dir
│       │   ├── README.md
│       │   └── move-model-to-root-dir.yaml
│       ├── retrieve-build-image-info
│       │   ├── README.md
│       │   └── retrieve-build-image-info.yaml
│       ├── sanitise-object-name
│       │   ├── README.md
│       │   └── sanitise-object-name.yaml
│       ├── test-model-rest-svc
│       │   ├── README.md
│       │   └── test-model-rest-svc.yaml
│       └── yq-update
│           ├── README.md
│           └── yq-update.yaml
└── test
    ├── acm
    │   ├── bike-rental-app
    │   │   └── kustomization.yaml
    │   └── tensorflow-housing-app
    │       └── kustomization.yaml
    ├── e2e-tests
    │   ├── README.md
    │   ├── go.mod
    │   ├── go.sum
    │   ├── support
    │   │   ├── clients.go
    │   │   ├── config.go
    │   │   ├── git.go
    │   │   ├── kustomize.go
    │   │   ├── setup.go
    │   │   ├── tekton.go
    │   │   └── utils.go
    │   ├── template.config.json
    │   └── tests
    │       └── pipelines_test.go
    ├── gitops
    │   ├── bike-rental-app
    │   │   └── kustomization.yaml
    │   └── tensorflow-housing-app
    │       └── kustomization.yaml
    └── shell-pipeline-tests
        ├── README.md
        ├── common.sh
        ├── openvino-tensorflow-housing
        │   └── pipelines-test-openvino-tensorflow-housing.sh
        └── seldon-bike-rentals
            └── pipelines-test-seldon-bike-rentals.sh
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a bug report
4 | title: "[BUG]: "
5 | labels: kind/bug
6 | assignees: ''
7 |
8 | ---
9 |
10 | ## Details
11 |
12 | ### Describe the bug
13 |
14 |
15 | ### To Reproduce
16 |
17 |
18 | ### Expected behavior
19 |
20 |
21 | ### Screenshots (if applicable)
22 |
23 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest a feature or improvement
4 | title: "[FEATURE]: "
5 | labels: kind/enhancement
6 | assignees: ''
7 |
8 | ---
9 |
10 | ## Details
11 |
12 | ### Is your feature request related to a problem? Please describe.
13 |
14 |
15 | ### Describe the use-case or expected workflow
16 |
17 |
18 | ### Describe alternatives you've considered
19 |
20 |
21 | ### Additional context
22 |
23 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/user-story.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: User story
3 | about: Create a user story and add it to the tracker
4 | title: "[STORY]: "
5 | labels: kind/userstory
6 | assignees: ''
7 |
8 | ---
9 |
10 | ## Details
11 |
12 | ### Description
13 |
14 |
15 |
16 | ### Sub-tasks
17 |
18 | - [ ] #(issue number)
19 |
--------------------------------------------------------------------------------
/.github/images/ACM-example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/opendatahub-io/ai-edge/62d03f0571e1448e556ed4ba2892c6e5f954ed1f/.github/images/ACM-example.png
--------------------------------------------------------------------------------
/.github/images/S3-models.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/opendatahub-io/ai-edge/62d03f0571e1448e556ed4ba2892c6e5f954ed1f/.github/images/S3-models.png
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # config used by e2e-tests with secrets, so do not track
2 | test/e2e-tests/config.json
3 |
4 | # Byte-compiled / optimized / DLL files
5 | __pycache__/
6 | *.py[cod]
7 | *$py.class
8 |
9 | # C extensions
10 | *.so
11 |
12 | # Distribution / packaging
13 | .Python
14 | build/
15 | develop-eggs/
16 | dist/
17 | downloads/
18 | eggs/
19 | .eggs/
20 | lib/
21 | lib64/
22 | parts/
23 | sdist/
24 | var/
25 | wheels/
26 | share/python-wheels/
27 | *.egg-info/
28 | .installed.cfg
29 | *.egg
30 | MANIFEST
31 |
32 | # PyInstaller
33 | # Usually these files are written by a python script from a template
34 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
35 | *.manifest
36 | *.spec
37 |
38 | # Installer logs
39 | pip-log.txt
40 | pip-delete-this-directory.txt
41 |
42 | # Unit test / coverage reports
43 | htmlcov/
44 | .tox/
45 | .nox/
46 | .coverage
47 | .coverage.*
48 | .cache
49 | nosetests.xml
50 | coverage.xml
51 | *.cover
52 | *.py,cover
53 | .hypothesis/
54 | .pytest_cache/
55 | cover/
56 |
57 | # Translations
58 | *.mo
59 | *.pot
60 |
61 | # Django stuff:
62 | *.log
63 | local_settings.py
64 | db.sqlite3
65 | db.sqlite3-journal
66 |
67 | # Flask stuff:
68 | instance/
69 | .webassets-cache
70 |
71 | # Scrapy stuff:
72 | .scrapy
73 |
74 | # Sphinx documentation
75 | docs/_build/
76 |
77 | # PyBuilder
78 | .pybuilder/
79 | target/
80 |
81 | # Jupyter Notebook
82 | .ipynb_checkpoints
83 |
84 | # IPython
85 | profile_default/
86 | ipython_config.py
87 |
88 | # pyenv
89 | # For a library or package, you might want to ignore these files since the code is
90 | # intended to run in multiple environments; otherwise, check them in:
91 | # .python-version
92 |
93 | # pipenv
94 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
95 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
96 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
97 | # install all needed dependencies.
98 | #Pipfile.lock
99 |
100 | # poetry
101 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
102 | # This is especially recommended for binary packages to ensure reproducibility, and is more
103 | # commonly ignored for libraries.
104 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
105 | #poetry.lock
106 |
107 | # pdm
108 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
109 | #pdm.lock
110 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
111 | # in version control.
112 | # https://pdm.fming.dev/#use-with-ide
113 | .pdm.toml
114 |
115 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
116 | __pypackages__/
117 |
118 | # Celery stuff
119 | celerybeat-schedule
120 | celerybeat.pid
121 |
122 | # SageMath parsed files
123 | *.sage.py
124 |
125 | # Environments
126 | .env
127 | .venv
128 | env/
129 | venv/
130 | ENV/
131 | env.bak/
132 | venv.bak/
133 |
134 | # Spyder project settings
135 | .spyderproject
136 | .spyproject
137 |
138 | # Rope project settings
139 | .ropeproject
140 |
141 | # mkdocs documentation
142 | /site
143 |
144 | # mypy
145 | .mypy_cache/
146 | .dmypy.json
147 | dmypy.json
148 |
149 | # Pyre type checker
150 | .pyre/
151 |
152 | # pytype static type analyzer
153 | .pytype/
154 |
155 | # Cython debug symbols
156 | cython_debug/
157 |
158 | # PyCharm
159 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
160 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
161 | # and can be added to the global gitignore or merged into this file. For a more nuclear
162 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
163 | #.idea/
164 |
165 | *[.-]secret.yaml
166 | *[.-]secret.yml
167 | aws-storage-config
168 | *-overridden.yaml
169 | *-overridden.yml
170 | oc-debug-pod.yaml
171 |
172 |
173 | # Local Makefile variable override file name.
174 | *local.vars.mk
175 |
176 | # The odh binary
177 | cli/odh
178 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing Guide
2 |
3 | The following [`opendatahub-io/ai-edge` GitHub Project board](https://github.com/orgs/opendatahub-io/projects/47) is the source of truth for the work taking place in this repo. You can pick up an issue from the TO DO column and follow the workflow described below.
4 |
5 |
6 |
7 | 
8 |
9 |
10 |
11 | Issues with the `Tracker` label are the high-level longer-term tasks for the PoC, with smaller _“Sub Tasks”_ often listed in the description that should be completed to achieve this goal. These link to other GitHub issues.
12 |
13 |
14 |
15 | 
16 |
17 |
18 |
19 | ## Typical Workflow
20 |
21 | The easiest way to start contributing is to work on these smaller _“Sub Tasks”_.
22 |
23 |
24 |
25 | > **NOTE**
26 | > By contributing you agree to the license terms (see [LICENSE](./LICENSE)).
27 |
28 |
29 |
30 | The general flow of making contributions to the repo goes like this:
31 |
32 | - Choose an issue to work on.
33 |
34 | - [Assign it to yourself](https://docs.github.com/en/issues/tracking-your-work-with-issues/assigning-issues-and-pull-requests-to-other-github-users).
35 | - If the description is not very detailed, you can improve it yourself. Add a suitable description, link to a User Story, and add acceptance criteria (see the examples below).
36 | - Add labels and other details, e.g. the priority, and `kind/documentation` if you will be adding documentation or modifying existing `README` files.
37 | - If there is a roadblock to completing the issue, reach out on the relevant OpenDataHub Slack channels ([invite](https://github.com/opendatahub-io/opendatahub-community)). Someone will gladly try to help.
38 | - Sometimes a task or user story turns out not to be fully achievable, at least as originally intended (like [in this case](https://github.com/opendatahub-io/ai-edge/issues/17)). That is okay; reach out to others for help instead of trying to do the impossible.
39 | - If your work involves coding (which it probably does) please use the following Git approach:
40 | - Make a fork of the [`opendatahub-io/ai-edge`](https://github.com/opendatahub-io/ai-edge) repo.
41 |
42 | - Create a new branch with a suitable name that describes your work.
43 | - Create and push commits regularly to save your progress.
44 | - When you’re ready to make a pull request with your changes, first clean up the history by rebasing and squashing your commits. Make sure to use clear and descriptive commit messages.
45 | - Rebasing and squashing can be tricky, so take care when doing it. You can learn more about [squashing commits with rebase](https://gitready.com/advanced/2009/02/10/squashing-commits-with-rebase.html).
46 | - Create the pull request, apply the appropriate labels, and link the relevant GitHub issue to it. Also make sure to include a good description of your changes, and screenshots if appropriate.
47 | - Wait for other members of the team to review your work; you can also tag team members who you think are relevant to your work.
48 | - Once any conflicts and code suggestions have been resolved, and your work has been approved, you can merge the pull request or wait for somebody else to merge it for you.
49 |
50 |
51 |
52 | ## Examples
53 |
54 |
55 |
56 | Typical “Sub Task” GitHub issue:
57 |
58 | 
59 |
60 |
61 |
62 | Typical Pull Request:
63 |
64 | 
65 |
--------------------------------------------------------------------------------
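
A minimal sketch of the fork-and-rebase flow described in CONTRIBUTING.md above; the user, branch, and remote names are illustrative:

```bash
# Fork opendatahub-io/ai-edge on GitHub first, then work against your fork.
git clone git@github.com:<your-user>/ai-edge.git
cd ai-edge
git remote add upstream https://github.com/opendatahub-io/ai-edge.git

git checkout -b my-feature-branch      # branch name should describe your work
# ...create and push commits regularly to save your progress...

# Before opening the pull request, clean up the history:
git fetch upstream
git rebase -i upstream/main            # squash work-in-progress commits together
git push --force-with-lease origin my-feature-branch
```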
/Makefile:
--------------------------------------------------------------------------------
1 | # Read any custom variable overrides from a local.vars.mk file. The file is only read if it exists in the
2 | # same directory as this Makefile. Variables can be specified in the standard format supported by
3 | # GNU Make, since `include` processes the file as a Makefile.
4 | # Typical overrides are any variables you would otherwise pass at runtime that differ from the defaults specified in this file.
5 | MAKE_ENV_FILE = local.vars.mk
6 | -include $(MAKE_ENV_FILE)
7 |
8 | .PHONY: setup-observability test-acm-%-generate test go-test
9 |
10 | # Set up the secret required for observability, then generate and apply the manifests
11 | setup-observability: acm/odh-core/acm-observability/kustomization.yaml
12 | oc apply -k acm/odh-core/acm-observability
13 |
14 | # Generate app manifests using ACM GitOps flow, from custom GitHub org/branch, to custom namespace
15 | # Example invocations:
16 | # make -s -e GIT_REPO_URL="https\://github.com/opendatahub-io/ai-edge" GIT_BRANCH=my-git-branch CUSTOM_PREFIX=custom- CUSTOM_APP_NAMESPACE=custom-bike-rental-app test-acm-bike-rental-app-generate
17 | # make -s -e GIT_REPO_URL="https\://github.com/opendatahub-io/ai-edge" GIT_BRANCH=my-git-branch CUSTOM_PREFIX=custom- CUSTOM_APP_NAMESPACE=custom-tensorflow-housing test-acm-tensorflow-housing-app-generate
18 | test-acm-%-generate: test/acm/%/kustomization.yaml
19 | ifndef GIT_REPO_URL
20 | $(error GIT_REPO_URL is undefined)
21 | endif
22 | ifndef GIT_BRANCH
23 | $(error GIT_BRANCH is undefined)
24 | endif
25 | ifndef CUSTOM_PREFIX
26 | $(error CUSTOM_PREFIX is undefined)
27 | endif
28 | ifndef CUSTOM_APP_NAMESPACE
29 | $(error CUSTOM_APP_NAMESPACE is undefined)
30 | endif
31 | oc kustomize test/acm/$(subst -generate,,$(subst test-acm-,,$@))/ | sed -e "s|https://github.com/opendatahub-io/ai-edge|$(GIT_REPO_URL)|g" -e "s|my-git-branch|$(GIT_BRANCH)|g" -e "s|custom-prefix-|$(CUSTOM_PREFIX)|g" -e "s|custom-app-namespace|$(CUSTOM_APP_NAMESPACE)|g"
32 |
33 | GO=go
34 | GOFLAGS=""
35 |
36 | go-test:
37 | (cd test/e2e-tests/tests && ${GO} test -timeout 60m -shuffle off)
38 |
39 | test:
40 | ${MAKE} -C cli cli-test
41 | @(./test/shell-pipeline-tests/seldon-bike-rentals/pipelines-test-seldon-bike-rentals.sh)
42 | @(./test/shell-pipeline-tests/openvino-tensorflow-housing/pipelines-test-openvino-tensorflow-housing.sh)
43 |
44 | # This is a generic target to forward any cli-* targets to the cli Makefile
45 | cli-%:
46 | ${MAKE} -C cli $@
47 |
--------------------------------------------------------------------------------
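
A minimal sketch of the local.vars.mk override mechanism described at the top of the Makefile; the values below are illustrative:

```bash
# Create a local.vars.mk next to the Makefile; it is picked up via `-include`,
# so variables defined here take the place of the -e overrides shown in the
# example invocations above.
cat > local.vars.mk <<'EOF'
GIT_REPO_URL = https://github.com/<your-org>/ai-edge
GIT_BRANCH = my-git-branch
CUSTOM_PREFIX = custom-
CUSTOM_APP_NAMESPACE = custom-bike-rental-app
EOF

# The pattern target then renders the manifests without any -e flags:
make -s test-acm-bike-rental-app-generate
```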
/OWNERS:
--------------------------------------------------------------------------------
1 | approvers:
2 | - adelton
3 | - devguyio
4 | - grdryn
5 | - lavlas
6 | - MarianMacik
7 | - apodhrad
8 |
9 | reviewers:
10 | - biswassri
11 | - jackdelahunt
12 | - Sara4994
13 | - steventobin
14 |
--------------------------------------------------------------------------------
/THIRD-PARTY-NOTICES.txt:
--------------------------------------------------------------------------------
1 | This repository uses third-party software and datasets.
2 |
3 | Bike Sharing Dataset
4 | ====================
5 | Fanaee-T, Hadi. (2013). Bike Sharing Dataset. UCI Machine Learning Repository. https://doi.org/10.24432/C5W894, under the CC BY 4.0 license.
6 |
7 | California Housing Dataset
8 | ====================
9 | Pace, R. Kelley and Ronald Barry, Sparse Spatial Autoregressions, Statistics and Probability Letters, 33 (1997) 291-297. https://scikit-learn.org/stable/datasets/real_world.html#california-housing-dataset.
10 |
--------------------------------------------------------------------------------
/acm/odh-core/acm-observability/files/uwl_metrics_list.yaml:
--------------------------------------------------------------------------------
1 | names:
2 | # MLServer metrics
3 | - parallel_request_queue_sum
4 | - parallel_request_queue_bucket
5 | - parallel_request_queue_count
6 | - rest_server_request_duration_seconds_sum
7 | - rest_server_request_duration_seconds_bucket
8 | - rest_server_request_duration_seconds_count
9 | - rest_server_requests_in_progress
10 | - rest_server_requests_total
11 | - ovms_infer_req_active
12 | - ovms_infer_req_queue_size
13 | - ovms_inference_time_us_bucket
14 | - ovms_inference_time_us_count
15 | - ovms_inference_time_us_sum
16 | - ovms_request_time_us_bucket
17 | - ovms_request_time_us_count
18 | - ovms_request_time_us_sum
19 | - ovms_requests_fail
20 | - ovms_requests_success
21 | - ovms_streams
22 | - ovms_wait_for_infer_req_time_us_bucket
23 | - ovms_wait_for_infer_req_time_us_count
24 | - ovms_wait_for_infer_req_time_us_sum
25 |
--------------------------------------------------------------------------------
/acm/odh-core/acm-observability/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - namespace.yaml
6 | - multiclusterobservability.yaml
7 |
8 | namespace: open-cluster-management-observability
9 |
10 | configMapGenerator:
11 | - name: grafana-dashboard-edge-inference-health
12 | files:
13 | - grafana-dashboards/edge-inference-health.json
14 | options:
15 | annotations:
16 | observability.open-cluster-management.io/dashboard-folder: "Edge"
17 | labels:
18 | grafana-custom-dashboard: "true"
19 | - name: observability-metrics-custom-allowlist
20 | files:
21 | - files/uwl_metrics_list.yaml
22 | options:
23 | disableNameSuffixHash: true
24 |
25 | secretGenerator:
26 | - name: thanos-object-storage
27 | files:
28 | - secrets/thanos.yaml
29 | options:
30 | disableNameSuffixHash: true
31 |
--------------------------------------------------------------------------------
/acm/odh-core/acm-observability/multiclusterobservability.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: observability.open-cluster-management.io/v1beta2
2 | kind: MultiClusterObservability
3 | metadata:
4 | name: observability
5 | spec:
6 | observabilityAddonSpec: {}
7 | storageConfig:
8 | metricObjectStorage:
9 | name: thanos-object-storage
10 | key: thanos.yaml
11 |
--------------------------------------------------------------------------------
/acm/odh-core/acm-observability/namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: open-cluster-management-observability
5 |
--------------------------------------------------------------------------------
/acm/odh-core/acm-observability/secrets/thanos.yaml:
--------------------------------------------------------------------------------
1 | type: s3
2 | config:
3 | bucket: YOUR_S3_BUCKET
4 | endpoint: YOUR_S3_ENDPOINT
5 | insecure: true
6 | access_key: YOUR_ACCESS_KEY
7 | secret_key: YOUR_SECRET_KEY
8 |
--------------------------------------------------------------------------------
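
A sketch of wiring this placeholder secret into a working observability install; the bucket, endpoint, and keys are illustrative and must match your own S3-compatible store:

```bash
# Replace the placeholders in the Thanos object-storage secret...
sed -i \
  -e 's|YOUR_S3_BUCKET|acm-metrics-bucket|' \
  -e 's|YOUR_S3_ENDPOINT|s3.us-east-1.amazonaws.com|' \
  -e 's|YOUR_ACCESS_KEY|<access-key>|' \
  -e 's|YOUR_SECRET_KEY|<secret-key>|' \
  acm/odh-core/acm-observability/secrets/thanos.yaml

# ...then generate and apply everything (same as the setup-observability target):
oc apply -k acm/odh-core/acm-observability
```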
/acm/odh-core/default/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - test-namespace.yaml
6 | - ../olm-operator-subscriptions
7 |
--------------------------------------------------------------------------------
/acm/odh-core/default/test-namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: test-namespace-2
5 |
--------------------------------------------------------------------------------
/acm/odh-core/olm-operator-subscriptions/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - namespace.yaml
6 | - operatorgroup.yaml
7 | - opendatahub-operator.yaml
8 | - openshift-pipelines.yaml
9 |
--------------------------------------------------------------------------------
/acm/odh-core/olm-operator-subscriptions/namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: odh-core-operators
5 |
--------------------------------------------------------------------------------
/acm/odh-core/olm-operator-subscriptions/opendatahub-operator.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: operators.coreos.com/v1alpha1
2 | kind: Subscription
3 | metadata:
4 | name: opendatahub-operator
5 | namespace: odh-core-operators
6 | spec:
7 | channel: fast
8 | name: opendatahub-operator
9 | source: community-operators
10 | sourceNamespace: openshift-marketplace
11 |
--------------------------------------------------------------------------------
/acm/odh-core/olm-operator-subscriptions/openshift-pipelines.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: operators.coreos.com/v1alpha1
2 | kind: Subscription
3 | metadata:
4 | name: openshift-pipelines-operator
5 | namespace: odh-core-operators
6 | spec:
7 | name: openshift-pipelines-operator-rh
8 | source: redhat-operators
9 | sourceNamespace: openshift-marketplace
10 |
--------------------------------------------------------------------------------
/acm/odh-core/olm-operator-subscriptions/operatorgroup.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: operators.coreos.com/v1
2 | kind: OperatorGroup
3 | metadata:
4 | name: global-operators
5 | namespace: odh-core-operators
6 | spec:
7 | upgradeStrategy: Default
8 |
--------------------------------------------------------------------------------
/acm/odh-edge/apps/bike-rental-app/bike-rental-inference-generator-cronjob.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1
2 | kind: CronJob
3 | metadata:
4 | name: bike-rental-inference-generator
5 | spec:
6 | schedule: "*/5 * * * *"
7 | jobTemplate:
8 | spec:
9 | template:
10 | spec:
11 | containers:
12 | - name: test-mlflow-container
13 | image: quay.io/rhoai-edge/ab-jq@sha256:492b950bef86a0c33163ccc53e96a9ec8847773ad3fecbf1e5c92be932866e04
14 | securityContext:
15 | allowPrivilegeEscalation: false
16 | runAsNonRoot: true
17 | seccompProfile:
18 | type: "RuntimeDefault"
19 | capabilities:
20 | drop:
21 | - ALL
22 | command:
23 | - /bin/bash
24 | - -c
25 | - |
26 | echo "Test inference REST web service"
27 | echo "Data:"
28 | cat /workspace/test-data/test-data.json
29 | # Pick a random row index (0-19) from the test data
30 | INDEX=$(( RANDOM % 20 ))
31 |
32 | # Number of requests between 10 and 100
33 | R_COUNT=$(( RANDOM % 91 + 10 ))
34 | # Number of concurrent requests between 10 and R_COUNT
35 | R_RATE=$(( RANDOM % ($R_COUNT - 9) + 10 ))
36 |
37 | # Randomly selects a record from data.json
38 | RANDOM_ROW=$(cat /workspace/test-data/test-data.json | jq -r ".dataframe_split.data | .[$INDEX]")
39 | PAYLOAD="{\"dataframe_split\": {\"columns\":[ \"day\", \"mnth\", \"year\", \"season\",\"holiday\", \"weekday\", \"workingday\", \"weathersit\", \"temp\", \"hum\", \"windspeed\" ], \"data\":[$RANDOM_ROW]}}"
40 | echo "Payload: $PAYLOAD"
41 | echo "$PAYLOAD" > /tmp/payload.json
42 |
43 |
44 | # Call the service
45 | SRVC_URL="http://$(MODEL_NAME)-$(MODEL_VERSION):8080/$(TEST_ENDPOINT)"
46 | echo ""
47 | echo "Call inference service $SRVC_URL :"
48 | echo "Number of requests: ${R_COUNT} concurrent requests: ${R_RATE}"
49 | echo ""
50 | # Use ab for load testing with $R_RATE concurrent requests
51 | ab -s 300 -p /tmp/payload.json -T "application/json" -c $R_RATE -n $R_COUNT $SRVC_URL
52 | env:
53 | - name: MODEL_NAME
54 | value: "bike-rental-app-model" # modify this
55 | - name: MODEL_VERSION
56 | value: "1" # modify this if needed
57 | - name: TEST_ENDPOINT
58 | value: "invocations" # modify this
59 | volumeMounts:
60 | - name: test-data-volume
61 | mountPath: /workspace/test-data
62 | volumes:
63 | - name: test-data-volume
64 | configMap:
65 | name: to-be-replaced-by-kustomize
66 | restartPolicy: OnFailure
67 |
68 |
--------------------------------------------------------------------------------
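
For a single request outside the CronJob, a sketch of calling the same MLflow `/invocations` endpoint by hand from inside the cluster; the service DNS name matches the scrape target listed in the telemetry collector config below, and the row comes from files/test-data.json:

```bash
# Pick the first record from the checked-in test data...
ROW=$(jq -c '.dataframe_split.data[0]' acm/odh-edge/apps/bike-rental-app/files/test-data.json)

# ...and POST it to the model service (run from a pod in the cluster):
curl -s -H 'Content-Type: application/json' \
  -d "{\"dataframe_split\": {\"columns\": [\"season\",\"year\",\"mnth\",\"holiday\",\"weekday\",\"workingday\",\"weathersit\",\"temp\",\"hum\",\"windspeed\",\"day\"], \"data\": [$ROW]}}" \
  http://bike-rental-app-model-1.bike-rental-app.svc.cluster.local:8080/invocations
```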
/acm/odh-edge/apps/bike-rental-app/files/test-data.json:
--------------------------------------------------------------------------------
1 | {"dataframe_split": {"columns": ["season", "year", "mnth", "holiday", "weekday", "workingday", "weathersit", "temp", "hum", "windspeed", "day"], "data": [[1.0, 0.0, 3.0, 0.0, 2.0, 1.0, 1.0, 0.266667, 0.535, 0.216425, 1.0], [4.0, 1.0, 12.0, 0.0, 1.0, 1.0, 2.0, 0.393333, 0.907083, 0.0982583, 17.0], [4.0, 1.0, 11.0, 0.0, 0.0, 0.0, 1.0, 0.3425, 0.692917, 0.227612, 18.0], [4.0, 0.0, 9.0, 0.0, 2.0, 1.0, 2.0, 0.636667, 0.885417, 0.118171, 27.0], [3.0, 1.0, 9.0, 0.0, 6.0, 0.0, 2.0, 0.659167, 0.799167, 0.281104, 8.0], [1.0, 1.0, 2.0, 0.0, 5.0, 1.0, 1.0, 0.343333, 0.634583, 0.205846, 17.0], [3.0, 0.0, 7.0, 0.0, 1.0, 1.0, 1.0, 0.746667, 0.65125, 0.215804, 18.0], [1.0, 0.0, 3.0, 0.0, 1.0, 1.0, 1.0, 0.325217, 0.496957, 0.136926, 14.0], [4.0, 0.0, 12.0, 0.0, 6.0, 0.0, 1.0, 0.299167, 0.612917, 0.0957833, 3.0], [2.0, 1.0, 4.0, 0.0, 0.0, 0.0, 1.0, 0.606667, 0.507917, 0.225129, 15.0], [4.0, 1.0, 12.0, 0.0, 3.0, 1.0, 1.0, 0.438333, 0.485, 0.324021, 5.0], [1.0, 0.0, 3.0, 0.0, 0.0, 0.0, 1.0, 0.384348, 0.527391, 0.270604, 13.0], [4.0, 1.0, 9.0, 0.0, 1.0, 1.0, 1.0, 0.514167, 0.492917, 0.142404, 24.0], [2.0, 0.0, 4.0, 0.0, 2.0, 1.0, 2.0, 0.414167, 0.642083, 0.388067, 5.0], [4.0, 0.0, 12.0, 0.0, 2.0, 1.0, 3.0, 0.4625, 0.949583, 0.232583, 6.0], [3.0, 1.0, 7.0, 0.0, 1.0, 1.0, 1.0, 0.781667, 0.447083, 0.195267, 2.0], [2.0, 1.0, 4.0, 0.0, 1.0, 1.0, 1.0, 0.489167, 0.3175, 0.358196, 9.0], [2.0, 1.0, 5.0, 0.0, 3.0, 1.0, 2.0, 0.575, 0.744167, 0.216412, 9.0], [1.0, 0.0, 3.0, 0.0, 4.0, 1.0, 1.0, 0.198333, 0.318333, 0.225754, 3.0], [4.0, 1.0, 10.0, 0.0, 5.0, 1.0, 1.0, 0.4375, 0.539167, 0.235092, 12.0]]}}
2 |
--------------------------------------------------------------------------------
/acm/odh-edge/apps/bike-rental-app/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 | labels:
4 | - includeSelectors: true
5 | pairs:
6 | app: bike-rental-app
7 | resources:
8 | - ../../base
9 | - bike-rental-inference-generator-cronjob.yaml
10 | namespace: bike-rental-app
11 | namePrefix: bike-rental-app-
12 | commonLabels:
13 | app: bike-rental-app-1
14 | model-name: bike-rentals-auto-ml
15 | configMapGenerator:
16 | - name: test-data
17 | files:
18 | - files/test-data.json
19 | patches:
20 | - patch: |-
21 | apiVersion: apps/v1
22 | kind: Deployment
23 | metadata:
24 | name: model-1
25 | spec:
26 | template:
27 | spec:
28 | containers:
29 | - name: model
30 | readinessProbe:
31 | httpGet:
32 | path: /v2/models/bike-rentals-auto-ml/ready
33 | target:
34 | kind: Deployment
35 | replacements:
36 | - source:
37 | kind: Service
38 | fieldPath: metadata.name
39 | targets:
40 | - select:
41 | group: route.openshift.io
42 | kind: Route
43 | fieldPaths:
44 | - spec.to.name
45 | - source:
46 | kind: Service
47 | fieldPath: metadata.labels.app
48 | targets:
49 | - select:
50 | group: monitoring.coreos.com
51 | kind: ServiceMonitor
52 | fieldPaths:
53 | - spec.selector.matchLabels.app
54 | - source:
55 | kind: ConfigMap
56 | name: test-data
57 | fieldPath: metadata.name
58 | targets:
59 | - select:
60 | kind: CronJob
61 | name: bike-rental-inference-generator
62 | fieldPaths:
63 | - spec.jobTemplate.spec.template.spec.volumes.*.configMap.name
64 | images:
65 | - name: edge-model-template-image
66 | newName: quay.io/rhoai-edge/bike-rentals-auto-ml
67 | digest: sha256:ed53c9566f8424e84ee4be1fd51939f99c1406a0ceb8b4d0b72693e33faea7aa
68 |
--------------------------------------------------------------------------------
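
A sketch to inspect what the patches and replacements above actually produce before ACM or Argo CD applies it:

```bash
# Render the overlay locally; the grep highlights the name and label rewiring
# done by namePrefix and the replacements block:
oc kustomize acm/odh-edge/apps/bike-rental-app | grep -E 'name:|app:' | sort -u
```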
/acm/odh-edge/apps/telemetry/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | namespace: opendatahub-edge-collector
5 |
6 | resources:
7 | - namespace.yaml
8 | - otel-collector.yaml
9 | - role.yaml
10 | - role-binding.yaml
11 |
--------------------------------------------------------------------------------
/acm/odh-edge/apps/telemetry/namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: opendatahub-edge-collector
5 | labels:
6 | # If your OpenShift GitOps (or Argo CD) is in a different namespace
7 | # than openshift-gitops, then update this label value to match.
8 | # More information:
9 | # https://docs.openshift.com/gitops/1.11/argocd_instance/setting-up-argocd-instance.html
10 | argocd.argoproj.io/managed-by: openshift-gitops
11 |
--------------------------------------------------------------------------------
/acm/odh-edge/apps/telemetry/otel-collector.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: opentelemetry.io/v1alpha1
2 | kind: OpenTelemetryCollector
3 | metadata:
4 | name: cluster-collector
5 | namespace: opendatahub-edge-collector
6 | labels:
7 | app.kubernetes.io/managed-by: opentelemetry-operator
8 | spec:
9 | mode: deployment
10 | observability:
11 | metrics:
12 | enableMetrics: true
13 | config: |
14 | receivers:
15 | prometheus:
16 | config:
17 | scrape_configs:
18 | - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
19 | job_name: otel-collector
20 | metrics_path: /metrics
21 | scrape_interval: 5s
22 | static_configs:
23 | - targets:
24 | - tensorflow-housing-app-model-1.tensorflow-housing-app.svc.cluster.local:8080
25 | - bike-rental-app-model-1.bike-rental-app.svc.cluster.local:8082
26 | exporters:
27 | prometheus:
28 | endpoint: 0.0.0.0:8889
29 | service:
30 | pipelines:
31 | metrics:
32 | exporters:
33 | - prometheus
34 | receivers:
35 | - prometheus
36 |
--------------------------------------------------------------------------------
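
A sketch for checking what the collector re-exports; the `cluster-collector-collector` Service name assumes the OpenTelemetry operator's usual `<name>-collector` naming:

```bash
# Query the Prometheus exporter endpoint (0.0.0.0:8889 above) from a throwaway pod:
oc -n opendatahub-edge-collector run curl-metrics --rm -i --restart=Never \
  --image=curlimages/curl -- \
  curl -s http://cluster-collector-collector:8889/metrics
```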
/acm/odh-edge/apps/telemetry/role-binding.yaml:
--------------------------------------------------------------------------------
1 | kind: RoleBinding
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | metadata:
4 | name: otel-collector-argo
5 | namespace: opendatahub-edge-collector
6 | subjects:
7 | - kind: ServiceAccount
8 | name: openshift-gitops-argocd-application-controller
9 | namespace: openshift-gitops
10 | roleRef:
11 | apiGroup: rbac.authorization.k8s.io
12 | kind: Role
13 | name: otel-collector-role
14 |
--------------------------------------------------------------------------------
/acm/odh-edge/apps/telemetry/role.yaml:
--------------------------------------------------------------------------------
1 | kind: Role
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | metadata:
4 | name: otel-collector-role
5 | namespace: opendatahub-edge-collector
6 | rules:
7 | - verbs:
8 | - '*'
9 | apiGroups:
10 | - opentelemetry.io
11 | resources:
12 | - opentelemetrycollectors
13 |
--------------------------------------------------------------------------------
/acm/odh-edge/apps/tensorflow-housing-app/files/test-data.json:
--------------------------------------------------------------------------------
1 | {"data": [[-120.47, 35.74, 9.0, 4267.0, 785.0, 691.0, 3.7303, 162700.0], [-119.88, 34.43, 16.0, 1734.0, 365.0, 391.0, 4.4777, 282500.0], [-117.8, 33.78, 17.0, 4138.0, 805.0, 780.0, 4.7804, 242000.0], [-117.93, 34.15, 14.0, 9610.0, 2005.0, 1907.0, 4.0393, 156800.0], [-120.43, 34.89, 30.0, 1979.0, 342.0, 320.0, 5.0286, 158000.0], [-118.3, 34.18, 13.0, 7174.0, 1997.0, 1872.0, 3.0973, 251900.0], [-122.47, 37.78, 51.0, 1485.0, 386.0, 385.0, 2.7431, 307100.0], [-118.52, 34.22, 35.0, 1275.0, 222.0, 226.0, 5.0282, 195400.0], [-119.27, 35.5, 21.0, 2171.0, 483.0, 450.0, 1.7105, 52100.0], [-119.79, 34.44, 25.0, 1479.0, 314.0, 309.0, 4.1797, 271800.0], [-122.41, 37.61, 42.0, 1602.0, 262.0, 255.0, 5.7398, 336400.0], [-119.4, 36.25, 25.0, 1696.0, 279.0, 291.0, 2.3, 132800.0], [-122.3, 37.9, 35.0, 1102.0, 308.0, 303.0, 2.3946, 141700.0], [-118.27, 33.99, 41.0, 656.0, 162.0, 170.0, 1.8047, 101800.0], [-118.19, 33.98, 34.0, 1022.0, 286.0, 275.0, 2.6042, 156700.0], [-121.75, 36.76, 32.0, 1740.0, 399.0, 389.0, 2.7694, 132400.0], [-120.91, 38.11, 9.0, 3585.0, 680.0, 598.0, 3.636, 133100.0], [-118.24, 34.22, 34.0, 1722.0, 406.0, 371.0, 4.1523, 252000.0], [-116.85, 34.25, 5.0, 5806.0, 1030.0, 219.0, 4.0132, 163100.0], [-121.04, 37.68, 28.0, 1909.0, 398.0, 380.0, 2.3783, 81400.0]]}
2 |
--------------------------------------------------------------------------------
/acm/odh-edge/apps/tensorflow-housing-app/housing-inference-generator.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1
2 | kind: CronJob
3 | metadata:
4 | name: housing-inference-generator
5 | spec:
6 | schedule: "*/5 * * * *"
7 | jobTemplate:
8 | spec:
9 | template:
10 | spec:
11 | containers:
12 | - name: test-mlflow-container
13 | image: quay.io/rhoai-edge/ab-jq
14 | securityContext:
15 | allowPrivilegeEscalation: false
16 | runAsNonRoot: true
17 | seccompProfile:
18 | type: "RuntimeDefault"
19 | capabilities:
20 | drop:
21 | - ALL
22 | command:
23 | - /bin/bash
24 | - -c
25 | - |
26 | echo "Test inference REST web service"
27 | echo "Data:"
28 | cat /workspace/test-data/test-data.json
29 | # Pick a random row index (0-19) from the test data
30 | INDEX=$(( RANDOM % 20 ))
31 |
32 | # Number of requests between 100 and 1000
33 | R_COUNT=$(( RANDOM % 901 + 100 ))
34 | # Number of concurrent requests between 100 and R_COUNT
35 | R_RATE=$(( RANDOM % ($R_COUNT - 99) + 100 ))
36 |
37 | # Randomly selects a record from data.json
38 | RANDOM_ROW=$(cat /workspace/test-data/test-data.json | jq -r ".data | .[$INDEX]")
39 | PAYLOAD="{\"instances\":[$RANDOM_ROW]}"
40 | echo "Payload: $PAYLOAD"
41 | echo "$PAYLOAD" > /tmp/payload.json
42 |
43 |
44 | # Call the service
45 | SRVC_URL="http://$(MODEL_NAME)-$(MODEL_VERSION):8080/$(TEST_ENDPOINT)"
46 | echo ""
47 | echo "Call inference service $SRVC_URL :"
48 | echo "Number of requests: ${R_COUNT} concurrent requests: ${R_RATE}"
49 | echo ""
50 | # Use ab for load testing with $R_RATE concurrent requests
51 | ab -p /tmp/payload.json -T "application/json" -c $R_RATE -n $R_COUNT $SRVC_URL
52 | env:
53 | - name: MODEL_NAME
54 | value: "tensorflow-housing-app-model" # modify this, this is a guess to the actual value
55 | - name: MODEL_VERSION
56 | value: "1" # modify this if needed
57 | - name: TEST_ENDPOINT
58 | value: "v1/models/tensorflow-housing/versions/1:predict" # modify this
59 | volumeMounts:
60 | - name: test-data-volume
61 | mountPath: /workspace/test-data
62 | volumes:
63 | - name: test-data-volume
64 | configMap:
65 | name: to-be-replaced-by-kustomize
66 | restartPolicy: OnFailure
67 |
--------------------------------------------------------------------------------
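
As with the bike-rental generator, a sketch of issuing one TensorFlow Serving `:predict` request by hand; the service name and endpoint follow the MODEL_NAME and TEST_ENDPOINT values above:

```bash
ROW=$(jq -c '.data[0]' acm/odh-edge/apps/tensorflow-housing-app/files/test-data.json)
curl -s -H 'Content-Type: application/json' \
  -d "{\"instances\": [$ROW]}" \
  "http://tensorflow-housing-app-model-1.tensorflow-housing-app.svc.cluster.local:8080/v1/models/tensorflow-housing/versions/1:predict"
```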
/acm/odh-edge/apps/tensorflow-housing-app/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 | labels:
4 | - includeSelectors: true
5 | pairs:
6 | app: tensorflow-housing-app
7 | resources:
8 | - ../../base
9 | - housing-inference-generator.yaml
10 | namespace: tensorflow-housing-app
11 | namePrefix: tensorflow-housing-app-
12 | commonLabels:
13 | app: tensorflow-housing-app-1
14 | model-name: tensorflow-housing
15 | configMapGenerator:
16 | - name: test-data
17 | files:
18 | - files/test-data.json
19 | patches:
20 | - patch: |-
21 | apiVersion: apps/v1
22 | kind: Deployment
23 | metadata:
24 | name: model-1
25 | spec:
26 | template:
27 | spec:
28 | containers:
29 | - name: model
30 | readinessProbe:
31 | httpGet:
32 | path: /v2/models/tensorflow-housing/ready
33 | target:
34 | kind: Deployment
35 | - patch: |-
36 | kind: CronJob
37 | metadata:
38 | name: housing-inference-generator
39 | spec:
40 | jobTemplate:
41 | spec:
42 | template:
43 | spec:
44 | containers:
45 | - name: test-mlflow-container
46 | env:
47 | - name: MODEL_NAME
48 | value: "tensorflow-housing-app-model"
49 | - name: MODEL_VERSION
50 | value: "1"
51 | - name: TEST_ENDPOINT
52 | value: "v1/models/tensorflow-housing/versions/1:predict"
53 | target:
54 | kind: CronJob
55 | replacements:
56 | - source:
57 | kind: Service
58 | fieldPath: metadata.name
59 | targets:
60 | - select:
61 | group: route.openshift.io
62 | kind: Route
63 | fieldPaths:
64 | - spec.to.name
65 | - source:
66 | kind: Service
67 | fieldPath: metadata.labels.app
68 | targets:
69 | - select:
70 | group: monitoring.coreos.com
71 | kind: ServiceMonitor
72 | fieldPaths:
73 | - spec.selector.matchLabels.app
74 | - source:
75 | kind: ConfigMap
76 | name: test-data
77 | fieldPath: metadata.name
78 | targets:
79 | - select:
80 | kind: CronJob
81 | name: housing-inference-generator
82 | fieldPaths:
83 | - spec.jobTemplate.spec.template.spec.volumes.*.configMap.name
84 | images:
85 | - name: edge-model-template-image
86 | newName: quay.io/rhoai-edge/tensorflow-housing
87 | digest: sha256:8486af7728e1214ac5f81c6b93d560078d357f2096d1b40c38a9ac8e1cd68767
88 |
--------------------------------------------------------------------------------
/acm/odh-edge/base/files/uwl_metrics_list.yaml:
--------------------------------------------------------------------------------
1 | names:
2 | # MLServer metrics
3 | - parallel_request_queue_sum
4 | - parallel_request_queue_bucket
5 | - parallel_request_queue_count
6 | - rest_server_request_duration_seconds_sum
7 | - rest_server_request_duration_seconds_bucket
8 | - rest_server_request_duration_seconds_count
9 | - rest_server_requests_in_progress
10 | - rest_server_requests_total
11 | - ovms_infer_req_active
12 | - ovms_infer_req_queue_size
13 | - ovms_inference_time_us_bucket
14 | - ovms_inference_time_us_count
15 | - ovms_inference_time_us_sum
16 | - ovms_request_time_us_bucket
17 | - ovms_request_time_us_count
18 | - ovms_request_time_us_sum
19 | - ovms_requests_fail
20 | - ovms_requests_success
21 | - ovms_streams
22 | - ovms_wait_for_infer_req_time_us_bucket
23 | - ovms_wait_for_infer_req_time_us_count
24 | - ovms_wait_for_infer_req_time_us_sum
25 |
--------------------------------------------------------------------------------
/acm/odh-edge/base/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - namespace.yaml
6 | - model-deployment.yaml
7 | - model-service.yaml
8 | - model-route.yaml
9 |
10 | configMapGenerator:
11 | - files:
12 | - files/uwl_metrics_list.yaml
13 | name: observability-metrics-custom-allowlist
14 | options:
15 | disableNameSuffixHash: true
16 |
--------------------------------------------------------------------------------
/acm/odh-edge/base/model-deployment.yaml:
--------------------------------------------------------------------------------
1 | kind: Deployment
2 | apiVersion: apps/v1
3 | metadata:
4 | name: model-1
5 | spec:
6 | replicas: 1
7 | template:
8 | spec:
9 | containers:
10 | - readinessProbe:
11 | httpGet:
12 | path: /v2/models/tensorflow-housing/ready
13 | port: 8080
14 | scheme: HTTP
15 | timeoutSeconds: 1
16 | periodSeconds: 5
17 | successThreshold: 1
18 | failureThreshold: 8
19 | terminationMessagePath: /dev/termination-log
20 | name: model
21 | livenessProbe:
22 | httpGet:
23 | path: /v2/health/live
24 | port: 8080
25 | scheme: HTTP
26 | timeoutSeconds: 1
27 | periodSeconds: 5
28 | successThreshold: 1
29 | failureThreshold: 8
30 | ports:
31 | - containerPort: 8080
32 | protocol: TCP
33 | imagePullPolicy: IfNotPresent
34 | terminationMessagePolicy: File
35 | image: edge-model-template-image
36 | restartPolicy: Always
37 | terminationGracePeriodSeconds: 30
38 | dnsPolicy: ClusterFirst
39 | securityContext: {}
40 | schedulerName: default-scheduler
41 | strategy:
42 | type: RollingUpdate
43 | rollingUpdate:
44 | maxUnavailable: 25%
45 | maxSurge: 25%
46 | revisionHistoryLimit: 10
47 | progressDeadlineSeconds: 600
48 |
--------------------------------------------------------------------------------
/acm/odh-edge/base/model-route.yaml:
--------------------------------------------------------------------------------
1 | kind: Route
2 | apiVersion: route.openshift.io/v1
3 | metadata:
4 | name: model-1
5 | spec:
6 | to:
7 | kind: Service
8 | name: model-1
9 | weight: 100
10 | port:
11 | targetPort: port
12 | tls:
13 | termination: edge
14 | insecureEdgeTerminationPolicy: Redirect
15 | wildcardPolicy: None
16 |
--------------------------------------------------------------------------------
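
A sketch of reaching a deployed model from outside the cluster through this edge-terminated Route; the rendered route name assumes the tensorflow-housing-app overlay's namePrefix:

```bash
# Resolve the Route host and probe the readiness endpoint used by the Deployment:
HOST=$(oc -n tensorflow-housing-app get route tensorflow-housing-app-model-1 \
  -o jsonpath='{.spec.host}')
curl -sk -o /dev/null -w '%{http_code}\n' \
  "https://${HOST}/v2/models/tensorflow-housing/ready"
```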
/acm/odh-edge/base/model-service.yaml:
--------------------------------------------------------------------------------
1 | kind: Service
2 | apiVersion: v1
3 | metadata:
4 | name: model-1
5 | spec:
6 | ports:
7 | - name: port
8 | protocol: TCP
9 | port: 8080
10 | targetPort: 8080
11 | - name: metrics
12 | protocol: TCP
13 | port: 8082
14 | targetPort: 8082
15 | type: ClusterIP
16 |
--------------------------------------------------------------------------------
/acm/odh-edge/base/namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | # edit this to the namespace where the model is deployed
5 | name: model-to-edge
6 | labels:
7 | # If your OpenShift GitOps (or Argo CD) is in a different namespace
8 | # than openshift-gitops, then update this label value to match.
9 | # More information:
10 | # https://docs.openshift.com/gitops/1.11/argocd_instance/setting-up-argocd-instance.html
11 | argocd.argoproj.io/managed-by: openshift-gitops
12 |
--------------------------------------------------------------------------------
/acm/registration/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - near-edge
6 |
--------------------------------------------------------------------------------
/acm/registration/near-edge/base/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - near-edge.yaml
6 |
7 | configurations:
8 | - nameReference.yaml
9 |
--------------------------------------------------------------------------------
/acm/registration/near-edge/base/nameReference.yaml:
--------------------------------------------------------------------------------
1 | nameReference:
2 | - kind: Secret
3 | fieldSpecs:
4 | - kind: Channel
5 | path: spec/secretRef/name
6 |
--------------------------------------------------------------------------------
/acm/registration/near-edge/base/near-edge.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: cluster.open-cluster-management.io/v1beta1
3 | kind: Placement
4 | metadata:
5 | name: placement
6 | namespace: openshift-gitops
7 | spec:
8 | clusterSets:
9 | - poc-near-edge
10 | predicates:
11 | - requiredClusterSelector:
12 | labelSelector: {}
13 | tolerations:
14 | - key: cluster.open-cluster-management.io/unreachable
15 | operator: Exists
16 | - key: cluster.open-cluster-management.io/unavailable
17 | operator: Exists
18 | ---
19 | apiVersion: argoproj.io/v1alpha1
20 | kind: ApplicationSet
21 | metadata:
22 | name: appset
23 | namespace: openshift-gitops
24 | spec:
25 | generators:
26 | - clusterDecisionResource:
27 | configMapRef: acm-placement
28 | labelSelector:
29 | matchLabels:
30 | cluster.open-cluster-management.io/placement: placement
31 | requeueAfterSeconds: 30
32 | template:
33 | metadata:
34 | name: '{{name}}-application'
35 | labels:
36 | apps.open-cluster-management.io/pull-to-ocm-managed-cluster: 'true'
37 | annotations:
38 | argocd.argoproj.io/skip-reconcile: 'true'
39 | apps.open-cluster-management.io/ocm-managed-cluster: '{{name}}'
40 | apps.open-cluster-management.io/ocm-managed-cluster-app-namespace: near-edge-acm-template
41 | spec:
42 | project: default
43 | source:
44 | repoURL: 'https://github.com/opendatahub-io/ai-edge.git'
45 | targetRevision: main
46 | path: acm/odh-edge/apps/app
47 | destination:
48 | server: https://kubernetes.default.svc
49 | namespace: near-edge-acm-template
50 | syncPolicy:
51 | automated:
52 | prune: true
53 | syncOptions:
54 | - CreateNamespace=true
55 |
--------------------------------------------------------------------------------
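
The Placement above only matches clusters bound into the poc-near-edge ManagedClusterSet; a sketch of adding one (the cluster name is illustrative):

```bash
# Label a managed cluster into the cluster set so the Placement can select it:
oc label managedcluster my-edge-cluster \
  cluster.open-cluster-management.io/clusterset=poc-near-edge
```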
/acm/registration/near-edge/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - openshift-gitops-managedclustersetbinding.yaml
6 | - openshift-gitops-placement.yaml
7 | - openshift-gitops-gitopscluster.yaml
8 | - overlays/bike-rental-app
9 | - overlays/tensorflow-housing-app
10 | - overlays/opentelemetry
11 |
--------------------------------------------------------------------------------
/acm/registration/near-edge/openshift-gitops-gitopscluster.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps.open-cluster-management.io/v1beta1
3 | kind: GitOpsCluster
4 | metadata:
5 | name: near-edge-clusters
6 | namespace: openshift-gitops
7 | spec:
8 | argoServer:
9 | cluster: notused
10 | argoNamespace: openshift-gitops
11 | placementRef:
12 | kind: Placement
13 | apiVersion: cluster.open-cluster-management.io/v1beta1
14 | name: near-edge-clusters
15 |
--------------------------------------------------------------------------------
/acm/registration/near-edge/openshift-gitops-managedclustersetbinding.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: cluster.open-cluster-management.io/v1beta2
3 | kind: ManagedClusterSetBinding
4 | metadata:
5 | name: poc-near-edge
6 | namespace: openshift-gitops
7 | spec:
8 | clusterSet: poc-near-edge
9 |
--------------------------------------------------------------------------------
/acm/registration/near-edge/openshift-gitops-placement.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: cluster.open-cluster-management.io/v1beta1
3 | kind: Placement
4 | metadata:
5 | name: near-edge-clusters
6 | namespace: openshift-gitops
7 | spec:
8 | clusterSets:
9 | - poc-near-edge
10 | predicates:
11 | - requiredClusterSelector:
12 | labelSelector: {}
13 | tolerations:
14 | - key: cluster.open-cluster-management.io/unreachable
15 | operator: Exists
16 | - key: cluster.open-cluster-management.io/unavailable
17 | operator: Exists
18 |
--------------------------------------------------------------------------------
/acm/registration/near-edge/overlays/bike-rental-app/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - ../../base
6 |
7 | namePrefix: bike-rental-app-
8 |
9 | patches:
10 | - patch: |-
11 | - op: replace
12 | path: /spec/template/metadata/name
13 | value: '{{name}}-bike-rental-app'
14 | - op: replace
15 | path: /spec/template/spec/source/path
16 | value: acm/odh-edge/apps/bike-rental-app
17 | - op: replace
18 | path: /spec/template/spec/destination/namespace
19 | value: bike-rental-app
20 | - op: replace
21 | path: /spec/template/metadata/annotations/apps.open-cluster-management.io~1ocm-managed-cluster-app-namespace
22 | value: openshift-gitops
23 | target:
24 | group: argoproj.io
25 | version: v1alpha1
26 | kind: ApplicationSet
27 |
28 | replacements:
29 | - source:
30 | kind: Placement
31 | group: cluster.open-cluster-management.io
32 | fieldPath: metadata.name
33 | targets:
34 | - select:
35 | group: argoproj.io
36 | kind: ApplicationSet
37 | fieldPaths:
38 | - spec.generators.0.clusterDecisionResource.labelSelector.matchLabels.cluster\.open-cluster-management\.io/placement
39 |
--------------------------------------------------------------------------------
/acm/registration/near-edge/overlays/opentelemetry/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - ../../base
6 |
7 | namePrefix: otel-edge-collector-
8 |
9 | patches:
10 | - patch: |-
11 | - op: replace
12 | path: /spec/template/metadata/name
13 | value: '{{name}}-openshift-opentelemetry-operator'
14 | - op: replace
15 | path: /spec/template/spec/source/path
16 | value: acm/odh-edge/apps/telemetry
17 | - op: replace
18 | path: /spec/template/spec/destination/namespace
19 | value: opendatahub-edge-collector
20 | - op: replace
21 | path: /spec/template/metadata/annotations/apps.open-cluster-management.io~1ocm-managed-cluster-app-namespace
22 | value: openshift-gitops
23 | target:
24 | group: argoproj.io
25 | version: v1alpha1
26 | kind: ApplicationSet
27 |
28 | replacements:
29 | - source:
30 | kind: Placement
31 | group: cluster.open-cluster-management.io
32 | fieldPath: metadata.name
33 | targets:
34 | - select:
35 | group: argoproj.io
36 | kind: ApplicationSet
37 | fieldPaths:
38 | - spec.generators.0.clusterDecisionResource.labelSelector.matchLabels.cluster\.open-cluster-management\.io/placement
39 |
--------------------------------------------------------------------------------
/acm/registration/near-edge/overlays/tensorflow-housing-app/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - ../../base
6 |
7 | namePrefix: tensorflow-housing-app-
8 |
9 | patches:
10 | - patch: |-
11 | - op: replace
12 | path: /spec/template/metadata/name
13 | value: '{{name}}-tensorflow-housing-app'
14 | - op: replace
15 | path: /spec/template/spec/source/path
16 | value: acm/odh-edge/apps/tensorflow-housing-app
17 | - op: replace
18 | path: /spec/template/spec/destination/namespace
19 | value: tensorflow-housing-app
20 | - op: replace
21 | path: /spec/template/metadata/annotations/apps.open-cluster-management.io~1ocm-managed-cluster-app-namespace
22 | value: openshift-gitops
23 | target:
24 | group: argoproj.io
25 | version: v1alpha1
26 | kind: ApplicationSet
27 |
28 | replacements:
29 | - source:
30 | kind: Placement
31 | group: cluster.open-cluster-management.io
32 | fieldPath: metadata.name
33 | targets:
34 | - select:
35 | group: argoproj.io
36 | kind: ApplicationSet
37 | fieldPaths:
38 | - spec.generators.0.clusterDecisionResource.labelSelector.matchLabels.cluster\.open-cluster-management\.io/placement
39 |
--------------------------------------------------------------------------------
/acm/registration/near-edge/test/bike-rental-app/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 |
5 | resources:
6 | - ../../overlays/bike-rental-app/
7 |
8 | patches:
9 | - patch: |-
10 | - op: test
11 | path: /metadata/name
12 | value: bike-rental-app-appset
13 | - op: test
14 | path: /spec/template/metadata/name
15 | value: '{{name}}-bike-rental-app'
16 | - op: test
17 | path: /spec/template/spec/source/path
18 | value: acm/odh-edge/apps/bike-rental-app
19 | - op: test
20 | path: /spec/template/spec/destination/namespace
21 | value: bike-rental-app
22 | - op: test
23 | path: /spec/template/metadata/annotations/apps.open-cluster-management.io~1ocm-managed-cluster-app-namespace
24 | value: openshift-gitops
25 | - op: test
26 | path: /spec/generators/0/clusterDecisionResource/labelSelector/matchLabels/cluster.open-cluster-management.io~1placement
27 | value: bike-rental-app-placement
28 | target:
29 | group: argoproj.io
30 | kind: ApplicationSet
31 |
32 | - patch: |-
33 | - op: test
34 | path: /metadata/name
35 | value: bike-rental-app-placement
36 | target:
37 | group: cluster.open-cluster-management.io
38 | kind: Placement
39 |
--------------------------------------------------------------------------------
/acm/registration/near-edge/test/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - bike-rental-app
6 | - tensorflow-housing-app
7 |
--------------------------------------------------------------------------------
/acm/registration/near-edge/test/tensorflow-housing-app/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 |
5 | resources:
6 | - ../../overlays/tensorflow-housing-app/
7 |
8 | patches:
9 | - patch: |-
10 | - op: test
11 | path: /metadata/name
12 | value: tensorflow-housing-app-appset
13 | - op: test
14 | path: /spec/template/metadata/name
15 | value: '{{name}}-tensorflow-housing-app'
16 | - op: test
17 | path: /spec/template/spec/source/path
18 | value: acm/odh-edge/apps/tensorflow-housing-app
19 | - op: test
20 | path: /spec/template/spec/destination/namespace
21 | value: tensorflow-housing-app
22 | - op: test
23 | path: /spec/template/metadata/annotations/apps.open-cluster-management.io~1ocm-managed-cluster-app-namespace
24 | value: openshift-gitops
25 | - op: test
26 | path: /spec/generators/0/clusterDecisionResource/labelSelector/matchLabels/cluster.open-cluster-management.io~1placement
27 | value: tensorflow-housing-app-placement
28 | target:
29 | group: argoproj.io
30 | kind: ApplicationSet
31 |
32 | - patch: |-
33 | - op: test
34 | path: /metadata/name
35 | value: tensorflow-housing-app-placement
36 | target:
37 | group: cluster.open-cluster-management.io
38 | kind: Placement
39 |
--------------------------------------------------------------------------------
/byo-mgmt/registration/near-edge/base/argocd-application.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: argoproj.io/v1alpha1
2 | kind: Application
3 | metadata:
4 | name: application
5 | namespace: openshift-gitops
6 | spec:
7 | project: default
8 | source:
9 | path: acm/odh-edge/apps/app
10 | repoURL: 'https://github.com/opendatahub-io/ai-edge.git'
11 | targetRevision: main
12 | destination:
13 | namespace: namespace
14 | server: 'https://kubernetes.default.svc'
15 | syncPolicy:
16 | automated:
17 | prune: true
18 |
--------------------------------------------------------------------------------
/byo-mgmt/registration/near-edge/base/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - argocd-application.yaml
6 |
--------------------------------------------------------------------------------
/byo-mgmt/registration/near-edge/overlays/bike-rental-app/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - ../../base
6 |
7 | patches:
8 | - patch: |-
9 | - op: replace
10 | path: /metadata/name
11 | value: bike-rental-app
12 | - op: replace
13 | path: /spec/source/path
14 | value: acm/odh-edge/apps/bike-rental-app
15 | - op: replace
16 | path: /spec/destination/namespace
17 | value: bike-rental-app
18 | target:
19 | group: argoproj.io
20 | version: v1alpha1
21 | kind: Application
22 | - patch: |-
23 | - op: replace
24 | path: /metadata/name
25 | value: bike-rental-app
26 | target:
27 | version: v1
28 | kind: Namespace
29 |
--------------------------------------------------------------------------------
/byo-mgmt/registration/near-edge/overlays/tensorflow-housing-app/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - ../../base
6 |
7 | patches:
8 | - patch: |-
9 | - op: replace
10 | path: /metadata/name
11 | value: tensorflow-housing-app
12 | - op: replace
13 | path: /spec/source/path
14 | value: acm/odh-edge/apps/tensorflow-housing-app
15 | - op: replace
16 | path: /spec/destination/namespace
17 | value: tensorflow-housing-app
18 | target:
19 | group: argoproj.io
20 | version: v1alpha1
21 | kind: Application
22 | - patch: |-
23 | - op: replace
24 | path: /metadata/name
25 | value: tensorflow-housing-app
26 | target:
27 | version: v1
28 | kind: Namespace
29 |
--------------------------------------------------------------------------------
/cli/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: cli-test cli-build cli-run cli-clean
2 |
3 | GO=go
4 |
5 | cli-test:
6 | ${GO} test ./... -v
7 |
8 | cli-build:
9 | ${GO} build -o odh ./cmd/main.go
10 |
11 | cli-run:
12 | ${GO} run ./cmd/main.go
13 |
14 | cli-clean:
15 | rm -f odh
16 |
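17 | # Example (illustrative): build the CLI binary, then list registered models
18 | # against a local model registry:
19 | #   make cli-build && ./odh models -r http://localhost:8080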
--------------------------------------------------------------------------------
/cli/cmd/main.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright © 2024 Open Data Hub Authors
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 | package main
17 |
18 | import (
19 | "github.com/opendatahub-io/ai-edge/cli/pkg/commands"
20 | )
21 |
22 | func main() {
23 | commands.Execute()
24 | }
25 |
--------------------------------------------------------------------------------
/cli/cobra.yaml:
--------------------------------------------------------------------------------
1 | author: Open Data Hub Authors
2 | license: apache
3 | useViper: true
4 |
--------------------------------------------------------------------------------
/cli/examples/params.yaml.sample:
--------------------------------------------------------------------------------
1 | params:
2 | # The name of the S3 bucket where the model is stored
3 | - name: s3-bucket-name
4 | value: BUCKET_NAME
5 | # The URL of the git repository where the containerfile is stored
6 | - name: git-containerfile-repo
7 | value: GIT_REPO_URL
8 | # The branch of the git repository where the containerfile is stored
9 | - name: git-containerfile-revision
10 | value: GIT_BRANCH
11 | # The relative path to the containerfile in the git repository
12 | - name: containerfileRelativePath
13 | value: RELATIVE_PATH
14 | # The method used to fetch the model (s3 or git)
15 | - name: fetch-model
16 | value: FETCH_METHOD
17 | # The URL of the git repository where the model is stored. This is only used if fetch-model is set to git.
18 | - name: git-model-repo
19 | value: GIT_REPO_URL
20 | # The relative path to the model in the git repository. This is only used if fetch-model is set to git.
21 | - name: modelRelativePath
22 | value: RELATIVE_PATH
23 | # The branch of the git repository where the model is stored. This is only used if fetch-model is set to git.
24 | - name: git-model-revision
25 | value: GIT_BRANCH
26 | # The name of the model serving test endpoint (e.g. invocations)
27 | - name: test-endpoint
28 | value: ENDPOINT_NAME
29 | # The candidate image tag reference. This is the intermediate image that is built during the pipeline.
30 | # e.g. image-registry.openshift-image-registry.svc:5000/$(context.pipelineRun.namespace)/$(params.model-name):$(params.model-version)-candidate
31 | - name: candidate-image-tag-reference
32 | value: CANDIDATE_IMAGE_TAG
33 | # The target image tag references. These are the final images that are pushed to the image registry. A typical value would be:
34 | # - quay.io/rhoai-edge/$(params.model-name):$(params.model-version)-$(context.pipelineRun.uid)
35 | # - quay.io/rhoai-edge/$(params.model-name):$(params.model-version)
36 | # - quay.io/rhoai-edge/$(params.model-name):latest
37 | - name: target-image-tag-references
38 | value:
39 | - TARGET_IMAGE_TAG_1
40 | - TARGET_IMAGE_TAG_2
41 | # The action to take upon the completion of the pipeline (e.g. delete)
42 | - name: upon-end
43 | value: ACTION
44 | # The name of the secret that contains the S3 credentials
45 | - name: s3SecretName
46 | value: SECRET_NAME
47 | # The name of the config map that contains the test data
48 | - name: testDataConfigMapName
49 | value: CONFIG_MAP_NAME
50 |
--------------------------------------------------------------------------------
/cli/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/opendatahub-io/ai-edge/cli
2 |
3 | go 1.21.4
4 |
5 | require (
6 | github.com/charmbracelet/bubbles v0.18.0
7 | github.com/charmbracelet/bubbletea v0.25.0
8 | github.com/charmbracelet/lipgloss v0.9.1
9 | github.com/kubeflow/model-registry v0.0.0-20240312073310-67d9e4deff70
10 | github.com/spf13/cobra v1.8.0
11 | github.com/tektoncd/pipeline v0.58.0
12 | gopkg.in/yaml.v2 v2.4.0
13 | k8s.io/api v0.29.3
14 | k8s.io/apimachinery v0.29.3
15 | k8s.io/client-go v0.29.3
16 | )
17 |
18 | require (
19 | contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d // indirect
20 | contrib.go.opencensus.io/exporter/prometheus v0.4.0 // indirect
21 | github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
22 | github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
23 | github.com/beorn7/perks v1.0.1 // indirect
24 | github.com/blendle/zapdriver v1.3.1 // indirect
25 | github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect
26 | github.com/cespare/xxhash/v2 v2.2.0 // indirect
27 | github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81 // indirect
28 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
29 | github.com/emicklei/go-restful/v3 v3.11.0 // indirect
30 | github.com/evanphx/json-patch/v5 v5.6.0 // indirect
31 | github.com/go-kit/log v0.2.1 // indirect
32 | github.com/go-logfmt/logfmt v0.5.1 // indirect
33 | github.com/go-logr/logr v1.4.1 // indirect
34 | github.com/go-openapi/jsonpointer v0.19.6 // indirect
35 | github.com/go-openapi/jsonreference v0.20.2 // indirect
36 | github.com/go-openapi/swag v0.22.3 // indirect
37 | github.com/gogo/protobuf v1.3.2 // indirect
38 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
39 | github.com/golang/protobuf v1.5.4 // indirect
40 | github.com/google/cel-go v0.20.0 // indirect
41 | github.com/google/gnostic-models v0.6.8 // indirect
42 | github.com/google/go-cmp v0.6.0 // indirect
43 | github.com/google/go-containerregistry v0.19.0 // indirect
44 | github.com/google/gofuzz v1.2.0 // indirect
45 | github.com/google/uuid v1.6.0 // indirect
46 | github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect
47 | github.com/hashicorp/errwrap v1.1.0 // indirect
48 | github.com/hashicorp/go-multierror v1.1.1 // indirect
49 | github.com/imdario/mergo v0.3.13 // indirect
50 | github.com/inconshreveable/mousetrap v1.1.0 // indirect
51 | github.com/josharian/intern v1.0.0 // indirect
52 | github.com/json-iterator/go v1.1.12 // indirect
53 | github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
54 | github.com/mailru/easyjson v0.7.7 // indirect
55 | github.com/mattn/go-isatty v0.0.18 // indirect
56 | github.com/mattn/go-localereader v0.0.1 // indirect
57 | github.com/mattn/go-runewidth v0.0.15 // indirect
58 | github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
59 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
60 | github.com/modern-go/reflect2 v1.0.2 // indirect
61 | github.com/muesli/ansi v0.0.0-20211018074035-2e021307bc4b // indirect
62 | github.com/muesli/cancelreader v0.2.2 // indirect
63 | github.com/muesli/reflow v0.3.0 // indirect
64 | github.com/muesli/termenv v0.15.2 // indirect
65 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
66 | github.com/opencontainers/go-digest v1.0.0 // indirect
67 | github.com/pkg/errors v0.9.1 // indirect
68 | github.com/prometheus/client_golang v1.15.1 // indirect
69 | github.com/prometheus/client_model v0.4.0 // indirect
70 | github.com/prometheus/common v0.42.0 // indirect
71 | github.com/prometheus/procfs v0.9.0 // indirect
72 | github.com/prometheus/statsd_exporter v0.21.0 // indirect
73 | github.com/rivo/uniseg v0.4.6 // indirect
74 | github.com/rogpeppe/go-internal v1.12.0 // indirect
75 | github.com/spf13/pflag v1.0.5 // indirect
76 | github.com/stoewer/go-strcase v1.2.0 // indirect
77 | go.opencensus.io v0.24.0 // indirect
78 | go.uber.org/atomic v1.10.0 // indirect
79 | go.uber.org/multierr v1.10.0 // indirect
80 | go.uber.org/zap v1.27.0 // indirect
81 | golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
82 | golang.org/x/net v0.21.0 // indirect
83 | golang.org/x/oauth2 v0.17.0 // indirect
84 | golang.org/x/sync v0.6.0 // indirect
85 | golang.org/x/sys v0.17.0 // indirect
86 | golang.org/x/term v0.17.0 // indirect
87 | golang.org/x/text v0.14.0 // indirect
88 | golang.org/x/time v0.5.0 // indirect
89 | gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
90 | google.golang.org/api v0.167.0 // indirect
91 | google.golang.org/appengine v1.6.8 // indirect
92 | google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014 // indirect
93 | google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9 // indirect
94 | google.golang.org/grpc v1.62.1 // indirect
95 | google.golang.org/protobuf v1.33.0 // indirect
96 | gopkg.in/inf.v0 v0.9.1 // indirect
97 | gopkg.in/yaml.v3 v3.0.1 // indirect
98 | k8s.io/klog/v2 v2.110.1 // indirect
99 | k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect
100 | k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
101 | knative.dev/pkg v0.0.0-20231023150739-56bfe0dd9626 // indirect
102 | sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
103 | sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
104 | sigs.k8s.io/yaml v1.4.0 // indirect
105 | )
106 |
--------------------------------------------------------------------------------
/cli/pkg/commands/common/cmd.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2024. Open Data Hub Authors
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package common
18 |
19 | import (
20 | "os"
21 |
22 | tea "github.com/charmbracelet/bubbletea"
23 | "github.com/spf13/cobra"
24 |
25 | "github.com/opendatahub-io/ai-edge/cli/pkg/commands/flags"
26 | )
27 |
28 | // SubCommand is a type to represent the subcommand
29 | type SubCommand int
30 |
31 | const (
32 | // SubCommandList is a subcommand to list items
33 | SubCommandList SubCommand = iota
34 | // SubCommandAdd is a subcommand to add items
35 | SubCommandAdd
36 | // SubCommandUpdate is a subcommand to sync items
37 | SubCommandUpdate
38 | // SubCommandBuild is a subcommand to build items
39 | SubCommandBuild
40 | // SubCommandDescribe is a subcommand to view details
41 | SubCommandDescribe
42 | )
43 |
44 | // NewCmd creates a new cobra command.
45 | //
46 | // The command will create a new tea program, passing the model created by the modelFactory, and run it.
47 | // The modelFactory will be called with the args, flags and subCommand.
48 | //
49 | // Example:
50 | //
51 | // cmd := NewCmd(
52 | // "images",
53 | // "List images",
54 | // `List images`,
55 | // cobra.ExactArgs(3),
56 | // []flags.Flag{flags.FlagModelRegistryUrl},
57 | // SubCommandList,
58 | // func(args []string, flags map[string]string, subCommand SubCommand) tea.Model {
59 | // return NewImagesModel(args, flags, subCommand)
60 | // },
61 | // )
62 | func NewCmd(
63 | use, short, long string,
64 | args cobra.PositionalArgs,
65 | flags []flags.Flag,
66 | command SubCommand,
67 | modelFactory func(args []string, flags map[string]string, subCommand SubCommand) tea.Model,
68 | ) *cobra.Command {
69 |
70 | cmd := cobra.Command{
71 | Use: use,
72 | Short: short,
73 | Long: long,
74 | Args: args,
75 | Run: func(cmd *cobra.Command, args []string) {
76 | ff := make(map[string]string)
77 | for _, f := range flags {
78 | var v string
79 | var err error
80 | if f.IsParentFlag() {
81 | v, err = cmd.InheritedFlags().GetString(f.String())
82 | if err != nil {
83 | cmd.PrintErrf("Error reading inherited flag %s: %v\n", f, err)
84 | os.Exit(1)
85 | }
86 | } else {
87 | v, err = cmd.Flags().GetString(f.String())
88 | if err != nil {
89 | cmd.PrintErrf("Error reading flag %s: %v\n", f, err)
90 | os.Exit(1)
91 | }
92 | }
93 | ff[f.String()] = v
94 | }
95 | _, err := tea.NewProgram(modelFactory(args, ff, command)).Run()
96 | if err != nil {
97 | cmd.PrintErrf("Error: %v\n", err)
98 | os.Exit(1)
99 | }
100 | },
101 | }
102 |
103 | // Disable the addition of [flags] to the usage line of a command when printing help or generating docs
104 | cmd.DisableFlagsInUseLine = true
105 |
106 | cmd.Flags().SortFlags = false
107 |
108 | for _, f := range flags {
109 | if !f.IsParentFlag() {
110 | if f.IsInherited() {
111 | cmd.PersistentFlags().StringP(f.String(), f.Shorthand(), f.Value(), f.Usage())
112 | } else {
113 | cmd.Flags().StringP(f.String(), f.Shorthand(), f.Value(), f.Usage())
114 | }
115 | if f.IsRequired() {
116 | err := cmd.MarkFlagRequired(f.String())
117 | if err != nil {
118 | cmd.PrintErrf("Error marking flag %s as required: %v\n", f, err)
119 | os.Exit(1)
120 | }
121 | }
122 | }
123 | }
124 |
125 | return &cmd
126 | }
127 |
--------------------------------------------------------------------------------
/cli/pkg/commands/common/errors.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2024. Open Data Hub Authors
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package common
18 |
19 | // ErrMsg is a wrapper for an error that implements the error interface.
20 | //
21 | // This is useful for returning an error from a model in the bubbletea program.
22 | type ErrMsg struct{ Err error }
23 |
24 | // Error returns the error message.
25 | func (e ErrMsg) Error() string { return e.Err.Error() }
26 |
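27 | // Example (illustrative): returning an error from inside a tea.Cmd function:
28 | //
29 | //	return ErrMsg{Err: err}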
--------------------------------------------------------------------------------
/cli/pkg/commands/common/styles.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2024. Open Data Hub Authors
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package common
18 |
19 | import "github.com/charmbracelet/lipgloss"
20 |
21 | // TableBaseStyle is the base style for the table
22 | var TableBaseStyle = lipgloss.NewStyle().
23 | BorderStyle(lipgloss.NormalBorder()).
24 | BorderForeground(lipgloss.Color("#04B575"))
25 |
26 | // MessageStyle is the style for regular messages
27 | var MessageStyle = lipgloss.NewStyle().
28 | Bold(true)
29 |
30 | // Success is the style for success messages
31 | var Success = lipgloss.NewStyle().
32 | Foreground(lipgloss.Color("#04B575")).
33 | Bold(true)
34 |
35 | // ErrorStyle is the style for error messages
36 | var ErrorStyle = lipgloss.NewStyle().
37 | Foreground(lipgloss.Color("#FF0000")).
38 | Bold(true).
39 | Height(4).
40 | Width(120)
41 |
42 | // KeyStyle is the style to decorate keys in map
43 | var KeyStyle = lipgloss.NewStyle().Bold(true).Width(20)
44 |
45 | // ParamKeyStyle is the style to decorate params value
46 | var ParamKeyStyle = lipgloss.NewStyle().Width(40).MarginLeft(10)
47 |
48 | // TitleStyle is the style to decorate the title text
49 | var TitleStyle = lipgloss.NewStyle().Bold(true).Underline(true).PaddingTop(1)
50 |
--------------------------------------------------------------------------------
/cli/pkg/commands/flags/flags.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2024. Open Data Hub Authors
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package flags
18 |
19 | import (
20 | "fmt"
21 | "os"
22 | )
23 |
24 | // Flag represents a command line flag.
25 | //
26 | // Flags can be inherited by subcommands, in which case they will be passed to the subcommand.
27 | type Flag struct {
28 | name string
29 | inherited bool // Flag is inherited by subcommands
30 | parentFlag bool // Flag is defined in the parent command
31 | shorthand string
32 | required bool
33 | value string // Default value used when the flag is not provided
34 | usage string
35 | }
36 |
37 | var (
38 | // FlagModelRegistryURL is the URL of the model registry
39 | FlagModelRegistryURL = Flag{
40 | name: "model-registry-url", inherited: true, shorthand: "r", value: "http://localhost:8080",
41 | usage: "URL of the model registry",
42 | }
43 |
44 | // FlagKubeconfig is the path to the kubeconfig file
45 | FlagKubeconfig = Flag{
46 | name: "kubeconfig", inherited: true, shorthand: "k",
47 | value: fmt.Sprintf("%s/.kube/config", os.Getenv("HOME")),
48 | usage: "path to the kubeconfig file",
49 | }
50 |
51 | // FlagNamespace is the namespace to use
52 | FlagNamespace = Flag{
53 | name: "namespace",
54 | shorthand: "n",
55 | value: "default",
56 | usage: "namespace to use",
57 | }
58 |
59 | // FlagParams is the path to the build parameters file
60 | FlagParams = Flag{
61 | name: "params",
62 | shorthand: "p",
63 | value: "params.yaml",
64 | usage: "path to the build parameters file",
65 | }
66 |
67 | // FlagModelID is the ID of the model
68 | FlagModelID = Flag{
69 | name: "model-id",
70 | shorthand: "i",
71 | usage: "ID of the model",
72 | }
73 |
74 | // FlagModelName is the name of the model
75 | FlagModelName = Flag{
76 | name: "model-name",
77 | shorthand: "m",
78 | usage: "model name",
79 | }
80 |
81 | // FlagModelDescription is the description of the model
82 | FlagModelDescription = Flag{
83 | name: "model-description",
84 | shorthand: "d",
85 | usage: "model description",
86 | }
87 |
88 | // FlagVersionName is the name of the model version
89 | FlagVersionName = Flag{
90 | name: "version-name",
91 | shorthand: "v",
92 | value: "v1",
93 | usage: "model version name. A model can have multiple versions.",
94 | }
95 |
96 | // FlagModelImageID is the ID of the model image
97 | FlagModelImageID = Flag{
98 | name: "image-id",
99 | shorthand: "g",
100 | usage: "model image ID",
101 | }
102 | )
103 |
104 | // String returns the name of the flag.
105 | func (f Flag) String() string {
106 | return f.name
107 | }
108 |
109 | // SetInherited sets the flag to be inherited by subcommands.
110 | func (f Flag) SetInherited() Flag {
111 | f.inherited = true
112 | return f
113 | }
114 |
115 | // IsInherited returns true if the flag is inherited by subcommands.
116 | func (f Flag) IsInherited() bool {
117 | return f.inherited
118 | }
119 |
120 | // SetParentFlag sets the flag as one that's defined in the parent command.
121 | func (f Flag) SetParentFlag() Flag {
122 | f.parentFlag = true
123 | return f
124 | }
125 |
126 | // IsParentFlag returns true if the flag is defined in the parent command.
127 | func (f Flag) IsParentFlag() bool {
128 | return f.parentFlag
129 | }
130 |
131 | // SetRequired sets the flag as required.
132 | func (f Flag) SetRequired() Flag {
133 | f.required = true
134 | f.usage += " (required)"
135 | return f
136 | }
137 |
138 | // IsRequired returns true if the flag is required.
139 | func (f Flag) IsRequired() bool {
140 | return f.required
141 | }
142 |
143 | // Shorthand returns the shorthand of the flag.
144 | func (f Flag) Shorthand() string {
145 | return f.shorthand
146 | }
147 |
148 | // Value returns the value of the flag.
149 | func (f Flag) Value() string {
150 | return f.value
151 | }
152 |
153 | // Usage returns the usage of the flag.
154 | func (f Flag) Usage() string {
155 | return f.usage
156 | }
157 |
--------------------------------------------------------------------------------
/cli/pkg/commands/images/build.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2024. Open Data Hub Authors
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package images
18 |
19 | import (
20 | "fmt"
21 |
22 | "github.com/spf13/cobra"
23 |
24 | "github.com/opendatahub-io/ai-edge/cli/pkg/commands/common"
25 | "github.com/opendatahub-io/ai-edge/cli/pkg/commands/flags"
26 | )
27 |
28 | var buildCmd = common.NewCmd(
29 | fmt.Sprintf(
30 | "build -%s model-id -%s version [-%s model-registry-url] [-%s namespace] [-%s kubeconfig]",
31 | flags.FlagModelID.Shorthand(),
32 | flags.FlagVersionName.Shorthand(),
33 | flags.FlagModelRegistryURL.Shorthand(),
34 | flags.FlagNamespace.Shorthand(),
35 | flags.FlagKubeconfig.Shorthand(),
36 | ),
37 | "Build a synced edge model image",
38 | `Build a synced edge model image identified by the provided model image ID.
39 |
40 | This command allows you to build an edge model image from the provided model image and model version using
41 | the stored build parameters.
42 | `,
43 | cobra.NoArgs,
44 | []flags.Flag{
45 | flags.FlagModelID.SetRequired(),
46 | flags.FlagVersionName.SetRequired(),
47 | flags.FlagNamespace.SetParentFlag(),
48 | flags.FlagModelRegistryURL.SetParentFlag(),
49 | flags.FlagKubeconfig.SetParentFlag(),
50 | },
51 | common.SubCommandBuild,
52 | NewImagesModel,
53 | )
54 |
--------------------------------------------------------------------------------
/cli/pkg/commands/images/describe.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2024. Open Data Hub Authors
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package images
18 |
19 | import (
20 | "fmt"
21 |
22 | "github.com/spf13/cobra"
23 |
24 | "github.com/opendatahub-io/ai-edge/cli/pkg/commands/common"
25 | "github.com/opendatahub-io/ai-edge/cli/pkg/commands/flags"
26 | )
27 |
28 | var describeCmd = common.NewCmd(
29 | fmt.Sprintf(
30 | "describe -%s model-id -%s version [-%s model-registry-url]",
31 | flags.FlagModelID.Shorthand(),
32 | flags.FlagVersionName.Shorthand(),
33 | flags.FlagModelRegistryURL.Shorthand(),
34 | ),
35 | "View details of an edge model image.",
36 | `View details of an edge model image.
37 |
38 | This command allows you to view details of a specific edge model image along with its parameters.
39 | `,
40 | cobra.NoArgs,
41 | []flags.Flag{
42 | flags.FlagModelID.SetRequired(),
43 | flags.FlagVersionName.SetRequired(),
44 | flags.FlagModelRegistryURL.SetParentFlag(),
45 | },
46 | common.SubCommandDescribe,
47 | NewImagesModel,
48 | )
49 |
--------------------------------------------------------------------------------
/cli/pkg/commands/images/msgs.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2024. Open Data Hub Authors
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package images
18 |
19 | import "github.com/opendatahub-io/ai-edge/cli/pkg/edgeclient"
20 |
21 | type modelImagesMsg []edgeclient.ModelImage
22 | type modelImageSyncedMsg struct{}
23 | type modelImageBuiltMsg struct {
24 | pipelineRun edgeclient.PipelineRun
25 | }
26 | type modelImageDescribeMsg struct {
27 | selectedImage edgeclient.ModelImage
28 | }
29 |
--------------------------------------------------------------------------------
/cli/pkg/commands/images/update.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2024. Open Data Hub Authors
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package images
18 |
19 | import (
20 | "fmt"
21 |
22 | "github.com/spf13/cobra"
23 |
24 | "github.com/opendatahub-io/ai-edge/cli/pkg/commands/common"
25 | "github.com/opendatahub-io/ai-edge/cli/pkg/commands/flags"
26 | )
27 |
28 | var updateCmd = common.NewCmd(
29 | fmt.Sprintf(
30 | "update -%s model-id -%s version [-%s model-registry-url] [-%s params-file]",
31 | flags.FlagModelID.Shorthand(),
32 | flags.FlagVersionName.Shorthand(),
33 | flags.FlagModelRegistryURL.Shorthand(),
34 | flags.FlagParams.Shorthand(),
35 | ),
36 | "Update image parameters for an edge model in the model registry.",
37 | `Update image parameters for an edge model in the model registry.
38 |
39 | This command allows you to update the build parameters stored in the model registry for a specific version of a model.
40 | `,
41 | cobra.NoArgs,
42 | []flags.Flag{
43 | flags.FlagModelID.SetRequired(),
44 | flags.FlagVersionName.SetRequired(),
45 | flags.FlagModelRegistryURL.SetParentFlag(),
46 | flags.FlagParams,
47 | },
48 | common.SubCommandUpdate,
49 | NewImagesModel,
50 | )
51 |
--------------------------------------------------------------------------------
/cli/pkg/commands/models/add.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2024. Open Data Hub Authors
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package models
18 |
19 | import (
20 | "fmt"
21 |
22 | "github.com/spf13/cobra"
23 |
24 | "github.com/opendatahub-io/ai-edge/cli/pkg/commands/common"
25 | "github.com/opendatahub-io/ai-edge/cli/pkg/commands/flags"
26 | )
27 |
28 | var addCmd = common.NewCmd(
29 | fmt.Sprintf(
30 | "add -%s model-name -%s model-description [-%s version-name] [-%s model-registry-url] [-%s params-file]",
31 | flags.FlagModelName.Shorthand(),
32 | flags.FlagModelDescription.Shorthand(),
33 | flags.FlagVersionName.Shorthand(),
34 | flags.FlagModelRegistryURL.Shorthand(),
35 | flags.FlagParams.Shorthand(),
36 | ),
37 | "Add model information to the model registry including the model name, model description, model version "+
38 | "and build parameters.",
39 | `Add model image information to the model registry including the model name, model description, model version and `+
40 | `build parameters.
41 |
42 | If you don't provide a version name, the version name will be set to 'v1'.
43 |
44 | Build parameters are provided via a YAML file with the following format:
45 |
46 | params:
47 | - name:
48 | value:
49 | - name:
50 | value:
51 | ...
52 | `,
53 | cobra.NoArgs,
54 | []flags.Flag{
55 | flags.FlagModelName.SetRequired(),
56 | flags.FlagModelDescription.SetRequired(),
57 | flags.FlagVersionName,
58 | flags.FlagModelRegistryURL.SetParentFlag(),
59 | flags.FlagParams,
60 | },
61 | common.SubCommandAdd,
62 | NewTeaModel,
63 | )
64 |
--------------------------------------------------------------------------------
/cli/pkg/commands/models/models.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2024. Open Data Hub Authors
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package models
18 |
19 | import (
20 | "fmt"
21 |
22 | "github.com/charmbracelet/bubbles/table"
23 | tea "github.com/charmbracelet/bubbletea"
24 | "github.com/charmbracelet/lipgloss"
25 | "github.com/spf13/cobra"
26 |
27 | "github.com/opendatahub-io/ai-edge/cli/pkg/commands/common"
28 | "github.com/opendatahub-io/ai-edge/cli/pkg/commands/flags"
29 | "github.com/opendatahub-io/ai-edge/cli/pkg/edgeclient"
30 | "github.com/opendatahub-io/ai-edge/cli/pkg/pipelines"
31 | )
32 |
33 | type registeredModelsMsg []edgeclient.Model
34 | type newModelAddedMsg struct{}
35 |
36 | type teaModel struct {
37 | args []string
38 | flags map[string]string
39 | edgeClient *edgeclient.Client
40 | registeredModelsList []edgeclient.Model
41 | err error
42 | subCommand common.SubCommand
43 | }
44 |
45 | // NewTeaModel creates a new bubbletea model for the models command
46 | func NewTeaModel(args []string, flgs map[string]string, subCommand common.SubCommand) tea.Model {
47 | return &teaModel{
48 | args: args,
49 | flags: flgs,
50 | edgeClient: edgeclient.NewClient(flgs[flags.FlagModelRegistryURL.String()]),
51 | subCommand: subCommand,
52 | }
53 | }
54 |
55 | // Init initializes the model according to the subcommand
56 | func (m teaModel) Init() tea.Cmd {
57 | switch m.subCommand {
58 | case common.SubCommandList:
59 | return m.listRegisteredModels()
60 | case common.SubCommandAdd:
61 | return m.addModel()
62 | }
63 | return nil
64 | }
65 |
66 | func (m teaModel) listRegisteredModels() func() tea.Msg {
67 | c := m.edgeClient
68 | return func() tea.Msg {
69 | models, err := c.GetModels()
70 | if err != nil {
71 | return common.ErrMsg{Err: err}
72 | }
73 | return registeredModelsMsg(models)
74 | }
75 | }
76 |
77 | func (m teaModel) addModel() func() tea.Msg {
78 | c := m.edgeClient
79 | return func() tea.Msg {
80 | params, err := pipelines.ReadParams(m.flags[flags.FlagParams.String()])
81 | if err != nil {
82 | return common.ErrMsg{Err: err}
83 | }
84 | _, err = c.AddNewModelWithImage(
85 | m.flags[flags.FlagModelName.String()],
86 | m.flags[flags.FlagModelDescription.String()],
87 | m.flags[flags.FlagVersionName.String()],
88 | "",
89 | params.ToSimpleMap(),
90 | )
91 | if err != nil {
92 | return common.ErrMsg{Err: err}
93 | }
94 | return newModelAddedMsg{}
95 |
96 | }
97 | }
98 |
99 | // Update updates the model according to the message
100 | func (m teaModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
101 | switch msg := msg.(type) {
102 | case common.ErrMsg:
103 | m.err = msg
104 | return m, tea.Quit
105 |
106 | case registeredModelsMsg:
107 | m.registeredModelsList = msg
108 | return m, tea.Quit
109 | case newModelAddedMsg:
110 | return m, tea.Quit
111 | }
112 | return m, nil
113 | }
114 |
115 | // View returns the view corresponding to the subcommand
116 | func (m teaModel) View() string {
117 | if m.err != nil {
118 | return common.ErrorStyle.Render(fmt.Sprintf("Error: %s", m.err))
119 | }
120 | switch m.subCommand {
121 | case common.SubCommandList:
122 | return m.viewListModels()
123 | case common.SubCommandAdd:
124 | return common.MessageStyle.Render("\nAdding model information.......") + common.Success.Render("[OK]\n\n")
125 | }
126 | return ""
127 | }
128 |
129 | func (m teaModel) viewListModels() string {
130 | columns := []table.Column{
131 | {Title: "Id", Width: 4},
132 | {Title: "Name", Width: 20},
133 | {Title: "Description", Width: 60},
134 | }
135 |
136 | rows := make([]table.Row, 0)
137 |
138 | if m.registeredModelsList != nil {
139 | for _, model := range m.registeredModelsList {
140 | rows = append(
141 | rows, table.Row{
142 | model.ID,
143 | model.Name,
144 | model.Description,
145 | },
146 | )
147 | }
148 | }
149 |
150 | t := table.New(
151 | table.WithColumns(columns),
152 | table.WithRows(rows),
153 | table.WithHeight(len(rows)+1),
154 | )
155 |
156 | s := table.DefaultStyles()
157 | s.Cell.Foreground(lipgloss.Color("#FFF"))
158 | s.Header.
159 | BorderStyle(lipgloss.NormalBorder()).
160 | BorderForeground(lipgloss.Color("#04B575")).
161 | BorderBottom(true).
162 | Bold(true)
163 | t.SetStyles(s)
164 | return common.TableBaseStyle.Render(t.View()) + "\n"
165 | }
166 |
167 | // Cmd represents the models command
168 | var Cmd = common.NewCmd(
169 | "models",
170 | "Manage models",
171 | `Manage Open Data Hub models from the command line.
172 |
173 | This command will list all the registered models available in the Open Data Hub model registry.`,
174 | cobra.NoArgs,
175 | []flags.Flag{flags.FlagModelRegistryURL.SetParentFlag()},
176 | common.SubCommandList,
177 | NewTeaModel,
178 | )
179 |
180 | func init() {
181 | Cmd.AddCommand(addCmd)
182 | }
183 |
--------------------------------------------------------------------------------
/cli/pkg/commands/root.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright © 2024 Open Data Hub Authors
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package commands
18 |
19 | import (
20 | "os"
21 |
22 | "github.com/spf13/cobra"
23 |
24 | "github.com/opendatahub-io/ai-edge/cli/pkg/commands/flags"
25 | "github.com/opendatahub-io/ai-edge/cli/pkg/commands/images"
26 | "github.com/opendatahub-io/ai-edge/cli/pkg/commands/models"
27 | )
28 |
29 | // rootCmd represents the base command when called without any subcommands
30 | var rootCmd = &cobra.Command{
31 | Use: "odh",
32 | Short: "Manage Open Data Hub resources from the command line.",
33 | Long: `Manage Open Data Hub resources from the command line.
34 |
35 | This application is a tool to perform various operations on Open Data Hub.`,
36 | }
37 |
38 | var rootFlags = []flags.Flag{
39 | flags.FlagModelRegistryURL,
40 | flags.FlagKubeconfig,
41 | }
42 |
43 | // Execute adds all child commands to the root command and sets flags appropriately.
44 | // This is called by main.main(). It only needs to happen once to the rootCmd.
45 | func Execute() {
46 | err := rootCmd.Execute()
47 | if err != nil {
48 | os.Exit(1)
49 | }
50 | }
51 |
52 | func init() {
53 | cobra.OnInitialize(initConfig)
54 |
55 | for _, f := range rootFlags {
56 | rootCmd.PersistentFlags().StringP(f.String(), f.Shorthand(), f.Value(), f.Usage())
57 | }
58 | rootCmd.AddCommand(images.Cmd)
59 | rootCmd.AddCommand(models.Cmd)
60 | }
61 |
62 | // initConfig reads in config file and ENV variables if set; it is currently a no-op placeholder.
63 | func initConfig() {
64 | }
65 |
--------------------------------------------------------------------------------
/cli/pkg/edgeclient/types.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2024. Open Data Hub Authors
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package edgeclient
18 |
19 | type modelImageStatus int
20 |
21 | const (
22 | // ModelImageStatusUnknown - The status of the model image is unknown.
23 | ModelImageStatusUnknown modelImageStatus = iota
24 | // ModelImageStatusNeedsSync - The model image needs to be synced to the edge device.
25 | ModelImageStatusNeedsSync
26 | // ModelImageStatusSynced - The model image has been synced to the edge device.
27 | ModelImageStatusSynced
28 | // ModelImageStatusBuilding - The model image is being built.
29 | ModelImageStatusBuilding
30 | // ModelImageStatusLive - The model image is live on the container registry.
31 | ModelImageStatusLive
32 | // ModelImageStatusFailed - The model image build has failed.
33 | ModelImageStatusFailed
34 | )
35 |
36 | func (s modelImageStatus) String() string {
37 | return [...]string{"Unknown", "Needs Sync", "Synced", "Building", "Live", "Failed"}[s]
38 | }
39 |
40 | // Model - A registered model in the model registry.
41 | type Model struct {
42 | ID string
43 | Name string
44 | Description string
45 | }
46 |
47 | // ModelImage - A model image registered in the model registry that is suitable for deployment in edge environments.
48 | type ModelImage struct {
49 | ModelID string
50 | Name string
51 | Description string
52 | Version string
53 | URI string
54 | BuildParams map[string]interface{}
55 | Status modelImageStatus
56 | }
57 |
58 | // PipelineRun - Is used to identify a PipelineRun resource
59 | type PipelineRun struct {
60 | Name string
61 | Namespace string
62 | }
63 |
--------------------------------------------------------------------------------
/cli/pkg/httptest/server.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2024. Open Data Hub Authors
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package httptest
18 |
19 | import (
20 | "encoding/json"
21 | "log"
22 | "net/http"
23 | "net/http/httptest"
24 | )
25 |
26 | // Response represents a response from the mock server that can be set for a given path and method
27 | type Response struct {
28 | StatusCode int
29 | ContentType string
30 | Body any
31 | }
32 |
33 | // MockServer is a simple mock server that can be used to mock HTTP responses
34 | type MockServer struct {
35 | server *httptest.Server
36 | routes map[string]map[string]Response
37 | }
38 |
39 | // NewMockServer creates a new mock server
40 | func NewMockServer() *MockServer {
41 |
42 | return &MockServer{
43 | routes: map[string]map[string]Response{
44 | "GET": {},
45 | "POST": {},
46 | "PUT": {},
47 | "DELETE": {},
48 | "PATCH": {},
49 | },
50 | }
51 | }
52 |
53 | // WithGet sets a response for a GET request to the given path
54 | func (m *MockServer) WithGet(path string, response Response) {
55 | m.routes["GET"][path] = response
56 | }
57 |
58 | // WithPost sets a response for a POST request to the given path
59 | func (m *MockServer) WithPost(path string, response Response) {
60 | m.routes["POST"][path] = response
61 | }
62 |
63 | // Reset clears all the set responses
64 | func (m *MockServer) Reset() {
65 | m.routes = map[string]map[string]Response{
66 | "GET": {},
67 | "POST": {},
68 | "PUT": {},
69 | "DELETE": {},
70 | "PATCH": {},
71 | }
72 | }
73 |
74 | // Start starts the mock server
75 | func (m *MockServer) Start() {
76 | // Create a new httptest server using the handler
77 | m.server = httptest.NewServer(m.handler())
78 | }
79 |
80 | func (m *MockServer) handler() http.HandlerFunc {
81 | return func(w http.ResponseWriter, r *http.Request) {
82 | // Get the response for the request
83 | response, ok := m.routes[r.Method][r.URL.Path]
84 | if !ok {
85 | http.NotFound(w, r)
86 | return
87 | }
88 |
89 | // Set content type
90 | w.Header().Set("Content-Type", response.ContentType)
91 |
92 | // Set the status code
93 | w.WriteHeader(response.StatusCode)
94 |
95 | // Write the response
96 | if err := json.NewEncoder(w).Encode(response.Body); err != nil {
97 | log.Printf("error encoding response: %v", err)
98 | }
99 | }
100 | }
101 |
102 | // Close closes the mock server
103 | func (m *MockServer) Close() {
104 | m.server.Close()
105 | }
106 |
107 | // GetURL returns the URL of the mock server
108 | func (m *MockServer) GetURL() string {
109 | return m.server.URL
110 | }
111 |
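112 | // Example (illustrative; the path and body are hypothetical): stubbing a GET
113 | // endpoint in a test.
114 | //
115 | //	s := NewMockServer()
116 | //	s.WithGet("/api/models", Response{
117 | //	    StatusCode:  http.StatusOK,
118 | //	    ContentType: "application/json",
119 | //	    Body:        []string{"model-1"},
120 | //	})
121 | //	s.Start()
122 | //	defer s.Close()
123 | //	resp, err := http.Get(s.GetURL() + "/api/models")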
--------------------------------------------------------------------------------
/cli/pkg/modelregistry/errors.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2024. Open Data Hub Authors
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package modelregistry
18 |
19 | import (
20 | "errors"
21 | "strings"
22 |
23 | "github.com/kubeflow/model-registry/pkg/openapi"
24 | )
25 |
26 | var (
27 | // ErrModelExists is returned when a model already exists
28 | ErrModelExists = errors.New("model already exists")
29 | // ErrModelNotFound is returned when a model is not found
30 | ErrModelNotFound = errors.New("no registered model found")
31 | // ErrFindModel is returned when a model is not found using FindRegisteredModel
32 | ErrFindModel = errors.New("no registered models found")
33 |
34 | // ErrVersionExists is returned when a version already exists for a given model
35 | ErrVersionExists = errors.New("version already exists")
36 | // ErrVersionNotFound is returned when a version is not found
37 | ErrVersionNotFound = errors.New("no model version found")
38 | // ErrFindModelVersion is returned when no model versions are found using FindModelVersion
39 | ErrFindModelVersion = errors.New("no model versions found")
40 |
41 | // ErrArtifactExists is returned when a model version artifact already exists for a given model version
42 | ErrArtifactExists = errors.New("artifact already exists")
43 | // ErrArtifactNotFound is returned when a model version artifact is not found
44 | ErrArtifactNotFound = errors.New("artifact not found")
45 | // ErrFindArtifact is returned when no artifacts are found using FindArtifact
46 | ErrFindArtifact = errors.New("no model artifacts found")
47 |
48 | // ErrAlreadyExists is a generic error to check the model registry returned errors when an entity (Model, Version,
49 | // Artifact) already exists
50 | ErrAlreadyExists = errors.New("already exists")
51 | )
52 |
53 | // isOpenAPIErrorOfKind checks if the error is of the given kind (targetErr). It checks if sourceErr is an
54 | // openapi.GenericOpenAPIError and if the error message contains the targetErr message.
55 | //
56 | // This is a workaround to handle the error until the model registry supports returning standard HTTP status codes for
57 | // errors with known status codes.
58 | func isOpenAPIErrorOfKind(sourceErr, targetErr error) bool {
59 | var e *openapi.GenericOpenAPIError
60 | if errors.As(sourceErr, &e) {
61 | if me, ok := e.Model().(openapi.Error); ok {
62 | if msg, ok := me.GetMessageOk(); ok {
63 | return strings.Contains(*msg, targetErr.Error())
64 | }
65 | }
66 | }
67 | return false
68 | }
69 |
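70 | // Example (illustrative): translating a raw model registry error into one of
71 | // the sentinel errors above:
72 | //
73 | //	if isOpenAPIErrorOfKind(err, ErrAlreadyExists) {
74 | //	    return ErrModelExists
75 | //	}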
--------------------------------------------------------------------------------
/cli/pkg/pipelines/params.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2024. Open Data Hub Authors
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package pipelines
18 |
19 | import (
20 | "log"
21 | "os"
22 |
23 | "gopkg.in/yaml.v2"
24 | )
25 |
26 | // Param represents a parameter for a pipeline run
27 | type Param struct {
28 | Name string `yaml:"name"`
29 | Value interface{} `yaml:"value"`
30 | }
31 |
32 | // RunParams represents the parameters list for a pipeline run as defined in a YAML file
33 | type RunParams struct {
34 | Params []Param `yaml:"params"`
35 | }
36 |
37 | // GetParamValue returns the value of a parameter by name
38 | func (p *RunParams) GetParamValue(name string) interface{} {
39 | for _, param := range p.Params {
40 | if param.Name == name {
41 | return param.Value
42 | }
43 | }
44 | return nil
45 | }
46 |
47 | // ToSimpleMap converts the RunParams struct to a simple map of string to interface{}
48 | func (p *RunParams) ToSimpleMap() map[string]interface{} {
49 | params := make(map[string]interface{})
50 | for _, param := range p.Params {
51 | params[param.Name] = param.Value
52 | }
53 | return params
54 | }
55 |
56 | // ReadParams reads a YAML file with pipeline run parameters and returns a RunParams struct
57 | func ReadParams(paramsFile string) (*RunParams, error) {
58 | // Read YAML file
59 | data, err := os.ReadFile(paramsFile)
60 | if err != nil {
61 | // Return the error instead of exiting, so callers can handle it.
62 | return nil, fmt.Errorf("error reading params file: %w", err)
63 | }
64 |
65 | // Unmarshal YAML to struct
66 | var runParams RunParams
67 | err = yaml.Unmarshal(data, &runParams) // data is already a []byte
68 | if err != nil {
69 | // Return the error instead of exiting, so callers can handle it.
70 | return nil, fmt.Errorf("error unmarshalling params yaml: %w", err)
71 | }
72 |
73 | return &runParams, nil
74 | }
75 |
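76 | // Example (illustrative): reading a params file and flattening it for the edge
77 | // client, as the models `add` subcommand does:
78 | //
79 | //	params, err := ReadParams("params.yaml")
80 | //	if err != nil {
81 | //	    return err
82 | //	}
83 | //	simple := params.ToSimpleMap()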
--------------------------------------------------------------------------------
/docs/byo-mgmt-gitops.md:
--------------------------------------------------------------------------------
1 | # Bring Your Own Cluster Management
2 |
3 | If you don't have a centralized ACM managing your edge cluster(s), you can follow the steps in this document to deploy
4 | the inference service container image that was built using the MLOps Pipeline.
5 |
6 | ## Install OpenShift GitOps / Argo CD
7 |
8 | As with ACM, Argo CD must be installed on each target edge cluster where the GitOps-managed inference service
9 | container will be deployed.
10 | As a user with permission to install operators from the OperatorHub on the target edge cluster(s), find and install
11 | the OpenShift GitOps operator in the OpenShift Console, under Menu > Operators > OperatorHub.
12 |
13 | ## Ensure namespace in GitOps repo has correct label
14 |
15 | In your GitOps repo, where the manifests for your application are stored, make sure the namespace definition carries
16 | the correct label so that OpenShift GitOps can manage resources in the namespace once it is created.
17 |
18 | To allow the default configuration of OpenShift GitOps to deploy into the namespace, it will need to have the following
19 | label:
20 |
21 | ``` yaml
22 | argocd.argoproj.io/managed-by: openshift-gitops
23 | ```
24 |
25 | For more information on this label, see the [OpenShift GitOps docs][OpenShift GitOps docs: Deploying resources to a
26 | different namespace].
27 |
28 | ## Create Argo CD Application on edge clusters
29 |
30 | Once the operator has been installed from the earlier step, the `Application` CRD will be available on the edge
31 | cluster(s), allowing the creation of Application CRs in the GitOps namespace.
32 |
33 | On each edge cluster, create an Argo CD Application CR to point the Argo CD instance at the location in the GitOps repo
34 | where the manifests are located.
35 |
36 | Examples using `kustomize` can be found in the [byo-mgmt](../byo-mgmt) directory; a minimal `Application` manifest is also sketched in the appendix below.
37 |
38 | ### Examples
39 |
40 | In the root of this repository, the following can be run to roll out the example inference container applications that
41 | exist in this project:
42 |
43 | #### Tensorflow housing example application
44 |
45 | ```
46 | oc apply -k byo-mgmt/registration/near-edge/overlays/tensorflow-housing-app/
47 | ```
48 |
49 | #### Bike rental example application
50 |
51 | ```
52 | oc apply -k byo-mgmt/registration/near-edge/overlays/bike-rental-app/
53 | ```
54 |
55 | ## Observability
56 |
57 | On each of the edge clusters, you can enable [monitoring for user-defined projects]:
58 | * `oc -n openshift-monitoring edit configmap cluster-monitoring-config`
59 | * Set `enableUserWorkload` to `true` under the `config.yaml` key, as in the sketch below.
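
After the edit, the ConfigMap should contain at least:

``` yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: cluster-monitoring-config
  namespace: openshift-monitoring
data:
  config.yaml: |
    enableUserWorkload: true
```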
60 |
61 | If you forward metrics from each edge cluster to a central location, you can find a list of example metrics exposed
62 | by the OpenVINO and Seldon model servers in
63 | [metrics_list](../acm/odh-core/acm-observability/files/uwl_metrics_list.yaml) that you may wish to forward.
64 |
65 |
66 |
67 | [OpenShift GitOps docs: Deploying resources to a different namespace]: https://docs.openshift.com/gitops/1.11/argocd_instance/setting-up-argocd-instance.html#gitops-deploy-resources-different-namespaces_setting-up-argocd-instance
68 | [monitoring for user-defined projects]: https://access.redhat.com/documentation/en-us/openshift_container_platform/4.14/html/monitoring/enabling-monitoring-for-user-defined-projects
69 |
--------------------------------------------------------------------------------
/docs/glossary.md:
--------------------------------------------------------------------------------
1 | # AI Edge Terminology
2 |
3 | ## Core
4 |
5 | - The central OpenShift cluster containing the tools responsible for creating any artifacts required for the successful deployment of an Inference Application to Near Edge environments.
6 | - No resource or network constraints are expected in the core cluster, as it must fully support all workflows required for creating and verifying Inference Application container images.
7 |
8 | ## Near Edge
9 |
10 | - A non-core, distributed environment for running and serving AI/ML inference workloads with moderate yet constrained compute and network resources.
11 | - For the purpose of this repository, the near edge environment is represented by separate OpenShift clusters which may be disconnected from the core, the internet or both but may be managed from a core OpenShift cluster.
12 |
13 | ## Model Server
14 |
15 | - A Model Server is responsible for hosting models as a service "to return predictions based on data inputs that you provide through API calls."[^2]
16 | - For any workflows under opendatahub-io/ai-edge, we will be focusing on using the Model Servers and serving runtimes supported by Open Data Hub.
17 |
18 | ## Inference Application Container
19 |
20 | - An OCI-compliant container image[^3] with the model(s) included during the build process.
21 | - The model and the model-serving runtime are stored together in one image.
22 |
23 | ## Model Registry
24 |
25 | - A centralized repository for models and their metadata, used to manage the model lifecycle and versions. Currently our pipelines do not support any Model Registry; only S3 and Git can be used as directly referenced sources for models.
26 |
27 | ## OCI Distribution Registry
28 |
29 | - Open Container Initiative (OCI) compliant container registry where the Inference Application Container images and other artifacts are stored and versioned ready to be deployed on production or staging environments.
30 |
31 | ## GitOps
32 |
33 | - GitOps is an established configuration management pattern to store the configuration of your infrastructure and workflow automation in a Git repository for reproducibility and version control.
34 | - "GitOps uses Git repositories as a single source of truth to deliver infrastructure as code."[^1]
35 |
36 | [^1]: [What is GitOps](https://www.redhat.com/en/topics/devops/what-is-gitops)
37 | [^2]: [Red Hat OpenShift AI -> Serving models](https://access.redhat.com/documentation/en-us/red_hat_openshift_ai_self-managed/2.8/html/serving_models/about-model-serving_about-model-serving)
38 | [^3]: [Open Container Initiative](https://opencontainers.org/)
39 |
--------------------------------------------------------------------------------
/docs/images/edge-architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/opendatahub-io/ai-edge/62d03f0571e1448e556ed4ba2892c6e5f954ed1f/docs/images/edge-architecture.png
--------------------------------------------------------------------------------
/docs/images/poc-interaction-diagram.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/opendatahub-io/ai-edge/62d03f0571e1448e556ed4ba2892c6e5f954ed1f/docs/images/poc-interaction-diagram.png
--------------------------------------------------------------------------------
/examples/README.md:
--------------------------------------------------------------------------------
1 | # Pipelines Examples
2 |
3 | This directory contains resources used to successfully run pipelines and tasks from the [manifests](../manifests) directory
4 | using the [Pipelines Setup](../manifests/README.md) guide.
5 |
6 | ## Container files
7 |
8 | These files are used for building images with models during the pipeline run.
9 |
10 | ## Models
11 |
12 | This directory contains the following trained example models.
13 | 
14 | - [bike-rentals-auto-ml](models/bike-rentals-auto-ml/) uses the MLflow format and can run in [Seldon MLServer](https://github.com/SeldonIO/MLServer).
15 | - [tensorflow-housing](models/tensorflow-housing/) uses the MLflow format and wraps a TensorFlow model. It can run in [Seldon MLServer](https://github.com/SeldonIO/MLServer), but can also run in [OVMS](https://github.com/openvinotoolkit/model_server) by loading the [tf2model](models/tensorflow-housing/tf2model) artifacts.
16 | - [MNIST](models/onnx-mnist) uses the ONNX format and can run on [OVMS](https://github.com/openvinotoolkit/model_server).
17 | - [Face Detection](models/tensorflow-facedetection) uses the proprietary OpenVINO IR format and runs only on [OVMS](https://github.com/openvinotoolkit/model_server).
18 | - [Iris](models/lightgbm-iris) uses the LightGBM Booster format and can run on [Seldon MLServer](https://github.com/SeldonIO/MLServer).
19 | - [Mushrooms](models/lightgbm-mushrooms) uses the LightGBM Booster format and can run on [Seldon MLServer](https://github.com/SeldonIO/MLServer).
20 |
21 | ```plaintext
22 | bike-rentals-auto-ml/
23 | ├── conda.yaml
24 | ├── MLmodel
25 | ├── model.pkl
26 | ├── python_env.yaml
27 | └── requirements.txt
28 |
29 | tensorflow-housing/
30 | ├── conda.yaml
31 | ├── MLmodel
32 | ├── model.pkl
33 | ├── python_env.yaml
34 | ├── requirements.txt
35 | └── tf2model/
36 | ├── saved_model.pb
37 | └── ...
38 |
39 | onnx-mnist/
40 | ├── 1
41 | │ ├── mnist.onnx
42 | │ └── schema
43 | │ └── schema.json
44 | └── README.md
45 |
46 | tensorflow-facedetection/
47 | ├── 1
48 | │ ├── face-detection-retail-0004.bin
49 | │ └── face-detection-retail-0004.xml
50 | └── README.md
51 |
52 | lightgbm-iris/
53 | ├── iris-lightgbm.bst
54 | ├── model-settings.json
55 | ├── README.md
56 | └── settings.json
57 |
58 | lightgbm-mushrooms/
59 | ├── model-settings.json
60 | ├── mushroom-lightgbm.bst
61 | ├── README.md
62 | └── settings.json
63 |
64 | ```
65 |
66 | ## Tekton
67 | This directory contains example Tekton PipelineRuns, secret templates, and test data.
68 |
--------------------------------------------------------------------------------
/examples/containerfiles/Containerfile.ab-jq:
--------------------------------------------------------------------------------
1 | FROM registry.access.redhat.com/ubi9/ubi-minimal
2 |
3 | # `jq` is required to parse and format JSON
4 | # `httpd-tools` provides the `ab` benchmarking tool
5 | RUN microdnf --nodocs --setopt=keepcache=0 install -y jq httpd-tools && \
6 | microdnf clean all
7 |
8 | LABEL org.opencontainers.image.licenses="MIT"
9 | LABEL org.opencontainers.image.title="ab-jq"
10 | LABEL org.opencontainers.image.source="https://github.com/opendatahub-io/ai-edge/blob/main/examples/containerfiles/Containerfile.ab-jq"
11 | LABEL org.opencontainers.image.url="https://quay.io/repository/rhoai-edge/ab-jq"
12 |
13 | CMD [ "bash" ]
14 |
--------------------------------------------------------------------------------
/examples/containerfiles/Containerfile.openvino.mlserver.mlflow:
--------------------------------------------------------------------------------
1 | FROM quay.io/opendatahub/openvino_model_server:stable
2 |
3 | ARG MODEL_NAME
4 | ARG MODEL_DIR="."
5 | ARG GRPC_PORT=9090
6 | ARG REST_PORT=8080
7 |
8 | ENV MODEL_NAME_ENV=$MODEL_NAME
9 | ENV GRPC_PORT_ENV=$GRPC_PORT
10 | ENV REST_PORT_ENV=$REST_PORT
11 |
12 | USER root
13 |
14 | RUN mkdir /models && chown ovms:ovms /models
15 |
16 | # CHANGE THIS LINE TO MATCH YOUR MODEL
17 | COPY --chown=ovms:ovms $MODEL_DIR /models/1
18 |
19 | # For tf2model artifacts, fingerprint.pb must be deleted (leaving it in place causes a bug); also follow the OpenShift guidelines for supporting arbitrary user IDs: https://docs.openshift.com/container-platform/4.13/openshift_images/create-images.html#use-uid_create-images
20 | RUN if echo "${MODEL_DIR}" | grep -q "tf2model"; then rm -f /models/1/fingerprint.pb; fi && chmod o+rwX /models/1 && chgrp -R 0 /models/1 && chmod -R g=u /models/1
21 |
22 | EXPOSE $REST_PORT $GRPC_PORT
23 |
24 | USER ovms
25 |
26 | # CHANGE THIS LINE TO MATCH YOUR MODEL
27 | ENTRYPOINT /ovms/bin/ovms --model_path /models --model_name $MODEL_NAME_ENV --port $GRPC_PORT_ENV --rest_port $REST_PORT_ENV --shape auto --metrics_enable
28 |
--------------------------------------------------------------------------------
/examples/containerfiles/Containerfile.seldonio.mlserver.mlflow:
--------------------------------------------------------------------------------
1 | FROM registry.access.redhat.com/ubi9/python-39:1-143 as env-creator
2 |
3 | ARG MODEL_DIR=.
4 |
5 | USER root
6 |
7 | # Install miniconda as a helper to create a portable python environment
8 | RUN mkdir -p ~/miniconda3 && \
9 | wget --show-progress=off https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda3/miniconda.sh && \
10 | bash ~/miniconda3/miniconda.sh -b -u -p ~/miniconda3 && \
11 | rm -rf ~/miniconda3/miniconda.sh
12 |
13 | # CHANGE THIS LINE TO MATCH YOUR MODEL
14 | COPY $MODEL_DIR /opt/app-root/src/model/
15 |
16 | # Download model dependencies and create a portable tarball
17 | # The tarball is placed inside the model directory.
18 | RUN . ~/miniconda3/bin/activate && \
19 | conda env create -n mlflow-env -f model/conda.yaml && \
20 | conda activate mlflow-env && \
21 | conda list && \
22 | conda deactivate && \
23 | conda activate && \
24 | conda install conda-pack && \
25 | conda-pack -n mlflow-env -o model/environment.tar.gz
26 |
27 | # Create the MLServer container. Use the slim image, since we are providing an environment tarball.
28 | #
29 | FROM docker.io/seldonio/mlserver:1.3.5-slim
30 |
31 | ARG MODEL_NAME
32 | ARG GRPC_PORT=9090
33 | ARG REST_PORT=8080
34 | ARG METRICS_PORT=8082
35 |
36 | USER root
37 |
38 | RUN mkdir /mnt/models/ && chown mlserver:mlserver /mnt/models/
39 |
40 | # Copy both the model together with its environment tarball.
41 | COPY --from=env-creator --chown=mlserver:mlserver /opt/app-root/src/model /mnt/models/
42 |
43 | RUN chmod o+rwX /mnt/models/
44 | # https://docs.openshift.com/container-platform/4.13/openshift_images/create-images.html#use-uid_create-images
45 | RUN chgrp -R 0 /mnt/models/ && chmod -R g=u /mnt/models/
46 |
47 | # Specify that the model is in MLFlow format, and some additional flags.
48 | ENV MLSERVER_MODEL_IMPLEMENTATION=mlserver_mlflow.MLflowRuntime MLSERVER_HTTP_PORT=$REST_PORT MLSERVER_GRPC_PORT=$GRPC_PORT MLSERVER_METRICS_PORT=$METRICS_PORT
49 | # CHANGE THIS LINE TO MATCH YOUR MODEL
50 | ENV MLSERVER_MODEL_URI=/mnt/models MLSERVER_MODEL_NAME=$MODEL_NAME
51 |
52 | EXPOSE $REST_PORT $GRPC_PORT $METRICS_PORT
53 |
54 | USER mlserver
55 |
--------------------------------------------------------------------------------
/examples/model-upload/Makefile:
--------------------------------------------------------------------------------
1 | # Used for uploading local model files to the cluster through a
2 | # pod to a PVC. Use the create target to create the pod and PVC
3 | # needed and upload the file
4 |
5 | SIZE=1G
6 | PVC=model-upload-pvc
7 |
8 | # Creates a PVC and a pod which mounts the PVC created
9 | # then uploads the file given
10 | # SIZE - size of the storage used in the PVC created
11 | # PVC - name of the PVC
12 | # MODEL_PATH - local path to model file to upload
13 | # NAME - name of the file when uploaded to the pod
14 | .PHONY: create
15 | create:
16 | # create pod and PVC and wait for pod to be ready
17 | @oc process -f local-model-pvc-template.yaml -p PVC=${PVC} -p SIZE=${SIZE} | oc create -f -
18 | @oc process -f local-model-to-pvc-pod-template.yaml -p PVC=${PVC} | oc create -f -
19 | @oc wait pod local-model-to-pvc-pod --for condition=Ready=True --timeout=60s
20 |
21 | # upload model to the pod to the PVC
22 | 	@oc exec local-model-to-pvc-pod -- mkdir -p /workspace/${PVC}/model_dir-${NAME}/
23 | @oc cp ${MODEL_PATH} local-model-to-pvc-pod:/workspace/${PVC}/model_dir-${NAME}/${NAME}
24 |
25 | # delete pod
26 | @oc delete pod local-model-to-pvc-pod
27 |
28 | @echo "PVC name: ${PVC}"
29 | @echo "Size: ${SIZE}"
30 | @echo "Model path in pod: /workspace/${PVC}/model_dir-${NAME}/${NAME}"
31 |
--------------------------------------------------------------------------------
/examples/model-upload/local-model-pvc-template.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: template.openshift.io/v1
2 | kind: Template
3 | metadata:
4 | name: local-model-pvc-template
5 | objects:
6 | - apiVersion: v1
7 | kind: PersistentVolumeClaim
8 | metadata:
9 | name: ${PVC}
10 | spec:
11 | accessModes:
12 | - ReadWriteOnce
13 | resources:
14 | requests:
15 | storage: ${SIZE}
16 | parameters:
17 | - name: PVC
18 | description: Name of PVC to be mounted to pod
19 | value: local-model
20 | - name: SIZE
21 | description: Size of the PVC
22 | value: 1Gi
23 |
--------------------------------------------------------------------------------
/examples/model-upload/local-model-to-pvc-pod-template.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: template.openshift.io/v1
2 | kind: Template
3 | metadata:
4 | name: local-model-to-pvc-pod-template
5 | objects:
6 | - apiVersion: v1
7 | kind: Pod
8 | metadata:
9 | name: local-model-to-pvc-pod
10 | spec:
11 | volumes:
12 | - name: ${PVC}
13 | persistentVolumeClaim:
14 | claimName: ${PVC}
15 | containers:
16 | - name: local-model-to-pvc-container
17 | image: ubi9
18 | stdin: true
19 | tty: true
20 | securityContext:
21 | allowPrivilegeEscalation: false
22 | volumeMounts:
23 | - mountPath: /workspace/${PVC}
24 | name: ${PVC}
25 | parameters:
26 | - name: PVC
27 | description: Name of PVC to be mounted to pod
28 | value: local-model
29 |
--------------------------------------------------------------------------------
/examples/models/bike-rentals-auto-ml/MLmodel:
--------------------------------------------------------------------------------
1 | artifact_path: outputs/mlflow-model
2 | flavors:
3 | python_function:
4 | env:
5 | conda: conda.yaml
6 | virtualenv: python_env.yaml
7 | loader_module: mlflow.sklearn
8 | model_path: model.pkl
9 | predict_fn: predict
10 | python_version: 3.8.16
11 | sklearn:
12 | code: null
13 | pickled_model: model.pkl
14 | serialization_format: pickle
15 | sklearn_version: 0.22.1
16 | mlflow_version: 2.2.2
17 | model_uuid: a888987a582840c0a55accdf82b9d0a1
18 | run_id: AutoML_80979059-4243-494b-b391-8b5f12c5518b_65
19 | signature:
20 | inputs: '[{"name": "day", "type": "integer"}, {"name": "mnth", "type": "integer"},
21 | {"name": "year", "type": "integer"}, {"name": "season", "type": "integer"}, {"name":
22 | "holiday", "type": "integer"}, {"name": "weekday", "type": "integer"}, {"name":
23 | "workingday", "type": "integer"}, {"name": "weathersit", "type": "integer"}, {"name":
24 | "temp", "type": "float"}, {"name": "hum", "type": "float"}, {"name": "windspeed",
25 | "type": "float"}]'
26 | outputs: '[{"type": "tensor", "tensor-spec": {"dtype": "int64", "shape": [-1]}}]'
27 | utc_time_created: '2023-05-28 16:48:31.765083'
28 |
--------------------------------------------------------------------------------
/examples/models/bike-rentals-auto-ml/model.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/opendatahub-io/ai-edge/62d03f0571e1448e556ed4ba2892c6e5f954ed1f/examples/models/bike-rentals-auto-ml/model.pkl
--------------------------------------------------------------------------------
/examples/models/bike-rentals-auto-ml/python_env.yaml:
--------------------------------------------------------------------------------
1 | python: 3.8.16
2 | build_dependencies:
3 | - pip==22.1.2
4 | - setuptools==49.6.0
5 | - wheel==0.38.1
6 | dependencies:
7 | - -r requirements.txt
8 |
--------------------------------------------------------------------------------
/examples/models/bike-rentals-auto-ml/requirements.txt:
--------------------------------------------------------------------------------
1 | adal==1.2.7
2 | applicationinsights==0.11.10
3 | arch==5.3.1
4 | argcomplete==2.1.2
5 | asttokens==2.2.1
6 | attrs==22.2.0
7 | azure-common==1.1.28
8 | azure-core==1.26.4
9 | azure-graphrbac==0.61.1
10 | azure-identity==1.12.0
11 | azure-mgmt-authorization==3.0.0
12 | azure-mgmt-containerregistry==10.1.0
13 | azure-mgmt-core==1.4.0
14 | azure-mgmt-keyvault==10.2.1
15 | azure-mgmt-resource==22.0.0
16 | azure-mgmt-storage==21.0.0
17 | azure-storage-blob==12.13.0
18 | azure-storage-queue==12.6.0
19 | azureml-automl-core==1.50.0
20 | azureml-automl-runtime==1.50.0
21 | azureml-core==1.50.0
22 | azureml-dataprep==4.10.6
23 | azureml-dataprep-native==38.0.0
24 | azureml-dataprep-rslex==2.17.5
25 | azureml-dataset-runtime==1.50.0
26 | azureml-defaults==1.50.0
27 | azureml-inference-server-http==0.8.3
28 | azureml-interpret==1.50.0
29 | azureml-mlflow==1.50.0
30 | azureml-pipeline-core==1.50.0.post1
31 | azureml-responsibleai==1.50.0
32 | azureml-telemetry==1.50.0
33 | azureml-train-automl-client==1.50.0
34 | azureml-train-automl-runtime==1.50.0
35 | azureml-train-core==1.50.0
36 | azureml-train-restclients-hyperdrive==1.50.0
37 | azureml-training-tabular==1.50.0
38 | backcall==0.2.0
39 | backports-tempfile==1.0
40 | backports-weakref==1.0.post1
41 | bcrypt==4.0.1
42 | bokeh==2.4.3
43 | boto==2.49.0
44 | cachetools==5.3.0
45 | cmdstanpy==0.9.5
46 | contextlib2==21.6.0
47 | contourpy==1.0.7
48 | cycler==0.11.0
49 | cython==0.29.17
50 | dask==2023.2.0
51 | databricks-cli==0.17.6
52 | dataclasses==0.6
53 | debugpy==1.6.7
54 | decorator==5.1.1
55 | dice-ml==0.9
56 | dill==0.3.6
57 | distributed==2023.2.0
58 | distro==1.8.0
59 | docker==5.0.3
60 | dotnetcore2==3.1.23
61 | econml==0.14.0
62 | entrypoints==0.4
63 | ephem==4.1.4
64 | erroranalysis==0.4.2
65 | executing==1.2.0
66 | fairlearn==0.8.0
67 | fire==0.5.0
68 | flask==2.2.3
69 | flask-cors==3.0.10
70 | flatbuffers==23.3.3
71 | fonttools==4.39.3
72 | fsspec==2023.4.0
73 | fusepy==3.0.1
74 | gensim==3.8.3
75 | gitdb==4.0.10
76 | gitpython==3.1.31
77 | google-api-core==2.11.0
78 | google-auth==2.17.3
79 | googleapis-common-protos==1.59.0
80 | gunicorn==20.1.0
81 | h5py==3.8.0
82 | heapdict==1.0.1
83 | humanfriendly==10.0
84 | importlib-metadata==6.3.0
85 | importlib-resources==5.12.0
86 | inference-schema==1.5.1
87 | interpret-community==0.28.0
88 | interpret-core==0.2.7
89 | ipykernel==6.8.0
90 | ipython==8.12.0
91 | isodate==0.6.1
92 | itsdangerous==2.1.2
93 | jedi==0.18.2
94 | jeepney==0.8.0
95 | jinja2==3.1.2
96 | jmespath==0.10.0
97 | jsonpickle==3.0.1
98 | jsonschema==4.17.3
99 | jupyter-client==7.4.9
100 | jupyter-core==5.3.0
101 | keras2onnx==1.6.0
102 | kiwisolver==1.4.4
103 | knack==0.10.1
104 | lightgbm==3.2.1
105 | locket==1.0.0
106 | lunarcalendar==0.0.9
107 | markupsafe==2.1.2
108 | matplotlib==3.7.1
109 | matplotlib-inline==0.1.6
110 | ml-wrappers==0.4.7
111 | mlflow==2.14.3
112 | mlflow-skinny==2.14.3
113 | mlserver==1.4.0
114 | mlserver-mlflow==1.4.0
115 | mltable==1.3.0
116 | msal==1.21.0
117 | msal-extensions==1.0.0
118 | msgpack==1.0.5
119 | msrest==0.7.1
120 | msrestazure==0.6.4
121 | ndg-httpsclient==0.5.1
122 | nest-asyncio==1.5.6
123 | networkx==2.5
124 | numba==0.55.2
125 | oauthlib==3.2.2
126 | onnx==1.13.0
127 | onnxconverter-common==1.6.0
128 | onnxmltools==1.4.1
129 | onnxruntime==1.11.1
130 | opencensus==0.11.2
131 | opencensus-context==0.1.3
132 | opencensus-ext-azure==1.1.9
133 | packaging==23.0
134 | paramiko==3.1.0
135 | parso==0.8.3
136 | partd==1.4.0
137 | pathspec==0.11.1
138 | patsy==0.5.3
139 | pexpect==4.8.0
140 | pickleshare==0.7.5
141 | pillow==9.5.0
142 | pkginfo==1.9.6
143 | pkgutil-resolve-name==1.3.10
144 | pmdarima==1.7.1
145 | portalocker==2.7.0
146 | prompt-toolkit==3.0.38
147 | property-cached==1.6.4
148 | protobuf==3.20.3
149 | ptyprocess==0.7.0
150 | pure-eval==0.2.2
151 | pyarrow==9.0.0
152 | pyasn1==0.4.8
153 | pyasn1-modules==0.2.8
154 | pydantic==1.10.7
155 | pygments==2.15.0
156 | pyjwt==2.6.0
157 | pynacl==1.5.0
158 | pyparsing==3.0.9
159 | pyrsistent==0.19.3
160 | pytz==2022.7.1
161 | pyyaml==6.0
162 | pyzmq==25.0.2
163 | raiutils==0.4.0
164 | requests-oauthlib==1.3.1
165 | responsibleai==0.26.0
166 | rsa==4.9
167 | s3transfer==0.3.7
168 | scipy==1.5.3
169 | secretstorage==3.3.3
170 | semver==2.13.0
171 | setuptools==49.6.0
172 | skl2onnx==1.4.9
173 | sklearn-pandas==1.7.0
174 | slicer==0.0.7
175 | smart-open==1.9.0
176 | smmap==5.0.0
177 | sortedcontainers==2.4.0
178 | sparse==0.14.0
179 | sqlparse==0.4.3
180 | stack-data==0.6.2
181 | statsmodels==0.11.1
182 | tabulate==0.9.0
183 | tblib==1.7.0
184 | termcolor==2.2.0
185 | toolz==0.12.0
186 | tornado==6.2
187 | tqdm==4.65.0
188 | traitlets==5.9.0
189 | urllib3==1.25.11
190 | wcwidth==0.2.6
191 | websocket-client==1.5.1
192 | werkzeug==2.2.3
193 | wrapt==1.12.1
194 | zict==2.2.0
195 | zipp==3.15.0
196 |
--------------------------------------------------------------------------------
/examples/models/bike-rentals-auto-ml/test_data_generator.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import argparse
3 | import json
4 | def csv_to_json(input_csv, output_json):
5 | df = pd.read_csv(input_csv)
6 |
7 | # Rename the year column
8 | df = df.rename(columns={"yr": "year"})
9 |
10 | # Extract 'day' from the 'date' column
11 | df['day'] = df['dteday'].str.split('-').str[-1].astype(int)
12 |
13 | # Drop some of the columns which aren't needed
14 | columns_to_drop = ["instant", "dteday", "atemp", "casual", "registered", "cnt"]
15 | df = df.drop(columns=columns_to_drop, errors='ignore')
16 |
17 | # Randomly sample 20 rows from the DataFrame
18 | df_sample = df.sample(n=20)
19 |
20 | data = {
21 | "dataframe_split": {
22 | "columns": list(df_sample.columns),
23 | "data": df_sample.values.tolist()
24 | }
25 | }
26 |
27 | with open(output_json, 'w') as json_file:
28 |         json.dump(data, json_file)
29 |
30 | if __name__ == "__main__":
31 | parser = argparse.ArgumentParser(description="Convert CSV to JSON format.")
32 | parser.add_argument("input_csv", help="Path to the input CSV file.")
33 | parser.add_argument("output_json", help="Path to the output JSON file.")
34 |
35 | args = parser.parse_args()
36 |
37 | csv_to_json(args.input_csv, args.output_json)
38 |
--------------------------------------------------------------------------------
/examples/models/lightgbm-iris/README.md:
--------------------------------------------------------------------------------
1 | # Iris model
2 | ## Description
3 | This model classifies Iris flowers in order to distinguish between the three Iris species (Iris setosa, Iris virginica and Iris versicolor).
4 |
5 | ## Dataset
6 |
7 | This model was trained using the well known [Iris](https://archive.ics.uci.edu/dataset/53/iris) dataset.
8 |
9 | ## Test
10 | ### Run Seldon MLServer with IRIS model
11 | Execute the following command from the [examples](../../) folder:
12 | ```
13 | podman run -d --rm -v ${PWD}/models:/opt/models:Z -p 8080:8080 -p 8081:8081 -p 8082:8082 -ti seldonio/mlserver:1.3.5-lightgbm mlserver start /opt/models/lightgbm-iris
14 | ```
15 |
16 | ### Test call
17 | ```
18 | curl -s -d '{"inputs": [{"name": "predict-prob", "shape": [1, 4], "datatype": "FP32", "data": [[4.9, 3.0, 1.4, 0.2]]}]}' -H 'Content-Type: application/json' -X POST http://localhost:8080/v2/models/iris/versions/v0.1.0/infer
19 | ```
20 |
21 | ## Credits
22 | - https://www.kaggle.com/code/ajsherlock/iris-classification-using-lightgbm
23 | - https://github.com/SeldonIO/MLServer/blob/master/docs/examples/lightgbm/README.md
24 |
--------------------------------------------------------------------------------
/examples/models/lightgbm-iris/model-settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "iris",
3 | "implementation": "mlserver_lightgbm.LightGBMModel",
4 | "parameters": {
5 | "uri": "./iris-lightgbm.bst",
6 | "version": "v0.1.0"
7 | }
8 | }
9 |
--------------------------------------------------------------------------------
/examples/models/lightgbm-iris/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "debug": "true"
3 | }
4 |
--------------------------------------------------------------------------------
/examples/models/lightgbm-mushrooms/README.md:
--------------------------------------------------------------------------------
1 | # Mushroom model
2 | ## Description
3 | This model classifies mushrooms based on one-hot encoded categorical features describing each mushroom.
4 |
5 | ## Dataset
6 |
7 | This model was trained using the [Mushroom Classification](https://www.kaggle.com/datasets/uciml/mushroom-classification) dataset.
8 |
9 | ## Test
10 | ### Run Seldon MLServer with Mushrooms model
11 | Execute the following command from the [examples](../../) folder:
12 | ```
13 | podman run -d --rm -v ${PWD}/models:/opt/models:Z -p 8080:8080 -p 8081:8081 -p 8082:8082 -ti seldonio/mlserver:1.3.5-lightgbm mlserver start /opt/models/lightgbm-mushrooms
14 | ```
15 | ### Test call
16 |
17 | ```
18 | curl -s -d '{"inputs": [{"name": "predict", "shape": [1, 126], "datatype": "FP32", "data": [[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0]]}]}' -H 'Content-Type: application/json' -X POST http://localhost:8080/v2/models/mushroom-lgb/versions/v0.1.0/infer
19 | ```
20 |
21 | ## Credits
22 | - https://www.kaggle.com/code/stpeteishii/mushroom-predict-and-visualize-importance
23 |
--------------------------------------------------------------------------------
/examples/models/lightgbm-mushrooms/model-settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "mushroom-lgb",
3 | "implementation": "mlserver_lightgbm.LightGBMModel",
4 | "parameters": {
5 | "uri": "./mushroom-lightgbm.bst",
6 | "version": "v0.1.0"
7 | }
8 | }
9 |
--------------------------------------------------------------------------------
/examples/models/lightgbm-mushrooms/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "debug": "true"
3 | }
4 |
--------------------------------------------------------------------------------
/examples/models/onnx-mnist/1/mnist.onnx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/opendatahub-io/ai-edge/62d03f0571e1448e556ed4ba2892c6e5f954ed1f/examples/models/onnx-mnist/1/mnist.onnx
--------------------------------------------------------------------------------
/examples/models/onnx-mnist/1/schema/schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "inputs": [
3 | {
4 | "name": "Input3",
5 | "datatype": "FP32",
6 | "shape": [1, 1, 28, 28]
7 | }
8 | ],
9 | "outputs": [
10 | {
11 | "name": "Plus214_Output_0",
12 | "datatype": "FP32",
13 | "shape": [1, 10]
14 | }
15 | ]
16 | }
17 |
--------------------------------------------------------------------------------
/examples/models/tensorflow-facedetection/1/face-detection-retail-0004.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/opendatahub-io/ai-edge/62d03f0571e1448e556ed4ba2892c6e5f954ed1f/examples/models/tensorflow-facedetection/1/face-detection-retail-0004.bin
--------------------------------------------------------------------------------
/examples/models/tensorflow-facedetection/README.md:
--------------------------------------------------------------------------------
1 | # Tensorflow Face Detection
2 | ## Description
3 | Face detector based on SqueezeNet light (half-channels) as a backbone with a single SSD for indoor/outdoor scenes shot by a front-facing camera. The backbone consists of fire modules to reduce the number of computations. The single SSD head from 1/16 scale feature map has nine clustered prior boxes.
4 |
5 | ## Test
6 | ### Run OVMS with the model inside
7 | Execute the following command from the [examples](../../) folder:
8 | ```
9 | podman run -d --rm -v ${PWD}/models:/model:Z -p 9000:9000 quay.io/opendatahub/openvino_model_server:stable --model_name face-detection --model_path /model/tensorflow-facedetection --port 9000 --shape auto
10 | ```
11 |
12 | ### Test call
13 |
14 | ```
15 | git clone https://github.com/openvinotoolkit/model_server.git
16 | cd model_server/demos/face_detection/python
17 | # Patch the requirements file, because tensorflow 2.11.0 is not working
18 | sed -i 's/tensorflow-serving-api==2.11.0/tensorflow-serving-api==2.13.0/' ../../common/python/requirements.txt
19 | python -m venv .venv
20 | source .venv/bin/activate
21 | pip install -r ../../common/python/requirements.txt
22 | # In case of errors remove the tensorflow-serving-api version from the ../../common/python/requirements.txt
23 | mkdir results
24 | python face_detection.py --batch_size 1 --width 300 --height 300 --grpc_port 9000
25 | # Open the results folder
26 | ```
27 | ## Credits
28 | - https://docs.openvino.ai/archive/2022.1/omz_models_model_face_detection_retail_0004.html
29 | - https://docs.openvino.ai/2023.2/ovms_demo_face_detection.html
30 |
--------------------------------------------------------------------------------
/examples/models/tensorflow-housing/MLmodel:
--------------------------------------------------------------------------------
1 | artifact_path: model
2 | flavors:
3 | python_function:
4 | env:
5 | conda: conda.yaml
6 | virtualenv: python_env.yaml
7 | loader_module: mlflow.tensorflow
8 | python_version: 3.8.17
9 | tensorflow:
10 | code: null
11 | model_type: tf2-module
12 | saved_model_dir: tf2model
13 | mlflow_version: 2.6.0
14 | model_uuid: 684e39d2f25047f087d29574415f8369
15 | run_id: 57518d90481545eb901fceac6da93e38
16 | signature:
17 | inputs: '[{"type": "tensor", "tensor-spec": {"dtype": "float32", "shape": [-1, 8]}}]'
18 | outputs: '[{"type": "tensor", "tensor-spec": {"dtype": "float32", "shape": [-1]}}]'
19 | params: null
20 | utc_time_created: '2023-08-23 11:35:11.000210'
21 |
--------------------------------------------------------------------------------
/examples/models/tensorflow-housing/README.md:
--------------------------------------------------------------------------------
1 | # How was this model generated?
2 |
3 | ## Data source
4 |
5 | The model uses
6 | [scikit-learn California Housing Dataset](https://scikit-learn.org/stable/datasets/real_world.html#california-housing-dataset),
7 | see [this course notebook](https://inria.github.io/scikit-learn-mooc/python_scripts/datasets_california_housing.html)
8 | for more information about the data.
9 |
10 | ## Generating model
11 |
12 | A command similar to the following was used to generate the model:
13 | ```
14 | mlflow run 'https://github.com/mlflow/mlflow#examples/tensorflow'
15 | ```
16 | It used the training script
17 | https://github.com/mlflow/mlflow/blob/master/examples/tensorflow/train.py
18 | present in that repo.
19 |
20 | Running it on a checkout of the repository also works:
21 | ```
22 | mlflow run mlflow/examples/tensorflow
23 | ```
24 |
25 | The result gets stored in a `mlruns/0/<run-id>/artifacts/model/` directory,
26 | and was copied over to this repository.
27 |
--------------------------------------------------------------------------------
/examples/models/tensorflow-housing/conda.yaml:
--------------------------------------------------------------------------------
1 | channels:
2 | - conda-forge
3 | dependencies:
4 | - python=3.8.17
5 | - pip<=23.2.1
6 | - pip:
7 | - mlflow==2.6.0
8 | - importlib-metadata==6.8.0
9 | - numpy==1.24.3
10 | - packaging==23.1
11 | - pandas==2.0.3
12 | - pillow==10.0.0
13 | - protobuf==4.24.1
14 | - requests==2.31.0
15 | - scipy==1.10.1
16 | - tensorflow==2.13.0
17 | name: mlflow-env
18 |
--------------------------------------------------------------------------------
/examples/models/tensorflow-housing/convert_csv_to_json.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import argparse
3 | import json
4 | def csv_to_json(input_csv, output_json):
5 | df = pd.read_csv(input_csv)
6 |
7 |     # The model expects 8 float32 inputs, so we just need to drop one
8 | # of them
9 | columns_to_drop = ["population"]
10 | df = df.drop(columns=columns_to_drop)
11 |
12 | # Randomly sample 20 rows from the DataFrame
13 | df_sample = df.sample(n=20)
14 |
15 | data = {
16 | "data": df_sample.values.tolist()
17 | }
18 |
19 | with open(output_json, 'w') as json_file:
20 |         json.dump(data, json_file)
21 |
22 | if __name__ == "__main__":
23 | parser = argparse.ArgumentParser(description="Convert CSV to JSON format.")
24 | parser.add_argument("input_csv", help="Path to the input CSV file.")
25 | parser.add_argument("output_json", help="Path to the output JSON file.")
26 |
27 | args = parser.parse_args()
28 |
29 | csv_to_json(args.input_csv, args.output_json)
30 |
--------------------------------------------------------------------------------
/examples/models/tensorflow-housing/python_env.yaml:
--------------------------------------------------------------------------------
1 | python: 3.8.17
2 | build_dependencies:
3 | - pip==23.2.1
4 | - setuptools==68.0.0
5 | - wheel==0.41.1
6 | dependencies:
7 | - -r requirements.txt
8 |
--------------------------------------------------------------------------------
/examples/models/tensorflow-housing/requirements.txt:
--------------------------------------------------------------------------------
1 | mlflow==2.6.0
2 | importlib-metadata==6.8.0
3 | numpy==1.24.3
4 | packaging==23.1
5 | pandas==2.0.3
6 | pillow==10.0.0
7 | protobuf==4.24.1
8 | requests==2.31.0
9 | scipy==1.10.1
10 | tensorflow==2.13.0
11 |
--------------------------------------------------------------------------------
/examples/models/tensorflow-housing/tf2model/fingerprint.pb:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/opendatahub-io/ai-edge/62d03f0571e1448e556ed4ba2892c6e5f954ed1f/examples/models/tensorflow-housing/tf2model/fingerprint.pb
--------------------------------------------------------------------------------
/examples/models/tensorflow-housing/tf2model/saved_model.pb:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/opendatahub-io/ai-edge/62d03f0571e1448e556ed4ba2892c6e5f954ed1f/examples/models/tensorflow-housing/tf2model/saved_model.pb
--------------------------------------------------------------------------------
/examples/models/tensorflow-housing/tf2model/variables/variables.data-00000-of-00001:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/opendatahub-io/ai-edge/62d03f0571e1448e556ed4ba2892c6e5f954ed1f/examples/models/tensorflow-housing/tf2model/variables/variables.data-00000-of-00001
--------------------------------------------------------------------------------
/examples/models/tensorflow-housing/tf2model/variables/variables.index:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/opendatahub-io/ai-edge/62d03f0571e1448e556ed4ba2892c6e5f954ed1f/examples/models/tensorflow-housing/tf2model/variables/variables.index
--------------------------------------------------------------------------------
/examples/tekton/aiedge-e2e/example-pipelineruns/git-fetch.tensorflow-housing.pipelinerun.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: tekton.dev/v1beta1
2 | kind: PipelineRun
3 | metadata:
4 | labels:
5 | tekton.dev/pipeline: git-fetch
6 | model-name: tensorflow-housing
7 | generateName: git-fetch-tensorflow-housing-
8 | spec:
9 | params:
10 | - name: model-name
11 | value: tensorflow-housing
12 | - name: model-version
13 | value: "1"
14 | - name: git-containerfile-repo
15 | value: https://github.com/opendatahub-io/ai-edge.git
16 | - name: git-containerfile-revision
17 | value: "main"
18 | - name: containerfile-relative-path
19 | value: examples/containerfiles/Containerfile.openvino.mlserver.mlflow
20 | - name: git-model-repo
21 | value: https://github.com/opendatahub-io/ai-edge.git
22 | - name: model-relative-path
23 | value: examples/models
24 | - name: model-dir
25 | value: "tf2model"
26 | - name: git-model-revision
27 | value: "main"
28 | - name: test-endpoint
29 | value: "v1/models/tensorflow-housing/versions/1:predict"
30 | - name: candidate-image-tag-reference
31 | value: image-registry.openshift-image-registry.svc:5000/$(context.pipelineRun.namespace)/$(params.model-name):$(params.model-version)-candidate
32 | - name: target-image-tag-references
33 | value:
34 | - quay.io/rhoai-edge/$(params.model-name):$(params.model-version)-$(context.pipelineRun.uid)
35 | - quay.io/rhoai-edge/$(params.model-name):$(params.model-version)
36 | - quay.io/rhoai-edge/$(params.model-name):latest
37 | - name: upon-end
38 | value: "delete"
39 | pipelineRef:
40 | name: git-fetch
41 | serviceAccountName: pipeline
42 | timeout: 1h0m0s
43 | workspaces:
44 | - name: build-workspace-pv
45 | # Have the tekton controller allocate a PVC for each pipeline run that persists for the life of each PipelineRun object.
46 | # NOTE: This PVC will be deleted by the Tekton controller when the PipelineRun is deleted
47 | volumeClaimTemplate:
48 | spec:
49 | accessModes:
50 | - ReadWriteOnce
51 | resources:
52 | requests:
53 | storage: 1Gi
54 | - configMap:
55 | name: tensorflow-housing-test-data
56 | name: test-data
57 |
--------------------------------------------------------------------------------
/examples/tekton/aiedge-e2e/example-pipelineruns/s3-fetch.bike-rentals.pipelinerun.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: tekton.dev/v1beta1
2 | kind: PipelineRun
3 | metadata:
4 | labels:
5 | tekton.dev/pipeline: s3-fetch
6 | model-name: bike-rentals-auto-ml
7 | generateName: s3-fetch-bike-rentals-auto-ml-
8 | spec:
9 | params:
10 | - name: model-name
11 | value: bike-rentals-auto-ml
12 | - name: model-version
13 | value: "1"
14 | - name: s3-bucket-name
15 | value: rhoai-edge-models
16 | - name: git-containerfile-repo
17 | value: https://github.com/opendatahub-io/ai-edge.git
18 | - name: git-containerfile-revision
19 | value: "main"
20 | - name: containerfile-relative-path
21 | value: examples/containerfiles/Containerfile.seldonio.mlserver.mlflow
22 | - name: git-model-repo
23 | value: https://github.com/opendatahub-io/ai-edge.git
24 | - name: model-relative-path
25 | value: ""
26 | - name: git-model-revision
27 | value: "main"
28 | - name: test-endpoint
29 | value: "invocations"
30 | - name: candidate-image-tag-reference
31 | value: image-registry.openshift-image-registry.svc:5000/$(context.pipelineRun.namespace)/$(params.model-name):$(params.model-version)-candidate
32 | - name: target-image-tag-references
33 | value:
34 | - quay.io/rhoai-edge/$(params.model-name):$(params.model-version)-$(context.pipelineRun.uid)
35 | - quay.io/rhoai-edge/$(params.model-name):$(params.model-version)
36 | - quay.io/rhoai-edge/$(params.model-name):latest
37 | - name: upon-end
38 | value: "delete"
39 | pipelineRef:
40 | name: s3-fetch
41 | serviceAccountName: pipeline
42 | timeout: 1h0m0s
43 | workspaces:
44 | - name: build-workspace-pv
45 | # Have the tekton controller allocate a PVC for each pipeline run that persists for the life of each PipelineRun object.
46 | # NOTE: This PVC will be deleted by the Tekton controller when the PipelineRun is deleted
47 | volumeClaimTemplate:
48 | spec:
49 | accessModes:
50 | - ReadWriteOnce
51 | resources:
52 | requests:
53 | storage: 1Gi
54 | - name: s3-secret
55 | secret:
56 | secretName: credentials-s3
57 | - configMap:
58 | name: bike-rentals-test-data
59 | name: test-data
60 |
--------------------------------------------------------------------------------
/examples/tekton/aiedge-e2e/templates/credentials-git.secret.yaml.template:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | name: credentials-git
5 | type: Opaque
6 | stringData:
7 | # This .git-credentials field is used to specify credentials when
8 | # interacting with a Git server (clone/fetch/push). It will be
9 | # placed as a file on disk so that the Git CLI can use it, so change
10 | # it to appropriate details for your Git server.
11 | .git-credentials: "https://{username}:{github_pat_1234567890ABCDAPI_TOKEN}@github.com"
12 | .gitconfig: |
13 | [credential]
14 | helper = store
15 |
--------------------------------------------------------------------------------
/examples/tekton/aiedge-e2e/templates/credentials-image-registry.secret.yaml.template:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | name: credentials-image-registry
5 | labels:
6 | app: rhoai-edge-pipelines
7 | app.kubernetes.io/part-of: rhoai-edge-pipelines
8 | annotations:
9 | # Required for configuring basic-auth to image registries compatible with docker v2 API
10 | # https://tekton.dev/docs/pipelines/auth/#configuring-basic-auth-authentication-for-docker
11 | tekton.dev/docker-0: https://quay.io
12 | type: kubernetes.io/basic-auth
13 | stringData:
14 | username: "{{ IMAGE_REGISTRY_USERNAME }}"
15 | password: "{{ IMAGE_REGISTRY_PASSWORD }}"
16 |
17 |
--------------------------------------------------------------------------------
/examples/tekton/aiedge-e2e/templates/credentials-s3.secret.yaml.template:
--------------------------------------------------------------------------------
1 | kind: Secret
2 | apiVersion: v1
3 | metadata:
4 | name: credentials-s3
5 | labels:
6 | app: rhoai-edge-pipelines
7 | app.kubernetes.io/part-of: rhoai-edge-pipelines
8 |
9 | stringData:
10 |   # The endpoint_url property is optional when using AWS S3 and can be omitted entirely in that case.
11 |   # When specified, it generally takes priority over the region.
12 |   # If endpoint_url is not specified, it defaults to an AWS endpoint based on the specified region.
13 |   # Set the bucket region correctly.
14 | s3-storage-config: |+
15 | { "type": "s3", "access_key_id": "{{ AWS_ACCESS_KEY_ID }}", "secret_access_key": "{{ AWS_SECRET_ACCESS_KEY }}", "endpoint_url": "{{ S3_ENDPOINT }}", "region": "{{ S3_REGION }}" }
16 |
--------------------------------------------------------------------------------
/examples/tekton/aiedge-e2e/templates/self-signed-cert.configmap.yaml.template:
--------------------------------------------------------------------------------
1 | kind: ConfigMap
2 | apiVersion: v1
3 | metadata:
4 | name: self-signed-cert
5 | data:
6 | ca-bundle.crt: "{{SELF_SIGNED_CERT}}"
7 |
--------------------------------------------------------------------------------
/examples/tekton/aiedge-e2e/test-data/bike-rentals-test-data-cm.yaml:
--------------------------------------------------------------------------------
1 | kind: ConfigMap
2 | apiVersion: v1
3 | metadata:
4 | name: bike-rentals-test-data
5 | labels:
6 | model-name: bike-rentals-auto-ml
7 |
8 | data:
9 | data.json: |+
10 | {"dataframe_split": {"columns":[ "day", "mnth", "year", "season","holiday", "weekday", "workingday", "weathersit", "temp", "hum", "windspeed" ], "data":[[ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 ]]}}
11 |
12 | output.json: '{"predictions": [331]}'
13 |
--------------------------------------------------------------------------------
/examples/tekton/aiedge-e2e/test-data/kustomization.yaml:
--------------------------------------------------------------------------------
1 | kind: Kustomization
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | resources:
4 | - bike-rentals-test-data-cm.yaml
5 | - tensorflow-housing-test-data-cm.yaml
6 |
--------------------------------------------------------------------------------
/examples/tekton/aiedge-e2e/test-data/tensorflow-housing-test-data-cm.yaml:
--------------------------------------------------------------------------------
1 | kind: ConfigMap
2 | apiVersion: v1
3 | metadata:
4 | name: tensorflow-housing-test-data
5 | labels:
6 | model-name: tensorflow-housing
7 |
8 | data:
9 | data.json: |+
10 | { "instances": [[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]] }
11 |
12 | output.json: '{"predictions": [-28.7639923]}'
13 |
--------------------------------------------------------------------------------
/examples/tekton/gitops-update-pipeline/example-pipelineruns/gitops-update-pipelinerun-bike-rentals.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: tekton.dev/v1beta1
2 | kind: PipelineRun
3 | metadata:
4 | generateName: gitops-update-pipeline-bike-rentals-
5 | labels:
6 | tekton.dev/pipeline: gitops-update-pipeline
7 | spec:
8 | params:
9 | - name: image-registry-repo
10 | value: quay.io/rhoai-edge/bike-rentals-auto-ml
11 | - name: image-digest
12 | value: sha256:c978e95d366ecf057be47dbee958b69f45720d7e71ba1a91c28f0e6b3572c670
13 | - name: gitServer
14 | value: https://github.com
15 | - name: gitApiServer
16 | value: api.github.com
17 | - name: gitOrgName
18 | value: username
19 | - name: gitRepoName
20 | value: ai-edge-gitops
21 | - name: gitRepoBranchBase
22 | value: main
23 | - name: gitTokenSecretName
24 | value: edge-user-1
25 | - name: yq-script
26 | value: |
27 | yq eval -i "(.images[] | select(.name == \"edge-model-template-image\").newName) = \"${IMAGE_NAME}\"" \
28 | acm/odh-edge/apps/bike-rental-app/kustomization.yaml
29 | yq eval -i "(.images[] | select(.name == \"edge-model-template-image\").digest) = \"${IMAGE_DIGEST}\"" \
30 | acm/odh-edge/apps/bike-rental-app/kustomization.yaml
31 | pipelineRef:
32 | name: gitops-update-pipeline
33 | serviceAccountName: pipeline
34 | timeout: 1h0m0s
35 | workspaces:
36 | - name: git-workspace
37 | volumeClaimTemplate:
38 | spec:
39 | accessModes:
40 | - ReadWriteOnce
41 | resources:
42 | requests:
43 | storage: 1Gi
44 | - name: git-basic-auth
45 | secret:
46 | secretName: edge-user-1
47 |
--------------------------------------------------------------------------------
/examples/tekton/gitops-update-pipeline/example-pipelineruns/gitops-update-pipelinerun-json.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: tekton.dev/v1beta1
2 | kind: PipelineRun
3 | metadata:
4 | generateName: gitops-update-pipeline-test-json-
5 | labels:
6 | tekton.dev/pipeline: gitops-update-pipeline
7 | spec:
8 | params:
9 | - name: image-registry-repo
10 | value: quay.io/rhoai-edge/tensorflow-housing
11 | - name: image-digest
12 | value: sha256:de11e6ee5519dfec8d9e388dd003cbdbdc4f4a00e292bf5d6d1293efa29729da
13 | - name: gitServer
14 | value: https://github.com
15 | - name: gitApiServer
16 | value: api.github.com
17 | - name: gitOrgName
18 | value: opendatahub-io
19 | - name: gitRepoName
20 | value: ai-edge
21 | - name: gitRepoBranchBase
22 | value: main
23 | - name: gitTokenSecretName
24 | value: edge-user-1
25 | - name: yq-script
26 | value: |
27 | yq eval -i -o=json -I=4 "(.spec.template.spec.containers[] | select(.name == \"model\").image) = \"${IMAGE_NAME}@${IMAGE_DIGEST}\"" \
28 | examples/tekton/gitops-update-pipeline/test/json/my-deployment.json
29 | pipelineRef:
30 | name: gitops-update-pipeline
31 | serviceAccountName: pipeline
32 | timeout: 1h0m0s
33 | workspaces:
34 | - name: git-workspace
35 | volumeClaimTemplate:
36 | spec:
37 | accessModes:
38 | - ReadWriteOnce
39 | resources:
40 | requests:
41 | storage: 1Gi
42 | - name: git-basic-auth
43 | secret:
44 | secretName: edge-user-1
45 |
--------------------------------------------------------------------------------
/examples/tekton/gitops-update-pipeline/example-pipelineruns/gitops-update-pipelinerun-tensorflow-housing.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: tekton.dev/v1beta1
2 | kind: PipelineRun
3 | metadata:
4 | generateName: gitops-update-pipeline-tensorflow-housing-
5 | labels:
6 | tekton.dev/pipeline: gitops-update-pipeline
7 | spec:
8 | params:
9 | - name: image-registry-repo
10 | value: quay.io/rhoai-edge/tensorflow-housing
11 | - name: image-digest
12 | value: sha256:de11e6ee5519dfec8d9e388dd003cbdbdc4f4a00e292bf5d6d1293efa29729da
13 | - name: gitServer
14 | value: https://github.com
15 | - name: gitApiServer
16 | value: api.github.com
17 | - name: gitOrgName
18 | value: username
19 | - name: gitRepoName
20 | value: ai-edge-gitops
21 | - name: gitRepoBranchBase
22 | value: main
23 | - name: gitTokenSecretName
24 | value: edge-user-1
25 | - name: yq-script
26 | value: |
27 | yq eval -i "(.images[] | select(.name == \"edge-model-template-image\").newName) = \"${IMAGE_NAME}\"" \
28 | acm/odh-edge/apps/tensorflow-housing-app/kustomization.yaml
29 | yq eval -i "(.images[] | select(.name == \"edge-model-template-image\").digest) = \"${IMAGE_DIGEST}\"" \
30 | acm/odh-edge/apps/tensorflow-housing-app/kustomization.yaml
31 | pipelineRef:
32 | name: gitops-update-pipeline
33 | serviceAccountName: pipeline
34 | timeout: 1h0m0s
35 | workspaces:
36 | - name: git-workspace
37 | volumeClaimTemplate:
38 | spec:
39 | accessModes:
40 | - ReadWriteOnce
41 | resources:
42 | requests:
43 | storage: 1Gi
44 | - name: git-basic-auth
45 | secret:
46 | secretName: edge-user-1
47 |
--------------------------------------------------------------------------------
/examples/tekton/gitops-update-pipeline/templates/example-git-credentials-secret.yaml.template:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | name: edge-user-1
5 | type: Opaque
6 | stringData:
7 | # This "token" field is used for interacting with the GitHub API to
8 | # create a pull request. Change the value to a token from your
9 | # GitHub-compatible forge.
10 | token: "{github_pat_1234567890ABCDAPI_TOKEN}"
11 | # This .git-credentials field is used to specify credentials when
12 | # interacting with a Git server (clone/fetch/push). It will be
13 | # placed as a file on disk so that the Git CLI can use it, so change
14 | # it to appropriate details for your Git server.
15 | .git-credentials: "https://{username}:{github_pat_1234567890ABCDAPI_TOKEN}@github.com"
16 | .gitconfig: |
17 | [credential]
18 | helper = store
19 |
--------------------------------------------------------------------------------
/examples/tekton/gitops-update-pipeline/test/json/my-deployment.json:
--------------------------------------------------------------------------------
1 | {
2 | "apiVersion": "apps/v1",
3 | "kind": "Deployment",
4 | "metadata": {
5 | "name": "tensorflow-housing-app-model-1",
6 | "namespace": "tensorflow-housing-app"
7 | },
8 | "spec": {
9 | "progressDeadlineSeconds": 600,
10 | "replicas": 1,
11 | "revisionHistoryLimit": 10,
12 | "selector": {
13 | "matchLabels": {
14 | "app": "tensorflow-housing-app-1"
15 | }
16 | },
17 | "strategy": {
18 | "rollingUpdate": {
19 | "maxSurge": "25%",
20 | "maxUnavailable": "25%"
21 | },
22 | "type": "RollingUpdate"
23 | },
24 | "template": {
25 | "metadata": {
26 | "creationTimestamp": null,
27 | "labels": {
28 | "app": "tensorflow-housing-app-1"
29 | }
30 | },
31 | "spec": {
32 | "containers": [
33 | {
34 | "image": "quay.io/rhoai-edge/tensorflow-housing@sha256:8486af7728e1214ac5f81c6b93d560078d357f2096d1b40c38a9ac8e1cd68767",
35 | "imagePullPolicy": "IfNotPresent",
36 | "livenessProbe": {
37 | "failureThreshold": 8,
38 | "httpGet": {
39 | "path": "/v2/health/live",
40 | "port": 8080,
41 | "scheme": "HTTP"
42 | },
43 | "periodSeconds": 5,
44 | "successThreshold": 1,
45 | "timeoutSeconds": 1
46 | },
47 | "name": "model",
48 | "ports": [
49 | {
50 | "containerPort": 8080,
51 | "protocol": "TCP"
52 | }
53 | ],
54 | "readinessProbe": {
55 | "failureThreshold": 8,
56 | "httpGet": {
57 | "path": "/v2/models/tensorflow-housing/ready",
58 | "port": 8080,
59 | "scheme": "HTTP"
60 | },
61 | "periodSeconds": 5,
62 | "successThreshold": 1,
63 | "timeoutSeconds": 1
64 | },
65 | "resources": {},
66 | "terminationMessagePath": "/dev/termination-log",
67 | "terminationMessagePolicy": "File"
68 | }
69 | ],
70 | "dnsPolicy": "ClusterFirst",
71 | "restartPolicy": "Always",
72 | "schedulerName": "default-scheduler",
73 | "securityContext": {},
74 | "terminationGracePeriodSeconds": 30
75 | }
76 | }
77 | }
78 | }
79 |
--------------------------------------------------------------------------------
/gitea/README.md:
--------------------------------------------------------------------------------
1 | ### Gitea in cluster git server for GitOps workflow
2 | You can deploy a [Gitea](https://about.gitea.com/) server in your cluster instead of GitHub to provide a self contained GitOps workflow environment for your specific use case.
3 |
4 | The [gitea-operator](https://github.com/rhpds/gitea-operator) can be used to manage the Gitea server installation in the cluster. It will simplify the setup so that you can create a minimal [Gitea](https://github.com/rhpds/gitea-operator#migrating-repositories-for-created-users) CR to configure and install the Gitea server.
5 |
6 | 1. Install the [OpenShift Lifecycle Manager](https://docs.openshift.com/container-platform/4.13/operators/understanding/olm/olm-understanding-olm.html) `CatalogSource` and `Subscription` to deploy the `gitea-operator` in the cluster
7 | ```bash
8 | oc apply -k gitea/operator
9 | ```
10 |
11 | 1. Wait for the gitea-operator installation to complete and the `gitea.pfe-rhpds.com` CRD is available on the `odh-core` cluster
12 | ```bash
13 | $ oc get crd gitea.pfe.rhpds.com
14 | NAME CREATED AT
15 | gitea.pfe.rhpds.com 2023-08-25T03:00:13Z
16 | ```
17 |
18 | 1. Create the Gitea CustomResource to deploy the server with an admin user
19 | ```bash
20 | oc apply -k gitea/server
21 | ```
22 |
23 | 1. Once complete, there will be a gitea application deployed in the `gitea` namespace on the cluster.
24 | You can login to the gitea server on the route in the `gitea` namespace using the credentials specifed
25 | in the [gitea](server/gitea.yaml)
26 | ```bash
27 | GITEA_SERVER_URL="http://$(oc get route -n gitea gitea-ai-edge -o jsonpath='{.spec.host}')"
28 | ```
29 |
30 | Open a browser to `${GITEA_SERVER_URL}` OR `git clone` the repo locally to customize the manifests for your use case
31 | ```bash
32 | git clone ${GITEA_SERVER_URL}/edge-user-1/ai-edge-gitops
33 | ```
34 |
--------------------------------------------------------------------------------
/gitea/operator/catalogsource.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: operators.coreos.com/v1alpha1
2 | kind: CatalogSource
3 | metadata:
4 | name: redhat-rhpds-gitea
5 | spec:
6 | sourceType: grpc
7 | image: quay.io/rhpds/gitea-catalog:latest
8 | displayName: Red Hat Demo Platform (Gitea)
9 | publisher: Red Hat Demo Platform
10 |
11 |
--------------------------------------------------------------------------------
/gitea/operator/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | namespace: gitea
5 |
6 | resources:
7 | - namespace.yaml
8 | - catalogsource.yaml
9 | - operatorgroup.yaml
10 | - subscription.yaml
11 |
--------------------------------------------------------------------------------
/gitea/operator/namespace.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Namespace
4 | metadata:
5 | name: gitea
6 |
--------------------------------------------------------------------------------
/gitea/operator/operatorgroup.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: operators.coreos.com/v1
3 | kind: OperatorGroup
4 | metadata:
5 | name: gitea-operator
6 | spec:
7 | targetNamespaces: []
8 |
--------------------------------------------------------------------------------
/gitea/operator/subscription.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: operators.coreos.com/v1alpha1
3 | kind: Subscription
4 | metadata:
5 | name: gitea-operator
6 | spec:
7 | channel: stable
8 | installPlanApproval: Automatic
9 | name: gitea-operator
10 | source: redhat-rhpds-gitea
11 | sourceNamespace: gitea-operator
12 |
--------------------------------------------------------------------------------
/gitea/server/gitea.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Namespace
4 | metadata:
5 | name: gitea
6 | ---
7 | apiVersion: pfe.rhpds.com/v1
8 | kind: Gitea
9 | metadata:
10 | name: gitea-ai-edge
11 | namespace: gitea
12 | spec:
13 | # Create the admin user
14 | giteaAdminUser: admin-edge
15 | giteaAdminEmail: admin@ai-edge
16 | giteaAdminPassword: "opendatahub"
17 |
18 | # Create the gitea users accounts to access the cluster
19 | giteaCreateUsers: true
20 | giteaGenerateUserFormat: "edge-user-%d"
21 | # Change this to the number of users you want to pre-populate on the Gitea server
22 | giteaUserNumber: 3
23 | giteaUserPassword: "opendatahub"
24 |
25 | # Populate each gitea user org with a clone of the entries in the giteaRepositoriesList
26 | giteaMigrateRepositories: true
27 | giteaRepositoriesList:
28 | - repo: https://github.com/opendatahub-io/ai-edge.git
29 | name: ai-edge-gitops
30 | private: false
31 |
--------------------------------------------------------------------------------
/gitea/server/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | namespace: gitea
5 |
6 | resources:
7 | - gitea.yaml
8 |
--------------------------------------------------------------------------------
/manifests/kustomization.yaml:
--------------------------------------------------------------------------------
1 | kind: Kustomization
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | commonLabels:
4 | app: rhoai-edge-pipelines
5 | app.kubernetes.io/part-of: rhoai-edge-pipelines
6 | resources:
7 | - tasks/
8 | - pipelines/
9 |
--------------------------------------------------------------------------------
/manifests/pipelines/kustomization.yaml:
--------------------------------------------------------------------------------
1 | kind: Kustomization
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | resources:
4 | - s3-fetch-pipeline.yaml
5 | - git-fetch-pipeline.yaml
6 | - gitops-update-pipeline.yaml
7 |
8 |
--------------------------------------------------------------------------------
/manifests/tasks/check-model-and-containerfile-exists/README.md:
--------------------------------------------------------------------------------
1 | # `check-model-and-containerfile-exists`
2 |
3 | This Task can be used to check that the fetched model files exist and that the containerfile cloned from git is also present
4 |
5 | ## Parameters
6 | * **model-name**: The name of the model to be checked
7 | * **containerfilePath**: A path from the root of the original git repo cloned to the containerfile to be checked
8 |
9 | ## Workspaces
10 | * **workspace**: The workspace that contains the downloaded model
11 |
12 | ## Results
13 | * **model-files-size**: Total size of the model files in MB
14 | * **model-files-list**: Space separated list of files that are within the model folder
15 |
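16 | The checks the Task performs can be approximated locally. A rough sketch, assuming the model was fetched into `model_dir-<model-name>/<model-name>/` inside the workspace (`MODEL_NAME` and `CONTAINERFILE_PATH` are illustrative variables, not Task parameters):
17 | ```bash
18 | ls -l "model_dir-${MODEL_NAME}/${MODEL_NAME}/"             # fails if the model folder is missing
19 | du -s "model_dir-${MODEL_NAME}/${MODEL_NAME}/" | cut -f1   # -> model-files-size result
20 | ls -p "model_dir-${MODEL_NAME}/${MODEL_NAME}/"             # -> model-files-list result
21 | cat "${CONTAINERFILE_PATH}"                                # fails if the containerfile is missing
22 | ```
23 | 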
--------------------------------------------------------------------------------
/manifests/tasks/check-model-and-containerfile-exists/check-model-and-containerfile-exists.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: tekton.dev/v1beta1
2 | kind: Task
3 | metadata:
4 | name: check-model-and-containerfile-exists
5 | spec:
6 | description: This Task can be used to check if the model files fetched exist and the containerfile cloned from git is also present
7 | params:
8 | - name: model-name
9 | type: string
10 | description: The name of the model to be checked
11 | - name: containerfilePath
12 | type: string
13 |       description: A path from the root of the original git repo cloned to the containerfile to be checked
14 | results:
15 | - name: model-files-size
16 | description: Total size of the model files in MB
17 | - name: model-files-list
18 | description: Space separated list of files that are within the model folder
19 | steps:
20 | - name: check-model-and-containerfile
21 | image: registry.access.redhat.com/ubi9/ubi-micro
22 | script: |
23 | #!/usr/bin/env bash
24 |
25 | set -Eeuo pipefail
26 |
27 | ls -l model_dir-$(params.model-name)/$(params.model-name)/
28 |
29 | du -s model_dir-$(params.model-name)/$(params.model-name)/ | cut -f1 | tee $(results.model-files-size.path) ;
30 | ls -p model_dir-$(params.model-name)/$(params.model-name)/ | tee $(results.model-files-list.path) ;
31 |
32 | # Check containerfile exists
33 | cat $(params.containerfilePath)
34 | workingDir: $(workspaces.workspace.path)
35 | workspaces:
36 | - description: The workspace that contains the downloaded model
37 | name: workspace
38 |
--------------------------------------------------------------------------------
/manifests/tasks/copy-model-from-pvc/README.md:
--------------------------------------------------------------------------------
1 | # `copy-model-from-pvc`
2 |
3 | This Task can be used to copy a model from one PVC to another. The model is copied from the `source-workspace` to the `destination-workspace`; the path relative to the workspace root is the same after the copy
4 |
5 | ## Parameters
6 | * **model-name**: The name of the model folder to be copied
7 |
8 | ## Workspaces
9 | * **source-workspace**: The workspace the model is being copied from
10 | * **destination-workspace**: The workspace the model is being copied to
11 |
12 | ## Results
13 |
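14 | A sketch of the copy semantics (the workspace mount paths are illustrative; inside the Task they come from `$(workspaces.*.path)`):
15 | ```bash
16 | SRC="/workspace/source-workspace/model_dir-${MODEL_NAME}/${MODEL_NAME}"
17 | DST="/workspace/destination-workspace/model_dir-${MODEL_NAME}/${MODEL_NAME}"
18 | mkdir -p "$(dirname "$DST")"   # create the parent folder on the destination PVC
19 | cp -r "$SRC" "$DST"            # the path relative to the workspace root is preserved
20 | ```
21 | 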
--------------------------------------------------------------------------------
/manifests/tasks/copy-model-from-pvc/copy-model-from-pvc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: tekton.dev/v1beta1
2 | kind: Task
3 | metadata:
4 | name: copy-model-from-pvc
5 | spec:
6 |   description: This Task can be used to copy a model from one PVC to another. The model is copied from the `source-workspace` to the `destination-workspace`. The relative path from the workspace will be the same after the copy
7 | params:
8 | - name: model-name
9 | type: string
10 | description: The name of the model folder to be copied
11 | steps:
12 | - name: copy-model-from-pvc
13 | image: quay.io/opendatahub/kserve-storage-initializer:v0.11
14 | script: |
15 | SOURCE_PATH="$(workspaces.source-workspace.path)/model_dir-$(params.model-name)/$(params.model-name)"
16 |
17 | DEST_PATH="$(workspaces.destination-workspace.path)/model_dir-$(params.model-name)/$(params.model-name)"
18 |
19 | echo "Copying model file $SOURCE_PATH"
20 | echo "to $DEST_PATH"
21 |
22 | DIR_PATH="$(dirname $(workspaces.destination-workspace.path)/model_dir-$(params.model-name)/$(params.model-name))"
23 |
24 | mkdir -p $DIR_PATH
25 |
26 | cp -r $SOURCE_PATH $DEST_PATH
27 | workspaces:
28 | - description: The workspace the model is being copied from
29 | name: source-workspace
30 | - description: The workspace the model is being copied to
31 | name: destination-workspace
32 |
--------------------------------------------------------------------------------
/manifests/tasks/kserve-download-model/README.md:
--------------------------------------------------------------------------------
1 | # `kserve-download-model`
2 |
3 | This task is used to download a model folder from an S3 bucket. Credentials to allow the download of the model are stored in the `s3-secret` workspace.
4 |
5 | ## Parameters
6 | * **model-name**: The name of the folder that contains the model files
7 | * **s3-bucket-name**: The name of the S3 bucket to be downloaded from
8 | * **model-relative-path**: The path from the root of the S3 bucket to the folder in which the model folder is located. Passing in an empty value means the model is stored at the root of the bucket
9 |
10 | ## Workspaces
11 | * **workspace**: The workspace for the downloaded model
12 | * **s3-secret**: The workspace containing the S3 credentials needed to download the model. A Secret can be used with the key `s3-storage-config` (a creation sketch is at the end of this README)
13 | ```yaml
14 | stringData:
15 | s3-storage-config: |+
16 | {
17 | "type": "s3",
18 | "access_key_id": "ACCESSKEY",
19 | "secret_access_key": "SECRETKEY",
20 | "endpoint_url": "https://s3.us-west-2.amazonaws.com",
21 | "region": "us-east-1"
22 | }
23 | ```
24 | * **ssl-ca-directory** (optional): A workspace containing CA certificates; this will be used by the model download script to
25 | verify the peer when fetching over HTTPS.
26 |
27 |
28 | ## Results
29 | * **s3-url**: The S3 URL used to download the model
30 |
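31 | A sketch of creating such a Secret with `oc` (the Secret name `s3-storage-config` and the credential values are placeholders):
32 | ```bash
33 | oc create secret generic s3-storage-config \
34 |   --from-literal=s3-storage-config='{"type": "s3", "access_key_id": "ACCESSKEY", "secret_access_key": "SECRETKEY", "endpoint_url": "https://s3.us-west-2.amazonaws.com", "region": "us-east-1"}'
35 | ```
36 | The Secret can then be bound to the `s3-secret` workspace in the PipelineRun or TaskRun.
37 | 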
--------------------------------------------------------------------------------
/manifests/tasks/kserve-download-model/kserve-download-model.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: tekton.dev/v1beta1
2 | kind: Task
3 | metadata:
4 | name: kserve-download-model
5 | spec:
6 | description: This task is used to download a model folder from an S3 bucket. Credentials to allow the download of the model are stored in the `s3-secret` workspace.
7 | params:
8 | - name: model-name
9 | type: string
10 | description: The name of the folder that contains the model files
11 | - name: s3-bucket-name
12 | type: string
13 | description: The name of the S3 bucket to be downloaded from
14 | - name: model-relative-path
15 | type: string
16 | description: The path from the root of the S3 bucket to the folder in which the model folder is located. Passing in an empty value means the model is stored at the root of the bucket
17 | results:
18 | - name: s3-url
19 | description: The S3 URL used to download the model
20 | steps:
21 | - name: download-model-s3
22 | image: quay.io/opendatahub/kserve-storage-initializer:v0.11.1.3
23 | script: |
24 | mkdir -p $(workspaces.workspace.path)/model_dir-$(params.model-name)/
25 |
26 | if [ -n "$(params.model-relative-path)" ]; then
27 | export S3_URL="s3://$(params.s3-bucket-name)/$(params.model-relative-path)/$(params.model-name)"
28 | else
29 | export S3_URL="s3://$(params.s3-bucket-name)/$(params.model-name)"
30 | fi
31 |
32 | echo -n $S3_URL | tee $(results.s3-url.path) ;
33 |
34 | if [ -n "$(workspaces.ssl-ca-directory.path)" ]; then
35 | export CA_BUNDLE_CONFIGMAP_NAME=ssl-ca-directory
36 | export AWS_CA_BUNDLE=$(workspaces.ssl-ca-directory.path)/ca-bundle.crt
37 | fi
38 |
39 | STORAGE_CONFIG="$(cat $(workspaces.s3-secret.path)/s3-storage-config)" /storage-initializer/scripts/initializer-entrypoint \
40 | $S3_URL \
41 | $(workspaces.workspace.path)/model_dir-$(params.model-name)/$(params.model-name)
42 | workspaces:
43 | - description: The workspace for the downloaded model
44 | name: workspace
45 | - name: s3-secret
46 | description: The workspace containing the S3 credentials needed to download the model
47 | - name: ssl-ca-directory
48 | description: |
49 |         A workspace containing CA certificates; this will be used by the model download script to
50 |         verify the peer when fetching over HTTPS.
51 | optional: true
52 |
--------------------------------------------------------------------------------
/manifests/tasks/kustomization.yaml:
--------------------------------------------------------------------------------
1 | kind: Kustomization
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | resources:
4 | - check-model-and-containerfile-exists/check-model-and-containerfile-exists.yaml
5 | - kserve-download-model/kserve-download-model.yaml
6 | - test-model-rest-svc/test-model-rest-svc.yaml
7 | - retrieve-build-image-info/retrieve-build-image-info.yaml
8 | - copy-model-from-pvc/copy-model-from-pvc.yaml
9 | - yq-update/yq-update.yaml
10 | - move-model-to-root-dir/move-model-to-root-dir.yaml
11 | - sanitise-object-name/sanitise-object-name.yaml
12 | - https://raw.githubusercontent.com/tektoncd/catalog/main/task/github-open-pr/0.2/github-open-pr.yaml
13 |
--------------------------------------------------------------------------------
/manifests/tasks/move-model-to-root-dir/README.md:
--------------------------------------------------------------------------------
1 | # `move-model-to-root-dir`
2 |
3 | Used with the git fetch method. This task moves the model folder, which can be anywhere in the cloned repo, to the same directory the git repo was originally cloned into. The cloned repo is then deleted from the workspace. This task can be used to match the behaviour of each fetch method, ensuring the location of the model is consistent between tasks
4 |
5 | ## Parameters
6 | * **model-name**: The name of the model folder that contains the model files
7 | * **subdirectory**: The relative path from the workspace to the location of the git repo cloned
8 | * **src-model-relative-path**: The relative path from the root of the git repo to the folder containing the model folder
9 |
10 | ## Workspaces
11 | * **workspace**: The workspace for the downloaded model
12 |
13 | ## Results
14 |
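15 | A sketch of the move semantics, using illustrative variables (`WORKSPACE` stands for `$(workspaces.workspace.path)`, the others for the Task parameters):
16 | ```bash
17 | # move the model folder out of the cloned repo to the workspace root
18 | mv "${WORKSPACE}/${SUBDIRECTORY}/${SRC_MODEL_RELATIVE_PATH}/${MODEL_NAME}" "${WORKSPACE}/"
19 | # delete the clone and recreate an empty subdirectory
20 | rm -rf "${WORKSPACE}/${SUBDIRECTORY}" && mkdir "${WORKSPACE}/${SUBDIRECTORY}"
21 | # put the model folder where the other fetch methods would leave it
22 | mv "${WORKSPACE}/${MODEL_NAME}" "${WORKSPACE}/${SUBDIRECTORY}/${MODEL_NAME}"
23 | ```
24 | 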
--------------------------------------------------------------------------------
/manifests/tasks/move-model-to-root-dir/move-model-to-root-dir.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: tekton.dev/v1beta1
2 | kind: Task
3 | metadata:
4 | name: move-model-to-root-dir
5 | spec:
6 |   description: Used with the git fetch method. This task moves the model folder, which can be anywhere in the cloned repo, to the same directory the git repo was originally cloned into. The cloned repo is then deleted from the workspace. This task can be used to match the behaviour of each fetch method, ensuring the location of the model is consistent between tasks
7 | params:
8 | - name: model-name
9 | type: string
10 | description: The name of the model folder that contains the model files
11 | - name: subdirectory
12 | type: string
13 | description: The relative path from the workspace to the location of the git repo cloned
14 | - name: src-model-relative-path
15 | type: string
16 | description: The relative path from the root of the git repo to the folder containing the model folder
17 | steps:
18 | - name: copy-model-to-subdirectory
19 | image: registry.access.redhat.com/ubi9/ubi
20 | script: |
21 | #!/usr/bin/env bash
22 |
23 | set -Eeuo pipefail
24 |
25 | mv $(workspaces.workspace.path)/$(params.subdirectory)/$(params.src-model-relative-path)/$(params.model-name) $(workspaces.workspace.path)/
26 | rm -rf $(workspaces.workspace.path)/$(params.subdirectory)
27 | mkdir $(workspaces.workspace.path)/$(params.subdirectory)
28 | mv $(workspaces.workspace.path)/$(params.model-name) $(workspaces.workspace.path)/$(params.subdirectory)/$(params.model-name)
29 | workspaces:
30 | - description: The workspace for the downloaded model
31 | name: workspace
32 |
--------------------------------------------------------------------------------
/manifests/tasks/retrieve-build-image-info/README.md:
--------------------------------------------------------------------------------
1 | # `retrieve-build-image-info`
2 |
3 | This task returns more detailed info about a model that has just been built and builds a url.txt file listing all the image tags to be pushed to
4 |
5 | ## Parameters
6 | * **namespace**: The namespace where the model was built
7 | * **model-name**: The name of the model
8 | * **model-version**: The version of the model built
9 | * **buildah-sha**: The built image digest
10 | * **pipeline-run-uid**: The pipeline run id that was run to build the model
11 | * **candidate-image-tag-reference**: The image tag references used when testing the image
12 | * **target-image-tag-references**: The image tag references used for the final built image
13 |
14 | ## Workspaces
15 | * **images_url**: workspace where url.txt file is created
16 |
17 | ## Results
18 | * **model-name**: The name of the model
19 | * **model-version**: The version of the model
20 | * **image-size-bytes**: The size of the image in bytes
21 | * **image-creation-time**: The date and time the image was created at
22 | * **buildah-version**: The version of buildah used to build the image
23 | * **image-digest-reference**: The fully qualified image digest reference of the image
24 | * **target-image-tag-references**: The fully qualified image reference that the image was pushed to (e.g. registry.example.com/my-org/ai-model:1.0-1)
25 |
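26 | A sketch of the `url.txt` format the Task writes for the skopeo-copy task, with illustrative image references:
27 | ```bash
28 | SOURCE_IMAGE_REF="registry.example.com/my-org/ai-model@sha256:<digest>"
29 | for target in "registry.example.com/my-org/ai-model:1.0-1" "registry.example.com/my-org/ai-model:latest"; do
30 |   echo "docker://${SOURCE_IMAGE_REF} docker://${target}" >> url.txt
31 | done
32 | ```
33 | Each line maps the digest-pinned source image to one target tag, which is what allows a single skopeo-copy taskrun to push to several tags.
34 | 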
--------------------------------------------------------------------------------
/manifests/tasks/retrieve-build-image-info/retrieve-build-image-info.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: tekton.dev/v1beta1
2 | kind: Task
3 | metadata:
4 | name: retrieve-build-image-info
5 | spec:
6 |   description: This task returns more detailed info about a model that has just been built and builds a url.txt file listing all the image tags to be pushed to
7 | workspaces:
8 | - name: images_url
9 | description: workspace where url.txt file is created
10 | params:
11 | - name: namespace
12 | type: string
13 | description: The namespace where the model was built
14 | - name: model-name
15 | type: string
16 | description: The name of the model
17 | - name: model-version
18 | type: string
19 | description: The version of the model built
20 | - name: buildah-sha
21 | type: string
22 | description: The built image digest
23 | - name: pipeline-run-uid
24 | type: string
25 | description: The pipeline run id that was run to build the model
26 | - name: candidate-image-tag-reference
27 | type: string
28 | description: The image tag references used when testing the image
29 | - name: target-image-tag-references
30 | type: array
31 | description: The image tag references used for the final built image
32 | steps:
33 | - name: get-image-sha
34 | image: registry.access.redhat.com/ubi9/skopeo
35 | args:
36 | - "$(params.target-image-tag-references[*])"
37 | script: |
38 | #!/usr/bin/env bash
39 |
40 | set -Eeuo pipefail
41 |
42 | echo -n "$(params.model-name)" | tee $(results.model-name.path) ;
43 | echo ;
44 | echo -n "$(params.model-version)" | tee $(results.model-version.path) ;
45 | echo ;
46 | export DOCKER_IMAGE_REF=$(skopeo inspect --format '{{.Name}}@{{.Digest}}' docker://$(params.candidate-image-tag-reference)) ;
47 | if [[ $DOCKER_IMAGE_REF != *"$(params.buildah-sha)"* ]]; then
48 | echo "Candidate image tag does not contain the correct manifest digest after push"
49 | exit 1 ;
50 | fi
51 | echo -n $DOCKER_IMAGE_REF | tee $(results.image-digest-reference.path) ;
52 | echo ;
53 | echo $(($(skopeo inspect --format '{{range .LayersData}}+{{.Size}}{{end}}' docker://$DOCKER_IMAGE_REF))) | tee $(results.image-size-bytes.path) ;
54 | echo ;
55 | skopeo inspect --format '{{.Created}}' docker://$DOCKER_IMAGE_REF | tee $(results.image-creation-time.path) ;
56 | echo ;
57 | skopeo inspect --format '{{index .Labels "io.buildah.version"}}' docker://$DOCKER_IMAGE_REF | tee $(results.buildah-version.path) ;
58 | echo ;
59 | echo -n "$@" | tee $(results.target-image-tag-references.path) ;
60 | - name: build-urls-txt
61 | image: registry.access.redhat.com/ubi9/ubi-micro
62 | args:
63 | - "$(params.target-image-tag-references[*])"
64 | script: |
65 | #!/usr/bin/env bash
66 |
67 | set -Eeuo pipefail
68 |
69 | # The skopeo-copy task looks for this file in its workspace if the source and destination parameters are
70 | # empty. This is what allows pushing to more than one tag from the single taskrun.
71 | export URLTXT=$(workspaces.images_url.path)/url.txt
72 | export SOURCE_IMAGE_REF=$(cat $(results.image-digest-reference.path))
73 |
74 | rm -f ${URLTXT}
75 | for target in "$@"; do
76 | echo "docker://${SOURCE_IMAGE_REF} docker://${target}" >> "${URLTXT}"
77 | done
78 |
79 | echo "Contents of ${URLTXT}:"
80 | cat ${URLTXT}
81 | results:
82 | - name: model-name
83 | description: The name of the model
84 | - name: model-version
85 | description: The version of the model
86 | - name: image-size-bytes
87 | description: The size of the image in bytes
88 | - name: image-creation-time
89 | description: The date and time the image was created at
90 | - name: buildah-version
91 | description: The version of buildah used to build the image
92 | - name: image-digest-reference
93 | description: The fully qualified image digest reference of the image
94 | - name: target-image-tag-references
95 | description: The fully qualified image reference that the image was pushed to (e.g. registry.example.com/my-org/ai-model:1.0-1)
96 |
--------------------------------------------------------------------------------
/manifests/tasks/sanitise-object-name/README.md:
--------------------------------------------------------------------------------
1 | # `sanitise-object-name`
2 |
3 | This Task can be used to sanitise a string that will be used to name a k8s object. It converts any uppercase character to lowercase, converts any non-alphanumeric character that is not `-` or `.` to `-`, and then trims these characters from either side of the string
4 |
5 | ## Parameters
6 | * **input-string**: The string to be sanitised
7 |
8 | ## Workspaces
9 |
10 | ## Results
11 | * **output-string**: Sanitised output string
12 |
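13 | A sketch of the same transformation outside Tekton (the input string is illustrative):
14 | ```bash
15 | INPUT="My_Model Name.V2"
16 | tr 'A-Z' 'a-z' <<< "$INPUT" | sed 's/[^a-z0-9.-]/-/g; s/^[.-]*//; s/[.-]*$//'
17 | # -> my-model-name.v2
18 | ```
19 | 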
--------------------------------------------------------------------------------
/manifests/tasks/sanitise-object-name/sanitise-object-name.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: tekton.dev/v1beta1
2 | kind: Task
3 | metadata:
4 | name: sanitise-object-name
5 | spec:
6 |   description: This Task can be used to sanitise a string that will be used to name a k8s object. It converts any uppercase character to lowercase, converts any non-alphanumeric character that is not `-` or `.` to `-`, and then trims these characters from either side of the string
7 | params:
8 | - name: input-string
9 | type: string
10 | description: The string to be sanitised
11 | results:
12 | - name: output-string
13 | description: Sanitised output string
14 | steps:
15 | - name: sanitise-input-string
16 | image: registry.access.redhat.com/ubi9/ubi-minimal
17 | script: |
18 | #!/usr/bin/env bash
19 |
20 | set -Eeuo pipefail
21 |
22 |         # convert any uppercase characters to lowercase
23 | STRING=`tr 'A-Z' 'a-z' <<< $(params.input-string)`
24 |
25 | # replace all non-alphanumeric characters that are not - or . with -
26 | STRING=`sed 's/[^a-z0-9.-]/-/g' <<< $STRING`
27 |
28 | # trim - and . from either end of the string
29 | STRING=`sed 's/^[.-]*//; s/[.-]*$//' <<< $STRING`
30 |
31 | # remove line ending at the end of the string
32 | STRING=`tr -d "\n" <<< $STRING`
33 |
34 | echo -n $STRING | tee $(results.output-string.path);
35 |
--------------------------------------------------------------------------------
/manifests/tasks/test-model-rest-svc/README.md:
--------------------------------------------------------------------------------
1 | # `test-model-rest-svc`
2 |
3 | This task sends data to the model inferencing endpoint of the container under test and verifies that the expected output is returned. The `test-data` workspace is required to store the test data used for the test
4 |
5 | ## Parameters
6 | * **service-name**: The name of the service to be tested against
7 | * **test-endpoint**: The endpoint of the service that will be tested against
8 |
9 | ## Workspaces
10 | * **test-data**: A workspace that contains the test data to be used. The expected files are data.json, the JSON payload for your model, and output.json, the expected JSON output for that input payload
11 |
12 | ## Results
13 |
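14 | A local approximation of what the Task does, assuming the service is reachable and using illustrative values for the parameters:
15 | ```bash
16 | curl -s -X POST -H "Content-Type: application/json" --data @data.json \
17 |   -o /tmp/output.json "http://my-model-service:8080/v2/models/my-model/infer"
18 | # normalize both JSON documents before comparing, since formatting may differ
19 | test "$(python3 -m json.tool --no-indent output.json)" = "$(python3 -m json.tool --no-indent /tmp/output.json)"
20 | ```
21 | 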
--------------------------------------------------------------------------------
/manifests/tasks/test-model-rest-svc/test-model-rest-svc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: tekton.dev/v1beta1
2 | kind: Task
3 | metadata:
4 | name: test-model-rest-svc
5 | spec:
6 |   description: This task sends data to the model inferencing endpoint of the container under test and verifies that the expected output is returned. The `test-data` workspace is required to store the test data used for the test
7 | params:
8 | - name: service-name
9 | type: string
10 | description: The name of the service to be tested against
11 | - name: test-endpoint
12 | type: string
13 | description: The endpoint of the service that will be tested against
14 | steps:
15 | - name: call-rest
16 | image: registry.access.redhat.com/ubi9/ubi
17 | script: |
18 | #!/usr/bin/env bash
19 | set -xe
20 | echo "Test inference REST web service"
21 | echo "Data:"
22 | cat $(workspaces.test-data.path)/data.json
23 | echo "Expected response:"
24 | cat $(workspaces.test-data.path)/output.json
25 |
26 |       echo -e "\nCall service:"
27 |
28 | # call service
29 | curl -v -X POST -H "Content-Type:application/json" --data @$(workspaces.test-data.path)/data.json -o /tmp/output.json http://$(params.service-name):8080/$(params.test-endpoint)
30 |
31 | # Check response:
32 | echo "Check response:"
33 | cat /tmp/output.json
34 | test "$( python3 -m json.tool --no-indent $(workspaces.test-data.path)/output.json )" = "$( python3 -m json.tool --no-indent /tmp/output.json )"
35 |
36 | workspaces:
37 |     - description: A workspace that contains the test data to be used. The expected files are data.json, the JSON payload for your model, and output.json, the expected JSON output for that input payload
38 | name: test-data
39 |
--------------------------------------------------------------------------------
/manifests/tasks/yq-update/README.md:
--------------------------------------------------------------------------------
1 | # `yq-update`
2 |
3 | This task is used by the GitOps Update Pipeline to allow modifying files with yq
4 |
5 | ## Parameters
6 | * **SCRIPT**: The yq script to execute. Can be multiple lines for complex tasks. `(Default: )`
7 | * **image**: The yq image to use. `(Default: docker.io/mikefarah/yq:4.27.5@sha256:2be3626ed633fbe1fc33ee9343a1256a6be53334412b2251b9a859f8c145bb53)`
8 | * **git-repo-path**: The path of the git repo directory inside the source workspace. SCRIPT will be run inside it. `(Default: )`
9 | * **env-image-name**: The image name to be made available as the environment variable IMAGE_NAME within the task script. `(Default: )`
10 | * **env-image-digest**: The image digest to be made available as the environment variable IMAGE_DIGEST within the task script. `(Default: )`
11 |
12 | ## Workspaces
13 | * **source**: A workspace that contains the files which need to be altered.
14 |
15 | ## Results
16 | * **yq**: The result from your yq script. You can write to it using `$(results.yq.path)`.
17 |
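18 | A hypothetical `SCRIPT` value that pins an image digest in a kustomization file, using the environment variables the task exports:
19 | ```bash
20 | yq -i '.images[0].name = env(IMAGE_NAME) | .images[0].digest = env(IMAGE_DIGEST)' kustomization.yaml
21 | ```
22 | 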
--------------------------------------------------------------------------------
/manifests/tasks/yq-update/yq-update.yaml:
--------------------------------------------------------------------------------
1 | # Adapted from:
2 | # https://github.com/tektoncd/catalog/blob/main/task/yq/0.4/yq.yaml
3 | #
4 | # If we decide to pass image details to pipeline rather than determining them
5 | # from existing PipelineRuns, then we could use that task directly.
6 | apiVersion: tekton.dev/v1
7 | kind: Task
8 | metadata:
9 | name: yq-update
10 | spec:
11 | description: >-
12 |     This task is used by the GitOps Update Pipeline to allow modifying files with yq
13 | workspaces:
14 | - name: source
15 |       description: A workspace that contains the files which need to be altered.
16 | params:
17 | - name: SCRIPT
18 | type: string
19 | description: The yq script to execute. Can be multiple lines for complex tasks.
20 | default: ""
21 | - name: image
22 | type: string
23 | description: The yq image to use.
24 | default: docker.io/mikefarah/yq:4.27.5@sha256:2be3626ed633fbe1fc33ee9343a1256a6be53334412b2251b9a859f8c145bb53
25 | - name: git-repo-path
26 | type: string
27 | description: The path of the git repo directory inside the source workspace. SCRIPT will be run inside it.
28 | default: ""
29 | - name: env-image-name
30 | type: string
31 | description: The image name to be made available as the environment variable IMAGE_NAME within the task script.
32 | default: ""
33 | - name: env-image-digest
34 | type: string
35 | description: The image digest to be made available as the environment variable IMAGE_DIGEST within the task script.
36 | default: ""
37 | results:
38 | - name: yq
39 | description: The result from your yq script. You can write to it using `$(results.yq.path)`.
40 | steps:
41 | - name: yq-script
42 | image: $(params.image)
43 | workingDir: $(workspaces.source.path)/$(params.git-repo-path)
44 | script: |
45 |         #!/usr/bin/env sh
46 | set -e
47 |
48 | export IMAGE_NAME="$(params.env-image-name)"
49 | export IMAGE_DIGEST="$(params.env-image-digest)"
50 |
51 | $(params.SCRIPT)
52 |
--------------------------------------------------------------------------------
/test/acm/bike-rental-app/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - ../../../acm/registration/near-edge/overlays/bike-rental-app/
6 |
7 | namePrefix: custom-prefix-
8 |
9 | patches:
10 | - patch: |-
11 | - op: replace
12 | path: /spec/template/metadata/name
13 | value: '{{name}}-bike-rental-app'
14 | - op: replace
15 | path: /spec/template/spec/source/targetRevision
16 | value: my-git-branch
17 | - op: replace
18 | path: /spec/template/spec/source/path
19 | value: test/gitops/bike-rental-app/
20 | - op: replace
21 | path: /spec/template/spec/destination/namespace
22 | value: custom-app-namespace
23 | target:
24 | kind: ApplicationSet
25 |
26 | replacements:
27 | - source:
28 | kind: Placement
29 | group: cluster.open-cluster-management.io
30 | fieldPath: metadata.name
31 | targets:
32 | - select:
33 | group: argoproj.io
34 | kind: ApplicationSet
35 | fieldPaths:
36 | - spec.generators.0.clusterDecisionResource.labelSelector.matchLabels.cluster\.open-cluster-management\.io/placement
37 | - source:
38 | kind: Placement
39 | group: cluster.open-cluster-management.io
40 | fieldPath: metadata.name
41 | targets:
42 | - select:
43 | group: policy.open-cluster-management.io
44 | kind: PlacementBinding
45 | fieldPaths:
46 | - placementRef.name
47 |
--------------------------------------------------------------------------------
/test/acm/tensorflow-housing-app/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - ../../../acm/registration/near-edge/overlays/tensorflow-housing-app/
6 |
7 | namePrefix: custom-prefix-
8 |
9 | patches:
10 | - patch: |-
11 | - op: replace
12 | path: /spec/template/metadata/name
13 | value: '{{name}}-tensorflow-housing-app'
14 | - op: replace
15 | path: /spec/template/spec/source/targetRevision
16 | value: my-git-branch
17 | - op: replace
18 | path: /spec/template/spec/source/path
19 | value: test/gitops/tensorflow-housing-app/
20 | - op: replace
21 | path: /spec/template/spec/destination/namespace
22 | value: custom-app-namespace
23 | target:
24 | kind: ApplicationSet
25 |
26 | replacements:
27 | - source:
28 | kind: Placement
29 | group: cluster.open-cluster-management.io
30 | fieldPath: metadata.name
31 | targets:
32 | - select:
33 | group: argoproj.io
34 | kind: ApplicationSet
35 | fieldPaths:
36 | - spec.generators.0.clusterDecisionResource.labelSelector.matchLabels.cluster\.open-cluster-management\.io/placement
37 | - source:
38 | kind: Placement
39 | group: cluster.open-cluster-management.io
40 | fieldPath: metadata.name
41 | targets:
42 | - select:
43 | group: policy.open-cluster-management.io
44 | kind: PlacementBinding
45 | fieldPaths:
46 | - placementRef.name
47 |
--------------------------------------------------------------------------------
/test/e2e-tests/README.md:
--------------------------------------------------------------------------------
1 | # AI Edge testing
2 |
3 | A place for tests that verify the MLOps pipelines are working correctly. These tests use the `k8s.io/client-go` package to interact with the cluster. Using `oc login` to log into the cluster as normal should mean little setup is needed to run the tests.
4 |
5 | A local install of the Go compiler is needed to run the tests; Go version `1.21` is required.
6 |
7 | ## Setup
8 | - Log into the target cluster using `oc login`. This will update your default `kubeconfig` for the tests to use
9 |
10 | ## Run tests locally
11 |
12 | The e2e-tests use a `config.json` to read the values passed to them. In the `e2e-tests` directory, copy the `template.config.json` to `config.json`. You can then fill in the fields in `config.json`.
13 |
14 | ```bash
15 | cp template.config.json config.json
16 | ```
17 |
18 | The `config.json` is structured in four sections: the top-level fields, `git_fetch`, `s3_fetch` and `gitops`. All fields at the top level are required.
19 |
20 | - `namespace` - Cluster namespace that tests are run in
21 | - `image_registry_username` - quay.io username
22 | - `image_registry_password` - quay.io password
23 | - `target_image_tags` - JSON array of image tags that the final image will be pushed to. E.g. '["quay.io/user/model-name:e2e-test"]'
24 | - `git_container_file_repo` - Git repo containing the container file
25 | - `git_container_file_revision` - Git branch in the container file repo
26 | - `container_relative_path` - Relative path from the root of the container file repo to where the container file is
27 | - `git_username` - (optional) Used for when the git repos containing the containerfile and model are private. This is the username associated with the private repo, when set the `git_token` field must also be set
28 | - `git_token` - (optional) Used for when the git repos containing the containerfile and model are private. This is the token associated with the user who is the owner of the private repo, when set the `git_username` field must also be set, [see info here](../../pipelines/README.md#git-repository-and-credentials)
29 |
30 | After the top-level fields, each sub-object configures one type of test. Setting `enabled` to `true` in any of these tells the test suite to use the values in that object and to run those tests.
31 |
32 | These are all the fields in `git_fetch`
33 |
34 | - `model_repo` - Git repo of the model
35 | - `model_relative_path` - Relative path from the root of the model repo to where the model is
36 | - `model_revision` - Branch of the model repo
37 | - `model_dir` - Sub-directory of the model in the model folder
38 | - `self_signed_cert` - (optional) path to a self-signed certificate used to access the repo
39 |
40 | These are all the fields in `s3_fetch`
41 |
42 | - `aws_secret` - AWS secret key
43 | - `aws_access` - AWS access key
44 | - `region` - AWS region of the bucket used
45 | - `endpoint` - Endpoint of the bucket used
46 | - `bucket_name` - Name of the bucket
47 | - `self_signed_cert` - (optional) path to a self-signed certificate used to access the bucket
48 |
49 | These are all the fields in `gitops`
50 |
51 | - `token` - Auth token used by the GitOps pipeline to make a pull request, [see info here](../../manifests/README.md#git-repository-and-credentials)
52 | - `username` - Username linked to the `token` above
53 | - `repo` - Git repo URL used to make a pull request in the GitOps pipeline (https://github.com/org/repo)
54 | - `api_server` - Git API server (api.github.com)
55 | - `branch` - Base branch used for pull request in git ops pipeline
56 |
57 | Now run the `e2e-tests` with:
58 |
59 | ```bash
60 | make go-test
61 | ```
62 | To set the Go binary used for testing to a specific one from your `PATH`:
63 | ```bash
64 | make GO=go1.19 go-test
65 | ```
66 |
67 | If you want to re-run the tests in the same namespace, you can just re-run the `go-test` target. The tests fail if there are **any** failed pipeline runs, so if you have a failed run and want to re-test, make sure to delete the failed runs first (see the sketch below).
68 |
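69 | One way to clear failed PipelineRuns before re-running the tests (a sketch; adjust the namespace to match your `config.json`):
70 | ```bash
71 | NS=my-test-namespace
72 | for pr in $(oc get pipelinerun -n "$NS" -o name); do
73 |   status=$(oc get "$pr" -n "$NS" -o jsonpath='{.status.conditions[?(@.type=="Succeeded")].status}')
74 |   [ "$status" = "False" ] && oc delete "$pr" -n "$NS"
75 | done
76 | ```
77 | 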
69 | ## CI/CD with Github Actions
70 | Not yet implemented
71 |
--------------------------------------------------------------------------------
/test/e2e-tests/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/opendatahub-io/ai-edge/test/e2e-tests
2 |
3 | go 1.21
4 |
5 | require (
6 | github.com/tektoncd/pipeline v0.56.0
7 | golang.org/x/net v0.20.0
8 | k8s.io/api v0.29.1
9 | k8s.io/apimachinery v0.29.1
10 | k8s.io/client-go v0.29.1
11 | knative.dev/pkg v0.0.0-20240108071142-697d66936c81
12 | sigs.k8s.io/kustomize/api v0.16.0
13 | sigs.k8s.io/kustomize/kyaml v0.16.0
14 | sigs.k8s.io/yaml v1.4.0
15 | )
16 |
17 | require (
18 | contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d // indirect
19 | contrib.go.opencensus.io/exporter/prometheus v0.4.2 // indirect
20 | github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
21 | github.com/beorn7/perks v1.0.1 // indirect
22 | github.com/blendle/zapdriver v1.3.1 // indirect
23 | github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect
24 | github.com/cespare/xxhash/v2 v2.2.0 // indirect
25 | github.com/davecgh/go-spew v1.1.1 // indirect
26 | github.com/emicklei/go-restful/v3 v3.11.2 // indirect
27 | github.com/evanphx/json-patch/v5 v5.8.1 // indirect
28 | github.com/go-errors/errors v1.4.2 // indirect
29 | github.com/go-kit/log v0.2.1 // indirect
30 | github.com/go-logfmt/logfmt v0.5.1 // indirect
31 | github.com/go-logr/logr v1.4.1 // indirect
32 | github.com/go-openapi/jsonpointer v0.20.2 // indirect
33 | github.com/go-openapi/jsonreference v0.20.4 // indirect
34 | github.com/go-openapi/swag v0.22.8 // indirect
35 | github.com/gogo/protobuf v1.3.2 // indirect
36 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
37 | github.com/golang/protobuf v1.5.3 // indirect
38 | github.com/google/cel-go v0.18.1 // indirect
39 | github.com/google/gnostic-models v0.6.8 // indirect
40 | github.com/google/go-cmp v0.6.0 // indirect
41 | github.com/google/go-containerregistry v0.17.0 // indirect
42 | github.com/google/gofuzz v1.2.0 // indirect
43 | github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
44 | github.com/google/uuid v1.5.0 // indirect
45 | github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
46 | github.com/hashicorp/errwrap v1.1.0 // indirect
47 | github.com/hashicorp/go-multierror v1.1.1 // indirect
48 | github.com/imdario/mergo v0.3.16 // indirect
49 | github.com/josharian/intern v1.0.0 // indirect
50 | github.com/json-iterator/go v1.1.12 // indirect
51 | github.com/mailru/easyjson v0.7.7 // indirect
52 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
53 | github.com/modern-go/reflect2 v1.0.2 // indirect
54 | github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
55 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
56 | github.com/onsi/ginkgo/v2 v2.14.0 // indirect
57 | github.com/onsi/gomega v1.30.0 // indirect
58 | github.com/opencontainers/go-digest v1.0.0 // indirect
59 | github.com/openzipkin/zipkin-go v0.4.2 // indirect
60 | github.com/pkg/errors v0.9.1 // indirect
61 | github.com/prometheus/client_golang v1.18.0 // indirect
62 | github.com/prometheus/client_model v0.5.0 // indirect
63 | github.com/prometheus/common v0.46.0 // indirect
64 | github.com/prometheus/procfs v0.12.0 // indirect
65 | github.com/prometheus/statsd_exporter v0.22.7 // indirect
66 | github.com/spf13/pflag v1.0.5 // indirect
67 | github.com/stoewer/go-strcase v1.2.0 // indirect
68 | github.com/xlab/treeprint v1.2.0 // indirect
69 | go.opencensus.io v0.24.0 // indirect
70 | go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
71 | go.uber.org/multierr v1.11.0 // indirect
72 | go.uber.org/zap v1.26.0 // indirect
73 | golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
74 | golang.org/x/oauth2 v0.16.0 // indirect
75 | golang.org/x/sync v0.6.0 // indirect
76 | golang.org/x/sys v0.16.0 // indirect
77 | golang.org/x/term v0.16.0 // indirect
78 | golang.org/x/text v0.14.0 // indirect
79 | golang.org/x/time v0.5.0 // indirect
80 | gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
81 | google.golang.org/api v0.156.0 // indirect
82 | google.golang.org/appengine v1.6.8 // indirect
83 | google.golang.org/genproto/googleapis/api v0.0.0-20231212172506-995d672761c0 // indirect
84 | google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 // indirect
85 | google.golang.org/grpc v1.60.1 // indirect
86 | google.golang.org/protobuf v1.32.0 // indirect
87 | gopkg.in/evanphx/json-patch.v5 v5.6.0 // indirect
88 | gopkg.in/inf.v0 v0.9.1 // indirect
89 | gopkg.in/yaml.v2 v2.4.0 // indirect
90 | gopkg.in/yaml.v3 v3.0.1 // indirect
91 | k8s.io/klog/v2 v2.120.1 // indirect
92 | k8s.io/kube-openapi v0.0.0-20240117194847-208609032b15 // indirect
93 | k8s.io/utils v0.0.0-20240102154912-e7106e64919e // indirect
94 | sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
95 | sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
96 | )
97 |
--------------------------------------------------------------------------------
/test/e2e-tests/support/clients.go:
--------------------------------------------------------------------------------
1 | package support
2 |
3 | import (
4 | "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
5 | pipelinev1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1"
6 | "k8s.io/client-go/kubernetes"
7 | _ "k8s.io/client-go/plugin/pkg/client/auth"
8 | "k8s.io/client-go/rest"
9 | knativetest "knative.dev/pkg/test"
10 | )
11 |
12 | type Clients struct {
13 | Kubernetes *kubernetes.Clientset
14 | Task pipelinev1.TaskInterface
15 | Pipeline pipelinev1.PipelineInterface
16 | PipelineRun pipelinev1.PipelineRunInterface
17 | }
18 |
19 | func CreateClients(namespace string) (Clients, error) {
20 | clients := Clients{}
21 |
22 | config, err := knativetest.BuildClientConfig("", "")
23 | if err != nil {
24 | return clients, err
25 | }
26 |
27 | clientSet, err := ClusterClientSet(config)
28 | if err != nil {
29 | return clients, err
30 | }
31 |
32 | 	tektonClientSet, err := versioned.NewForConfig(config)
33 | 	if err != nil {
34 | 		return clients, err
35 | 	}
36 | 
37 | 	clients.Kubernetes = clientSet
38 | 	clients.Pipeline = tektonClientSet.TektonV1().Pipelines(namespace)
39 | 	clients.PipelineRun = tektonClientSet.TektonV1().PipelineRuns(namespace)
40 | 	clients.Task = tektonClientSet.TektonV1().Tasks(namespace)
41 | 
42 | 	return clients, nil
43 | }
44 | 
45 | func ClusterClientSet(config *rest.Config) (*kubernetes.Clientset, error) {
46 | 	clientset, err := kubernetes.NewForConfig(config)
47 | 	if err != nil {
48 | 		return nil, err
49 | 	}
50 | 
51 | 	return clientset, nil
52 | }
53 | 
--------------------------------------------------------------------------------
/test/e2e-tests/support/config.go:
--------------------------------------------------------------------------------
1 | package support
2 |
3 | import (
4 | "encoding/json"
5 | "os"
6 | )
7 |
8 | const (
9 | ConfigPath = "../config.json"
10 | )
11 |
12 | var (
13 | config *Config = nil
14 | )
15 |
16 | type Config struct {
17 | Namespace string `json:"namespace"`
18 | ImageRegistryUsername string `json:"image_registry_username"`
19 | ImageRegistryPassword string `json:"image_registry_password"`
20 | TargetImageTags []string `json:"target_image_tags"`
21 | GitContainerFileRepo string `json:"git_container_file_repo"`
22 | GitContainerFileRevision string `json:"git_container_file_revision"`
23 | ContainerRelativePath string `json:"container_relative_path"`
24 | GitUsername string `json:"git_username"`
25 | GitToken string `json:"git_token"`
26 |
27 | GitFetchConfig GitFetchConfig `json:"git_fetch"`
28 | S3FetchConfig S3FetchConfig `json:"s3_fetch"`
29 | GitOpsConfig GitOpsConfig `json:"gitops"`
30 |
31 | Clients *Clients
32 | }
33 |
34 | type GitFetchConfig struct {
35 | Enabled bool `json:"enabled"`
36 | ModelRepo string `json:"model_repo"`
37 | ModelRelativePath string `json:"model_relative_path"`
38 | ModelRevision string `json:"model_revision"`
39 | ModelDir string `json:"model_dir"`
40 | SelfSignedCert string `json:"self_signed_cert"`
41 | }
42 |
43 | type S3FetchConfig struct {
44 | Enabled bool `json:"enabled"`
45 | AWSSecret string `json:"aws_secret"`
46 | AWSAccess string `json:"aws_access"`
47 | Region string `json:"region"`
48 | Endpoint string `json:"endpoint"`
49 | BucketName string `json:"bucket_name"`
50 | SelfSignedCert string `json:"self_signed_cert"`
51 | }
52 |
53 | type GitOpsConfig struct {
54 | Enabled bool `json:"enabled"`
55 | Token string `json:"token"`
56 | Username string `json:"username"`
57 | Repo string `json:"repo"`
58 | ApiServer string `json:"api_server"`
59 | Branch string `json:"branch"`
60 | }
61 |
62 | func GetConfig() (*Config, error) {
63 | if config != nil {
64 | return config, nil
65 | }
66 |
67 | config = &Config{}
68 |
69 | bytes, err := os.ReadFile(ConfigPath)
70 | if err != nil {
71 | return config, err
72 | }
73 |
74 | err = json.Unmarshal(bytes, &config)
75 | if err != nil {
76 | return config, err
77 | }
78 |
79 | clients, err := CreateClients(config.Namespace)
80 | if err != nil {
81 | return nil, err
82 | }
83 |
84 | config.Clients = &clients
85 |
86 | return config, err
87 | }
88 |
--------------------------------------------------------------------------------
/test/e2e-tests/support/git.go:
--------------------------------------------------------------------------------
1 | package support
2 |
3 | import (
4 | "fmt"
5 | "net/url"
6 | "strings"
7 | )
8 |
9 | type GitRepoURL struct {
10 | Server string
11 | OrgName string
12 | RepoName string
13 | }
14 |
15 | func ParseGitURL(rawURL string) (GitRepoURL, error) {
16 | gitRepoURL := GitRepoURL{}
17 |
18 | URL, err := url.Parse(rawURL)
19 | if err != nil {
20 | return gitRepoURL, err
21 | }
22 |
23 | 	// trim here as well because a leading or trailing / causes empty strings when slicing
24 | subPaths := strings.Split(strings.Trim(URL.Path, "/"), "/")
25 | if len(subPaths) != 2 {
26 | return gitRepoURL, fmt.Errorf("cannot parse git repo URL, expected [scheme]://[host]/[org]/[repo] got %v", rawURL)
27 | }
28 |
29 | gitRepoURL.OrgName = subPaths[0]
30 | gitRepoURL.RepoName = subPaths[1]
31 | gitRepoURL.Server = fmt.Sprintf("%v://%v", URL.Scheme, URL.Host)
32 |
33 | return gitRepoURL, nil
34 | }
35 |
--------------------------------------------------------------------------------
/test/e2e-tests/support/kustomize.go:
--------------------------------------------------------------------------------
1 | package support
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "fmt"
7 | pipepinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
8 | corev1 "k8s.io/api/core/v1"
9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
10 | "k8s.io/apimachinery/pkg/types"
11 | "sigs.k8s.io/kustomize/api/krusty"
12 | "sigs.k8s.io/kustomize/api/resmap"
13 | "sigs.k8s.io/kustomize/api/resource"
14 | "sigs.k8s.io/kustomize/kyaml/filesys"
15 | )
16 |
17 | func ResourceToType[T any](resource *resource.Resource, t *T) error {
18 | bytes, err := resource.MarshalJSON()
19 | if err != nil {
20 | return err
21 | }
22 |
23 | err = json.Unmarshal(bytes, &t)
24 | if err != nil {
25 | return err
26 | }
27 |
28 | return nil
29 | }
30 |
31 | func KustomizeBuild(path string) (resmap.ResMap, error) {
32 | options := krusty.MakeDefaultOptions()
33 | k := krusty.MakeKustomizer(options)
34 | fs := filesys.FileSystemOrOnDisk{
35 | FileSystem: nil,
36 | }
37 |
38 | resourceMap, err := k.Run(fs, path)
39 | if err != nil {
40 | return nil, err
41 | }
42 |
43 | return resourceMap, nil
44 | }
45 |
46 | func CreateObjectsFromResourceMap(ctx context.Context, clients *Clients, resourceMap resmap.ResMap, namespace string) error {
47 | for _, rsc := range resourceMap.Resources() {
48 | kind := rsc.GetKind()
49 | switch kind {
50 | case "ConfigMap":
51 | {
52 | var configMap corev1.ConfigMap
53 | err := ResourceToType(rsc, &configMap)
54 | if err != nil {
55 | return err
56 | }
57 |
58 | yaml, err := rsc.AsYAML()
59 | if err != nil {
60 | return err
61 | }
62 |
63 | _, err = clients.Kubernetes.CoreV1().ConfigMaps(namespace).Patch(ctx, configMap.Name, types.ApplyPatchType, yaml, metav1.PatchOptions{
64 | FieldManager: "Apply",
65 | })
66 | if err != nil {
67 | return err
68 | }
69 | }
70 | case "Task":
71 | {
72 | var task pipepinev1.Task
73 | err := ResourceToType(rsc, &task)
74 | if err != nil {
75 | return err
76 | }
77 |
78 | _, err = clients.Task.Get(ctx, task.Name, metav1.GetOptions{})
79 | if err == nil {
80 | 				continue // task already exists; keep processing the remaining resources
81 | }
82 |
83 | _, err = clients.Task.Create(ctx, &task, metav1.CreateOptions{})
84 | if err != nil {
85 | return err
86 | }
87 | }
88 | case "Pipeline":
89 | {
90 | var pipeline pipepinev1.Pipeline
91 | err := ResourceToType(rsc, &pipeline)
92 | if err != nil {
93 | return err
94 | }
95 |
96 | _, err = clients.Pipeline.Get(ctx, pipeline.Name, metav1.GetOptions{})
97 | if err == nil {
98 | 				continue // pipeline already exists; keep processing the remaining resources
99 | }
100 |
101 | _, err = clients.Pipeline.Create(ctx, &pipeline, metav1.CreateOptions{})
102 | if err != nil {
103 | return err
104 | }
105 | }
106 | default:
107 | {
108 | return fmt.Errorf("object kind '%v' cannot be created - not supported", kind)
109 | }
110 | }
111 | }
112 |
113 | return nil
114 | }
115 |
--------------------------------------------------------------------------------
/test/e2e-tests/support/tekton.go:
--------------------------------------------------------------------------------
1 | package support
2 |
3 | import (
4 | "fmt"
5 | pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
6 | v1 "k8s.io/api/core/v1"
7 | "os"
8 | "sigs.k8s.io/yaml"
9 | )
10 |
11 | func MountConfigMapAsWorkspaceToPipelineRun(configMapName string, workspaceName string, pipelineRun *pipelinev1.PipelineRun) {
12 | pipelineRun.Spec.Workspaces = append(pipelineRun.Spec.Workspaces, pipelinev1.WorkspaceBinding{
13 | Name: workspaceName,
14 | ConfigMap: &v1.ConfigMapVolumeSource{
15 | LocalObjectReference: v1.LocalObjectReference{Name: configMapName},
16 | },
17 | })
18 | }
19 |
20 | func MountSecretAsWorkspaceToPipelineRun(secretName string, workspaceName string, pipelineRun *pipelinev1.PipelineRun) {
21 | pipelineRun.Spec.Workspaces = append(pipelineRun.Spec.Workspaces, pipelinev1.WorkspaceBinding{
22 | Name: workspaceName,
23 | Secret: &v1.SecretVolumeSource{
24 | SecretName: secretName,
25 | },
26 | })
27 | }
28 |
29 | func SetPipelineRunParam(name string, value pipelinev1.ParamValue, pipelineRun *pipelinev1.PipelineRun) {
30 | for index := range pipelineRun.Spec.Params {
31 | param := &pipelineRun.Spec.Params[index]
32 | if param.Name == name {
33 | param.Value = value
34 | }
35 | }
36 | }
37 |
38 | func NewStringParamValue(value string) pipelinev1.ParamValue {
39 | return pipelinev1.ParamValue{
40 | Type: pipelinev1.ParamTypeString,
41 | StringVal: value,
42 | }
43 | }
44 |
45 | func NewArrayParamValue(value []string) pipelinev1.ParamValue {
46 | return pipelinev1.ParamValue{
47 | Type: pipelinev1.ParamTypeArray,
48 | ArrayVal: value,
49 | }
50 | }
51 |
52 | func NewObjectParamValue(value map[string]string) pipelinev1.ParamValue {
53 | return pipelinev1.ParamValue{
54 | Type: pipelinev1.ParamTypeObject,
55 | ObjectVal: value,
56 | }
57 | }
58 |
59 | func ReadFileAsPipelineRun(path string) (pipelinev1.PipelineRun, error) {
60 | var pipelineRun pipelinev1.PipelineRun
61 |
62 | bytes, err := os.ReadFile(path)
63 | if err != nil {
64 | return pipelineRun, err
65 | }
66 |
67 | err = yaml.Unmarshal(bytes, &pipelineRun)
68 | if err != nil {
69 | return pipelineRun, err
70 | }
71 |
72 | return pipelineRun, nil
73 | }
74 |
75 | func GetResultValueFromPipelineRun(resultName string, pipelineRun *pipelinev1.PipelineRun) (pipelinev1.ResultValue, error) {
76 | for _, result := range pipelineRun.Status.Results {
77 | if result.Name == resultName {
78 | return result.Value, nil
79 | }
80 | }
81 |
82 | return pipelinev1.ResultValue{}, fmt.Errorf("no result with name %v in pipeline run %v", resultName, pipelineRun.Name)
83 | }
84 |
--------------------------------------------------------------------------------
/test/e2e-tests/support/utils.go:
--------------------------------------------------------------------------------
1 | package support
2 |
3 | import (
4 | "fmt"
5 | "time"
6 | )
7 |
8 | func WaitFor(duration time.Duration, tickRate time.Duration, f func() (bool, error)) error {
9 | timeout := time.After(duration)
10 | tick := time.Tick(tickRate)
11 |
12 | for {
13 | select {
14 | case <-timeout:
15 | 			return fmt.Errorf("waiting for operation to complete timed out after %v", duration.String())
16 | case <-tick:
17 | complete, err := f()
18 | if err != nil {
19 | return err
20 | }
21 |
22 | if complete {
23 | return nil
24 | }
25 | }
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/test/e2e-tests/template.config.json:
--------------------------------------------------------------------------------
1 | {
2 | "namespace": "my-test-namespace",
3 | "image_registry_username": "username+myrobot",
4 | "image_registry_password": "",
5 | "target_image_tags": ["quay.io/username/repo:latest"],
6 | "git_container_file_repo": "https://github.com/opendatahub-io/ai-edge.git",
7 | "git_container_file_revision": "main",
8 | "container_relative_path": "path/to/containerfile/containerfile.openvino.mlserver.mlflow",
9 | "git_username": "",
10 | "git_token": "",
11 |
12 | "git_fetch": {
13 | "enabled": true,
14 | "model_repo": "https://github.com/opendatahub-io/ai-edge.git",
15 | "model_relative_path": "path/to/model",
16 | "model_revision": "main",
17 | "model_dir": "model_dir_name",
18 | "self_signed_cert": ""
19 | },
20 |
21 | "s3_fetch": {
22 | "enabled": true,
23 | "aws_secret": "",
24 | "aws_access": "",
25 | "region": "us-east-2",
26 | "endpoint": "https://s3.us-west-2.amazonaws.com",
27 | "bucket_name": "mybucket",
28 | "self_signed_cert": ""
29 | },
30 |
31 | "gitops": {
32 | "enabled": false,
33 | "token": "github_pat_AJSHD12312312jbjj1h231",
34 | "username": "githubusername",
35 | "repo": "https://github.com/jackdelahunt/ai-edge",
36 | "api_server": "api.github.com",
37 | "branch": "main"
38 | }
39 | }
40 |
--------------------------------------------------------------------------------
/test/gitops/bike-rental-app/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - ../../../acm/odh-edge/apps/bike-rental-app/
6 |
7 | namespace: custom-app-namespace
8 |
--------------------------------------------------------------------------------
/test/gitops/tensorflow-housing-app/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - ../../../acm/odh-edge/apps/tensorflow-housing-app/
6 |
7 | namespace: my-test-namespace
8 |
--------------------------------------------------------------------------------
/test/shell-pipeline-tests/README.md:
--------------------------------------------------------------------------------
1 | # Shell Pipeline Tests
2 |
3 | This directory contains two shell pipeline tests:
4 | * seldon-bike-rentals - Seldon.io image using the bike rentals model
5 | * openvino-tensorflow-housing - OpenVINO image using the TensorFlow housing model
6 |
7 | The first test runs the [S3 Fetch](../../manifests/pipelines/s3-fetch-pipeline.yaml) pipeline, while the second
8 | runs both the [Git Fetch](../../manifests/pipelines/git-fetch-pipeline.yaml) and [GitOps](../../manifests/pipelines/gitops-update-pipeline.yaml) pipelines.
9 |
10 | The scripts are primarily run in the OpenShift CI environment, so they make use of
11 | OpenShift CI secrets. To run them locally, you need to configure these secrets yourself; see the next section.
12 |
13 | ## Local execution
14 |
15 | For local execution, these environment variables need to be set:
16 |
17 | * **ARTIFACT_DIR** - Directory where logs and YAML files from the namespace are stored for easier debugging.
18 | * **CUSTOM_AWS_SECRET_PATH** - Directory where credentials for the AWS S3 bucket are stored. The S3 bucket serves as the source of the AI model. The directory should contain two files:
19 |   * accessKey - containing the access key, sometimes also called the access key ID
20 |   * secretAccessKey - containing the secret access key
21 | * **CUSTOM_IMAGE_REGISTRY_SECRET_PATH** - Directory where credentials for the image registry (e.g. Quay) are stored. This registry is used to publish the image after it is tested. The pipeline uses [basic-auth](https://tekton.dev/docs/pipelines/auth/#configuring-basic-auth-authentication-for-docker) for authentication. The directory should contain two files:
22 |   * username - containing the username of the account used to access the image registry
23 |   * password - containing the password used to access the image registry
24 | * **CUSTOM_GIT_CREDENTIALS_SECRET_PATH** - Directory where the GitHub-compatible forge token is stored. The directory should contain a single file:
25 |   * token - containing the token; for GitHub specifically it has the form `github_pat_123...`
26 |
27 | After the credentials are configured, you can run the pipeline tests using:
28 |
29 | ```shell
30 | ARTIFACT_DIR=./artifacts CUSTOM_AWS_SECRET_PATH=./secrets CUSTOM_IMAGE_REGISTRY_SECRET_PATH=./secrets CUSTOM_GIT_CREDENTIALS_SECRET_PATH=./secrets ./seldon-bike-rentals/pipelines-test-seldon-bike-rentals.sh
31 | ```
32 | and
33 | ```shell
34 | ARTIFACT_DIR=./artifacts CUSTOM_AWS_SECRET_PATH=./secrets CUSTOM_IMAGE_REGISTRY_SECRET_PATH=./secrets CUSTOM_GIT_CREDENTIALS_SECRET_PATH=./secrets ./openvino-tensorflow-housing/pipelines-test-openvino-tensorflow-housing.sh
35 | ```
36 |
37 | This puts all the logs into the `$PWD/artifacts` directory and expects all the credential files to be stored under the `$PWD/secrets` directory (see the layout sketch at the end of this section).
38 |
39 | > [!NOTE]
40 | > If you have made changes to Containerfiles or models used in the tests, update the PipelineRun parameters accordingly, i.e. to fetch these files from your branch.
41 | > This is done automatically if running in the OpenShift CI environment.
42 |
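For illustration: when all three `CUSTOM_*_SECRET_PATH` variables point at the same `./secrets` directory, as in the example commands above, that directory is expected to hold all five credential files side by side:

```
secrets/
├── accessKey        # AWS access key ID
├── secretAccessKey  # AWS secret access key
├── username         # image registry username
├── password         # image registry password
└── token            # Git forge token, e.g. github_pat_...
```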
--------------------------------------------------------------------------------
/test/shell-pipeline-tests/seldon-bike-rentals/pipelines-test-seldon-bike-rentals.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | REPO_ROOT_DIR=$(dirname "${BASH_SOURCE[0]}")/../../..
3 | EXAMPLES_DIR="$REPO_ROOT_DIR/examples"
4 | MANIFESTS_DIR="$REPO_ROOT_DIR/manifests"
5 |
6 | source "$REPO_ROOT_DIR"/test/shell-pipeline-tests/common.sh
7 |
8 | NAMESPACE="pipeline-test-openvino-bike-rentals"
9 | oc delete project "$NAMESPACE" --ignore-not-found --timeout=60s
10 | oc new-project "$NAMESPACE"
11 |
12 | echo "Waiting for OpenShift Pipelines operator to be fully installed"
13 | waitForOpResult 60 "True" "N/A" "oc get tektonconfig -n openshift-operators config -o jsonpath={.status.conditions[?\(@.type==\'Ready\'\)].status}"
14 | waitForOpResult 10 "pipeline" "N/A" "oc get serviceaccount -o=custom-columns=NAME:.metadata.name | grep pipeline"
15 |
16 | ##### AIEDGE E2E PIPELINE
17 | AIEDGE_E2E_PIPELINE_DIR_PATH="$EXAMPLES_DIR"/tekton/aiedge-e2e
18 |
19 | AWS_SECRET_PATH_TEMPLATE="$AIEDGE_E2E_PIPELINE_DIR_PATH"/templates/credentials-s3.secret.yaml.template
20 | AWS_SECRET_PATH="$AIEDGE_E2E_PIPELINE_DIR_PATH"/templates/credentials-s3.secret.yaml
21 |
22 | createS3Secret "$AWS_SECRET_PATH_TEMPLATE" "$AWS_SECRET_PATH"
23 |
24 | oc create -f "$AWS_SECRET_PATH"
25 |
26 | IMAGE_REGISTRY_SECRET_PATH_TEMPLATE="$AIEDGE_E2E_PIPELINE_DIR_PATH"/templates/credentials-image-registry.secret.yaml.template
27 | IMAGE_REGISTRY_SECRET_PATH="$AIEDGE_E2E_PIPELINE_DIR_PATH"/templates/credentials-image-registry.secret.yaml
28 |
29 | createImageRegistrySecret "$IMAGE_REGISTRY_SECRET_PATH_TEMPLATE" "$IMAGE_REGISTRY_SECRET_PATH"
30 |
31 | oc create -f "$IMAGE_REGISTRY_SECRET_PATH"
32 | oc secret link pipeline credentials-image-registry
33 |
34 | ## apply test data directory
35 | oc apply -k "$AIEDGE_E2E_PIPELINE_DIR_PATH"/test-data
36 |
37 | ## oc apply -k manifests
38 | oc apply -k "$MANIFESTS_DIR"/
39 |
40 | ## prepare parameters
41 | S3_FETCH_PIPELINE_OVERRIDDEN_PATH="$AIEDGE_E2E_PIPELINE_DIR_PATH"/example-pipelineruns/s3-fetch.bike-rentals.pipelinerun-overridden.yaml
42 | cp "$AIEDGE_E2E_PIPELINE_DIR_PATH"/example-pipelineruns/s3-fetch.bike-rentals.pipelinerun.yaml "$S3_FETCH_PIPELINE_OVERRIDDEN_PATH"
43 | sed -i "s|value: rhoai-edge-models|value: rhoai-edge-models-ci|" "$S3_FETCH_PIPELINE_OVERRIDDEN_PATH"
44 | sed -i "s|value: \"delete\"|value: \"keep\"|" "$S3_FETCH_PIPELINE_OVERRIDDEN_PATH"
45 | usePRBranchInPipelineRunIfPRCheck "$S3_FETCH_PIPELINE_OVERRIDDEN_PATH"
46 |
47 | ## oc create pipeline run
48 | oc create -f "$S3_FETCH_PIPELINE_OVERRIDDEN_PATH"
49 | sleep 5 # Just to have the startTime field available
50 |
51 | PIPELINE_RUN_NAME=$(oc get pipelinerun --sort-by={.status.startTime} -o=custom-columns=NAME:.metadata.name | grep "s3-fetch-.*" | tail -n 1)
52 |
53 | if [[ $PIPELINE_RUN_NAME == "" ]]; then
54 | echo "Could not find any pipeline run"
55 | exit 1
56 | fi
57 |
58 | ## wait for the result
59 | waitForOpResult 300 "True" "False" "oc get pipelinerun $PIPELINE_RUN_NAME -o jsonpath={.status.conditions[?\(@.type==\'Succeeded\'\)].status}"
60 | PIPELINE_RUN_RESULT=$?
61 |
62 | saveArtifacts "$PIPELINE_RUN_NAME"
63 |
64 | if [[ $PIPELINE_RUN_RESULT != 0 ]]; then
65 | echo "The s3-fetch pipeline failed"
66 | exit 1
67 | else
68 | echo "The s3-fetch pipeline finished successfully"
69 | fi
70 |
--------------------------------------------------------------------------------