├── .gitignore
├── 1.InstallKnativeAndTekton.md
├── 10.KafkaSource.md
├── 11.BrokerTriggerWithKafka.md
├── 12.KafkaChannelSubscription.md
├── 13.SQSDirectSource.md
├── 14.SQSBrokerTrigger.md
├── 15.CamelDirectSource.md
├── 2.BuildUsingOpenShiftPipelines.md
├── 3.DeployServerlessApp.md
├── 4.Autoscaling.md
├── 5.BlueGreen.md
├── 6.TrafficSplitting.md
├── 7.AddingDirectEventSource.md
├── 8.AddingChannelsAndSubscriptions.md
├── 9.UsingBrokersAndTriggers.md
├── Issues.txt
├── README.md
├── awssqs
│   ├── awssqs-source-broker.yaml
│   └── awssqs-source-direct.yaml
├── camelk
│   ├── camel-source.yaml
│   └── camel-source1.yaml
├── eventing
│   ├── event-source-heartbeat-180s.yaml
│   ├── event-source-heartbeat.yaml
│   ├── event-source-to-channel.yaml
│   ├── event-source-to-channel.yaml.bak
│   ├── event-source.yaml
│   ├── event-subscription.yaml
│   ├── event-subscription.yaml.bak
│   ├── event-trigger.yaml
│   ├── event-trigger.yaml.bak
│   ├── in-memory-channel.yaml
│   ├── in-memory-channel.yaml.bak
│   └── pingsource.yaml
├── images
│   ├── 1.clidownload.png
│   ├── awssqs1.png
│   ├── awssqs2.png
│   ├── bluegreen1.png
│   ├── bluegreen2.png
│   ├── brokertrigger1.png
│   ├── camelsource1.png
│   ├── camelsource2.png
│   ├── devconsole1.png
│   ├── devconsole2.png
│   ├── devconsole3.png
│   ├── directsource1.png
│   ├── directsource2.png
│   ├── directsource3.png
│   ├── eventing1.png
│   ├── kafkacluster.png
│   ├── pipeline1.png
│   ├── scaling1.png
│   ├── scaling2.png
│   ├── serving1.png
│   ├── serving2.png
│   ├── trafficsplit1.png
│   ├── trafficsplit2.png
│   └── trafficsplit3.png
├── kafka
│   ├── channel.yaml
│   ├── event-subscription.yaml
│   ├── kafka-channel.yaml
│   ├── kafka-source.yaml
│   └── kafka-topic.yaml
├── knative-serving-ingress.json
├── network-policies
│   ├── all-open.yaml
│   ├── allow-openshift-ingress.yaml
│   ├── allowIngressToAppInSMMR.yaml
│   ├── dumpy-network-policy.yaml
│   └── knative-eventing-network-policies.yaml
├── oc.yaml
└── pipeline
    ├── openshift-objects.yaml
    ├── pipeline-resources.yaml
    ├── pipeline.yaml
    └── s2i-quarkus-native.yaml
/.gitignore:
--------------------------------------------------------------------------------
1 | np1.yaml
2 |
--------------------------------------------------------------------------------
/1.InstallKnativeAndTekton.md:
--------------------------------------------------------------------------------
1 | # Install OpenShift Serverless, OpenShift Pipelines, Client Tools and AMQ Streams
2 |
3 | This tutorial has been tested with the following versions.
4 | ```
5 | OCP 4.4.3
6 | OpenShift Serverless Operator 1.7.0
7 | OpenShift Pipelines Operator 0.11.2
8 | kn client v0.13.2
9 | tkn client v0.9.0
10 | ```
11 | **Note:** OpenShift Pipelines is still a Community operator and Knative Eventing is still in Technology Preview.
12 |
13 | ## Install Knative Serving, Knative Eventing
14 | * Install OpenShift Serverless Operator as explained [here](https://docs.openshift.com/container-platform/4.4/serverless/installing_serverless/installing-openshift-serverless.html)
15 | * Install Knative Serving as explained [here](https://docs.openshift.com/container-platform/4.4/serverless/installing_serverless/installing-knative-serving.html)
16 | * Install Knative Eventing as explained [here](https://docs.openshift.com/container-platform/4.4/serverless/installing_serverless/installing-knative-eventing.html)
17 |
18 | ## Install OpenShift Pipelines
19 | * In the Operator Hub, select the OpenShift Pipelines Operator (provided by Red Hat, Community) and install it
20 |
21 | ## Install CLI tools
22 | * In the top right corner of your OpenShift console, click on `?`, navigate to `Command Line Tools`, and download the CLIs for `oc`, `kn` and `tkn`
23 |
24 | 
25 |
26 | * Add the CLIs to your PATH, as shown in the example below
27 |
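For example, on Linux you could extract the downloaded archives and move the binaries to a directory that is already on your PATH (the archive names below are assumptions; adjust them to the files you actually downloaded):

```
tar -xzf oc.tar.gz
tar -xzf kn-linux-amd64.tar.gz
tar -xzf tkn-linux-amd64.tar.gz
sudo mv oc kn tkn /usr/local/bin/
```
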
28 | ## Latest Developer Console (OPTIONAL)
29 |
30 | These labs use the latest nightly build of the Developer Console. If you are interested in running these labs using the GUI where possible, you may want to install the latest Developer Console using the approach below
31 |
32 | * [Install the latest Dev Console as another application](https://github.com/VeerMuchandi/ocp4-extras/tree/master/devconsole)
33 |
34 | ## Install Knative Kafka Operator
35 |
36 | You will need this only if you are trying [eventing use cases with Kafka](./README.md#kafka). Otherwise you can skip this section.
37 |
38 | * Find the `Knative Apache Kafka Operator` by Red Hat in the Operator Hub and install it
39 | * In the `knative-eventing` namespace, create a Custom Resource for `Knative components for Apache Kafka` with the spec
40 | ```
41 | spec:
42 |   bootstrapServers: 'my-cluster-kafka-bootstrap.kafka:9092'
43 |   setAsDefaultChannelProvisioner: true
44 | ```
45 | *Note:* this default spec configures the bootstrap server expecting a Kafka cluster named `my-cluster` in the namespace `kafka`
46 |
47 | * This CR adds the following pods to the `knative-eventing` namespace
48 | ```
49 | kafka-ch-controller-f9589648f-hqz6c 1/1 Running 0 7d4h
50 | kafka-ch-dispatcher-64976f876b-6xh49 1/1 Running 7 6d8h
51 | kafka-controller-manager-6fb468f444-5lmvt 1/1 Running 0 7d4h
52 | kafka-webhook-66875c495-cb5s8 1/1 Running 0 7d4h
53 | ```
54 |
55 |
56 | ## Install AMQ Streams
57 |
58 | You will need this only if you are trying [eventing use cases with Kafka](./README.md#kafka). Otherwise you can skip this section.
59 |
60 | * Find `Red Hat Integration - AMQ Streams` by Red Hat in the Operator Hub and install this cluster-wide operator.
61 | * Create a new project named `kafka`
62 | * Create a new Kafka cluster by instantiating the `Kafka` custom resource in the `kafka` project.
63 | You can choose the default specification, which creates a Kafka cluster named `my-cluster` with 3 replicas.
64 |
65 | ```
66 | apiVersion: kafka.strimzi.io/v1beta1
67 | kind: Kafka
68 | metadata:
69 |   name: my-cluster
70 |   namespace: kafka
71 | spec:
72 |   kafka:
73 |     version: 2.4.0
74 |     replicas: 3
75 |     listeners:
76 |       plain: {}
77 |       tls: {}
78 |     config:
79 |       offsets.topic.replication.factor: 3
80 |       transaction.state.log.replication.factor: 3
81 |       transaction.state.log.min.isr: 2
82 |       log.message.format.version: '2.4'
83 |     storage:
84 |       type: ephemeral
85 |   zookeeper:
86 |     replicas: 3
87 |     storage:
88 |       type: ephemeral
89 |   entityOperator:
90 |     topicOperator: {}
91 |     userOperator: {}
92 | ```
93 |
94 | *Note:* In the previous step we installed the Knative components for Apache Kafka, which expect a Kafka cluster named `my-cluster` in the `kafka` namespace
95 |
96 | * In a few minutes you will see the `my-cluster` Kafka cluster running with the following pods
97 |
98 | ```
99 | % oc get po -n kafka
100 | NAME READY STATUS RESTARTS AGE
101 | my-cluster-entity-operator-f796fb9c4-djssr 3/3 Running 0 7d4h
102 | my-cluster-kafka-0 2/2 Running 1 7d4h
103 | my-cluster-kafka-1 2/2 Running 0 7d4h
104 | my-cluster-kafka-2 2/2 Running 0 7d4h
105 | my-cluster-zookeeper-0 2/2 Running 0 7d5h
106 | my-cluster-zookeeper-1 2/2 Running 0 7d5h
107 | my-cluster-zookeeper-2 2/2 Running 0 7d5h
108 | ```
109 |
110 |
111 | Here is how the Kafka cluster shows up on the developer console when you navigate to the `kafka` namespace
112 |
113 | 
114 |
115 |
116 |
117 | ## Install AWS SQS Controller
118 |
119 | You will need this only if you are trying eventing use cases with AWS Simple Queue Service (SQS). Otherwise you can skip this section.
120 |
121 | The steps described here help a cluster administrator set up AWS SQS Controller on your cluster.
122 |
123 | * Currently, installing the AWS SQS controller requires the [ko client](https://medium.com/knative/ko-fast-kubernetes-microservice-development-in-go-f94a934a7240). Note that this has dependencies on golang, a docker client, and access to a public image repository to which the AWS SQS Controller images will be built and pushed.
124 |
125 | * We will be building the AWS SQS controller from source code, so git clone the source code from the [eventing-contrib github](https://github.com/knative/eventing-contrib/tree/master/awssqs) repository. Once cloned, move to the `eventing-contrib/awssqs` folder.
126 |
127 | * Run the following command (as a cluster administrator) to build and deploy the AWS SQS Controller on your cluster
128 |
129 | ```
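# Set KO_DOCKER_REPO to an image repository you can push to, e.g. quay.io/<your-user> (example value)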
130 | export KO_DOCKER_REPO=
131 | ko apply -f config/
132 | ```
133 |
134 | and note the output as follows
135 |
136 | ```
137 | 2020/06/24 10:51:41 NOTICE!
138 | -----------------------------------------------------------------
139 | We are changing the default base image in a subsequent release.
140 |
141 | For more information (including how to suppress this message):
142 |
143 | https://github.com/google/ko/issues/160
144 |
145 | -----------------------------------------------------------------
146 | 2020/06/24 10:51:42 Using base gcr.io/distroless/static:latest for knative.dev/eventing-contrib/awssqs/cmd/receive_adapter
147 | 2020/06/24 10:51:42 Using base gcr.io/distroless/static:latest for knative.dev/eventing-contrib/awssqs/cmd/controller
148 | namespace/knative-sources created
149 | serviceaccount/awssqs-controller-manager created
150 | 2020/06/24 10:51:43 Building knative.dev/eventing-contrib/awssqs/cmd/receive_adapter
151 | 2020/06/24 10:51:43 Building knative.dev/eventing-contrib/awssqs/cmd/controller
152 | clusterrole.rbac.authorization.k8s.io/awssqs-controller created
153 | clusterrole.rbac.authorization.k8s.io/eventing-contrib-awssqs-source-observer created
154 | clusterrolebinding.rbac.authorization.k8s.io/awssqs-controller-rolebinding created
155 | clusterrolebinding.rbac.authorization.k8s.io/eventing-sources-awssqs-controller-addressable-resolver created
156 | customresourcedefinition.apiextensions.k8s.io/awssqssources.sources.knative.dev created
157 | 2020/06/24 10:51:46 Publishing quay.io/veermuchandi/receive_adapter-60659434536f4a21c0a85abbf570544a:latest
158 | 2020/06/24 10:51:49 Publishing quay.io/veermuchandi/controller-312a0ccf75926fd0a58b0187285bc0ce:latest
159 | 2020/06/24 10:52:28 Published quay.io/veermuchandi/receive_adapter-60659434536f4a21c0a85abbf570544a@sha256:79bb4ac4b9ed42af98599f70b690ebacbf5b0fff75485a9c371a7e782dd8b977
160 | 2020/06/24 10:52:48 Published quay.io/veermuchandi/controller-312a0ccf75926fd0a58b0187285bc0ce@sha256:817e073ff12fecf9d05660226fcb61ddbb1e7e09e5f8f0cc61c14fce2735b378
161 | deployment.apps/awssqs-controller created
162 | serviceentry.networking.istio.io/awssqs-bus-ext created
163 | configmap/config-leader-election-awssqs created
164 | configmap/config-logging created
165 | configmap/config-observability created
166 | ```
167 | This creates a new namespace `knative-sources` on your cluster and deploys the controller into that namespace. It also adds the roles, service accounts and configmaps required for the deployment.
168 |
169 | In a few minutes, you can find `awssqs-controller` running in this namespace.
170 |
171 | ```
172 | $ oc get po -n knative-sources
173 | NAME READY STATUS RESTARTS AGE
174 | awssqs-controller-794667495c-p4jd5 1/1 Running 0 4h29m
175 | ```
--------------------------------------------------------------------------------
/10.KafkaSource.md:
--------------------------------------------------------------------------------
1 | # Adding Kafka source to Serverless Application
2 |
3 | ## Prerequisites
4 | * Knative Serving and Knative Eventing are deployed on your cluster using OpenShift Serverless Operator
5 | * Red Hat Integration - AMQ Streams Operator is deployed and a Kafka cluster `my-cluster` is running in the `kafka` namespace
6 | * You have a Knative service deployed. If not, add one by running `kn service create msgtxr-sl --image=image-registry.openshift-image-registry.svc:5000/kn-demo/msgtxr -l app.openshift.io/runtime=quarkus --env format=none`
7 |
8 |
9 | ## Add Kafka Topic
10 |
11 | The topic should be added to the same namespace where the Kafka cluster is running. Depending on who created the Kafka cluster, you may need to ask the owner of the `kafka` namespace to create the topic.
12 |
13 | Add a topic by creating a custom resource with the following spec
14 |
15 | ```
16 | apiVersion: kafka.strimzi.io/v1beta1
17 | kind: KafkaTopic
18 | metadata:
19 |   name: knative-demo-topic
20 |   labels:
21 |     strimzi.io/cluster: my-cluster
22 | spec:
23 |   partitions: 3
24 |   replicas: 1
25 |   config:
26 |     retention.ms: 7200000
27 |     segment.bytes: 1073741824
28 | ```
29 | You can add this by navigating to the `kafka` namespace in the administration console, going to Installed Operators, finding the `Red Hat Integration - AMQ Streams` operator and creating a Kafka topic.
30 |
31 | Or you can run this command:
32 |
33 | ```
34 | % oc create -f kafka/kafka-topic.yaml -n kafka
35 |
36 | kafkatopic.kafka.strimzi.io/knative-demo-topic created
37 | ```
38 | Verify by running `oc get kafkatopics -n kafka` and you should see `knative-demo-topic` in the list.
39 |
40 | ## Add a Kafka Source
41 |
42 | We will now create a KafkaSource that connects the topic we created above to our serverless service, which acts as the sink. This way any messages posted to the topic will be delivered to the serverless application.
43 |
44 | ```
45 | apiVersion: sources.knative.dev/v1alpha1
46 | kind: KafkaSource
47 | metadata:
48 |   name: kafka-source
49 | spec:
50 |   bootstrapServers:
51 |     - my-cluster-kafka-bootstrap.kafka:9092
52 |   topics:
53 |     - knative-demo-topic
54 |   sink:
55 |     ref:
56 |       apiVersion: serving.knative.dev/v1
57 |       kind: Service
58 |       name: msgtxr-sl
59 | ```
60 |
61 | * Note the bootstrap server and topic, which point to the topic we created on the Kafka cluster in the `kafka` namespace
62 | * Note the sink that points to knative service `msgtxr-sl`
63 |
64 | Let us create this kafka source
65 |
66 | ```
67 | % oc create -f kafka/kafka-source.yaml
68 |
69 | kafkasource.sources.knative.dev/kafka-source created
70 | ```
71 |
72 | Verify the kafka source that we just added
73 | ```
74 | % oc get kafkasource.sources.knative.dev
75 |
76 | NAME TOPICS BOOTSTRAPSERVERS READY REASON AGE
77 | kafka-source [knative-demo-topic] [my-cluster-kafka-bootstrap.kafka:9092] True 2m54s
78 | ```
79 | Also note that the Kafka source is now running as a pod
80 |
81 | ```
82 | % oc get po | grep Running
83 | kafkasource-kafka-source-4767db0c-73cc-46c2-ad3c-14b806058sfkx2 1/1 Running 0 10m
84 | ```
85 |
86 | ## Post Messages
87 |
88 | We can now post messages to the Kafka topic and have them delivered to our serverless application.
89 |
90 | Let us test this by running a pod that allows us to post these messages. Start a `kafka-producer` pod with the following command
91 |
92 | ```
93 | oc run kafka-producer -ti --image=strimzi/kafka:0.14.0-kafka-2.3.0 --rm=true --restart=Never -- bin/kafka-console-producer.sh --broker-list my-cluster-kafka-bootstrap.kafka:9092 --topic knative-demo-topic
94 | ```
95 | * Note the broker and topic we are connecting to from this pod
96 |
97 | This will connect you to the running pod and display a prompt `>`
98 |
99 | Now start sending messages that will be posted to the Kafka topic.
100 |
101 | ```
102 | If you don't see a command prompt, try pressing enter.
103 | >hello world
104 | >this is a message posted to kafka topic
105 | >^C
106 | ```
107 | **Note** you can exit the running pod by pressing Ctrl+C.
108 |
109 | Watch the pod logs for your serverless service and you will see the messages delivered there. **Note** you will have to check the logs before the serverless pod goes down.
110 |
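One way to follow those logs is with the same command used later in the Kafka channel chapter (it assumes the service name `msgtxr-sl`):

```
oc logs -c user-container -f $(oc get po -l serving.knative.dev/service=msgtxr-sl -o=jsonpath='{.items[0].metadata.name}')
```

The output will look similar to this:
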
111 | ```
112 | __ ____ __ _____ ___ __ ____ ______
113 | --/ __ \/ / / / _ | / _ \/ //_/ / / / __/
114 | -/ /_/ / /_/ / __ |/ , _/ ,< / /_/ /\ \
115 | --\___\_\____/_/ |_/_/|_/_/|_|\____/___/
116 | 2020-05-15 13:58:57,791 INFO [io.quarkus] (main) getting-started 1.0-SNAPSHOT (powered by Quarkus 1.3.2.Final) started in 0.014s. Listening on: http://0.0.0.0:8080
117 | 2020-05-15 13:58:57,791 INFO [io.quarkus] (main) Profile prod activated.
118 | 2020-05-15 13:58:57,791 INFO [io.quarkus] (main) Installed features: [cdi, resteasy]
119 | 13:58:58.309 IN hello world OUT MESSAGE TITLE CASED : "Hello World"
120 | 13:59:13.837 IN this is a message posted to kafka topic OUT MESSAGE TITLE CASED : "This Is A Message Posted To Kafka Topic"
121 | ```
122 | ## Conclusion
123 |
124 | In this chapter, we have learned to set up a Kafka source that reads messages from a Kafka topic and delivers them to a consumer, which we configured to be our Knative serverless application.
125 |
126 |
127 | ## Clean up
128 |
129 | * The test pod `kafka-producer` should remove itself when you press Ctrl+C, but if it timed out before you pressed it, you may have to delete the pod by running
130 |
131 | ```
132 | % oc delete po kafka-producer
133 |
134 | pod "kafka-producer" deleted
135 | ```
136 | * Remove kafka source
137 |
138 | ```
139 | % oc delete kafkasource.sources.knative.dev kafka-source
140 |
141 | kafkasource.sources.knative.dev "kafka-source" deleted
142 | ```
143 |
144 | * Remove kafka topic
145 |
146 | ```
147 | % oc delete kafkatopic knative-demo-topic -n kafka
148 |
149 | kafkatopic.kafka.strimzi.io "knative-demo-topic" deleted
150 | ```
151 |
152 |
--------------------------------------------------------------------------------
/11.BrokerTriggerWithKafka.md:
--------------------------------------------------------------------------------
1 | # Using Broker and Trigger with Kafka
2 |
3 | In this example we will set up an API source that listens to Kubernetes events and sinks them to a broker. We will then add a trigger that configures delivery of these events to a serverless application.
4 |
5 | ## Prerequisites
6 |
7 | * Knative Serving and Knative Eventing are deployed on your cluster using OpenShift Serverless Operator
8 | * Red Hat Integration - AMQ Streams Operator is deployed and a Kafka cluster `my-cluster` is running in the `kafka` namespace
9 | * You have a Knative service deployed. If not, add one by running `kn service create msgtxr-sl --image=image-registry.openshift-image-registry.svc:5000/kn-demo/msgtxr -l app.openshift.io/runtime=quarkus --env format=none`
10 | * You will need a cluster administrator to help with a cluster role binding
11 |
12 | ## Create a Service Account with Event Watcher Role
13 |
14 | In order for our knative API event source to watch for events, it needs appropriate permissions. We will assign these permissions to a service account and run the API source with this service account.
15 |
16 | * Add a Service Account named `events-sa`
17 |
18 | ```
19 | % oc create serviceaccount events-sa
20 |
21 | serviceaccount/events-sa created
22 | ```
23 |
24 | * Your cluster administrator should create a cluster role to watch events as below
25 |
26 | ```
27 | % oc create clusterrole event-watcher --verb=get,list,watch --resource=events
28 |
29 | clusterrole.rbac.authorization.k8s.io/event-watcher created
30 | ```
31 |
32 | * Your cluster administrator should create a cluster role binding to assign the `event-watcher` cluster role to the `events-sa` service account.
33 |
34 | ```
35 | % oc adm policy add-cluster-role-to-user event-watcher -z events-sa
36 |
37 | clusterrole.rbac.authorization.k8s.io/event-watcher added: "events-sa"
38 | ```
39 |
40 | ## Cluster Administrator should change the default channel at broker creation to Kafka Channel
41 |
42 | For this exercise, we want the channel that gets created along with a broker to be a Kafka channel.
43 |
44 | So the cluster administrator should change the configmap `config-br-default-channel` (in the `knative-eventing` namespace) so that a `KafkaChannel` is created when a broker is added to your namespace.
45 |
46 | The cluster admin can edit the data using the administration console or directly from the CLI as below
47 |
48 | Edit configmap
49 |
50 | ```
51 | oc edit cm config-br-default-channel -n knative-eventing --as system:admin
52 | ```
53 |
54 | Change data from
55 | ```
56 | data:
57 |   channelTemplateSpec: |
58 |     apiVersion: messaging.knative.dev/v1alpha1
59 |     kind: InMemoryChannel
60 | ```
61 | to
62 |
63 | ```
64 | data:
65 |   channelTemplateSpec: |
66 |     apiVersion: messaging.knative.dev/v1alpha1
67 |     kind: KafkaChannel
68 | ```
69 | so that `InMemoryChannel` is replaced with `KafkaChannel` as the default
70 |
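If you prefer a non-interactive change, a single `oc patch` applies the same edit; this is a sketch that assumes the configmap lives in the `knative-eventing` namespace:

```
oc -n knative-eventing patch configmap config-br-default-channel --type merge \
  -p '{"data":{"channelTemplateSpec":"apiVersion: messaging.knative.dev/v1alpha1\nkind: KafkaChannel\n"}}'
```
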
71 | ## Add a Trigger and Broker
72 |
73 | Now let us add a trigger that sinks to our knative service. The `--inject-broker` option will also add the broker at the same time.
74 |
75 | ```
76 | kn trigger create testevents-trigger \
77 | --inject-broker --broker default \
78 | --sink svc:msgtxr-sl
79 | ```
80 |
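As an aside, a trigger can also filter on CloudEvent attributes so that only matching events reach the sink. The filtered trigger below is a hypothetical example that is not needed for this lab; the event type value is an assumption, so check the events your source actually emits:

```
kn trigger create testevents-trigger-filtered \
  --broker default \
  --filter type=dev.knative.apiserver.resource.update \
  --sink svc:msgtxr-sl
```
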
81 | Verify that the broker pods are running
82 |
83 | ```
84 | % oc get po | grep Running
85 | default-broker-filter-7d89b8d949-nqfrr 1/1 Running 0 3m59s
86 | default-broker-ingress-6b5d8cf558-jttvp 1/1 Running 0 3m58s
87 | ```
88 |
89 | and check the trigger list
90 |
91 | ```
92 | % kn trigger list
93 | NAME BROKER SINK AGE CONDITIONS READY REASON
94 | testevents-trigger default svc:msgtxr-sl 43s 5 OK / 5 True
95 | ```
96 |
97 | You should also see a KafkaChannel added, as we configured that as the default for the broker.
98 |
99 | ```
100 | % oc get channel
101 | NAME READY REASON URL AGE
102 | kafkachannel.messaging.knative.dev/default-kne-trigger True http://default-kne-trigger-kn-channel.kn-demo.svc.cluster.local 46s
103 | ```
104 |
105 | and you should see a topic named `knative-messaging-kafka.kn-demo.default-kne-trigger` added to the kafka cluster when you run `oc get kafkatopics -n kafka`
106 |
107 |
108 | ## Add API event source
109 |
110 | Now let us add an API event source that watches for Kubernetes events and sends them to the default broker.
111 |
112 | ```
113 | kn source apiserver create testevents-kafka --resource Event:v1 --service-account events-sa --sink broker:default
114 | ```
115 |
116 | Now you should see the API event source pod running, and as it starts sending events, you will see the serverless pod coming up in a few seconds.
117 |
118 | ```
119 | % oc get po | grep Running
120 | apiserversource-testevents-90c1477d-9c5d-40ae-91c2-9d71d3696jf6 1/1 Running 0 12s
121 | default-broker-filter-7d89b8d949-6w4wp 1/1 Running 0 5m23s
122 | default-broker-ingress-6b5d8cf558-lm889 1/1 Running 0 5m23s
123 | msgtxr-sl-hjcsm-3-deployment-78db8688f8-h4zmw 2/2 Running 0 7s
124 | ```
125 |
126 | Once the event consumer (your knative service pod) is up, watch its logs to see the events coming in, for example with `oc logs -c user-container -f $(oc get po -l serving.knative.dev/service=msgtxr-sl -o=jsonpath='{.items[0].metadata.name}')`.
127 |
128 | ## Conclusion
129 |
130 | In this chapter, we have configured an API event source to send kubernetes events to a knative service via Broker and Trigger.
131 |
132 | ## Cleanup
133 |
134 | * Remove trigger
135 | ```
136 | kn trigger delete testevents-trigger
137 | ```
138 |
139 | * Remove API event source
140 |
141 | ```
142 | kn source apiserver delete testevents-kafka
143 | ```
144 |
145 | * Cluster admin to relabel the namespace
146 | ```
147 | oc label namespace kn-demo knative-eventing-injection=disabled --overwrite=true
148 | ```
149 |
150 | * Delete broker pods
151 | ```
152 | oc delete brokers.eventing.knative.dev --all
153 | ```
154 |
155 | * Cluster admin to remove cluster role binding and cluster role
156 |
157 | ```
158 | oc adm policy remove-cluster-role-from-user event-watcher -z events-sa
159 | ```
160 |
161 | ```
162 | oc delete clusterrole event-watcher
163 | ```
164 |
165 |
166 | * Delete service account
167 |
168 | ```
169 | oc delete sa events-sa
170 | ```
171 |
--------------------------------------------------------------------------------
/12.KafkaChannelSubscription.md:
--------------------------------------------------------------------------------
1 | # Adding Kafka Channel and Subscription
2 |
3 | In this chapter we will learn to set up a Kafka channel to receive events from an event source and subscribe a knative service to listen to those events.
4 |
5 | ## Prerequisites
6 | * Knative Serving and Knative Eventing are deployed on your cluster using OpenShift Serverless Operator
7 | * Red Hat Integration - AMQ Streams Operator is deployed and a Kafka cluster `my-cluster` is running in the `kafka` namespace
8 | * You have a Knative service deployed. If not, add one by running `kn service create msgtxr-sl --image=image-registry.openshift-image-registry.svc:5000/kn-demo/msgtxr -l app.openshift.io/runtime=quarkus --env format=none`
9 |
10 |
11 | ## Create a Kafka Channel in your namespace
12 |
13 | Add a new channel named `testchannel-one` in the namespace `kn-demo` with the following spec
14 |
15 | ```
16 | apiVersion: messaging.knative.dev/v1alpha1
17 | kind: KafkaChannel
18 | metadata:
19 |   name: testchannel-one
20 | ```
21 | by running
22 |
23 | ```
24 | % oc create -f kafka/kafka-channel.yaml
25 |
26 | kafkachannel.messaging.knative.dev/testchannel-one created
27 | ```
28 |
29 | Verify the channel created is a kafka channel by running `oc get channel`
30 |
31 | ```
32 | % oc get channel
33 | NAME READY REASON URL AGE
34 | kafkachannel.messaging.knative.dev/testchannel-one True http://testchannel-one-kn-channel.kn-demo.svc.cluster.local 19s
35 | ```
36 |
37 | Note the URL for this testchannel as we will be using that while setting up the source.
38 |
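If you want to read that URL programmatically instead of copying it from the table, the channel also exposes it in its status; the field path below follows the Knative Addressable contract and is an assumption for this version:

```
oc get kafkachannel testchannel-one -o jsonpath='{.status.address.url}'
```
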
39 | Also notice that a new topic `knative-messaging-kafka.kn-demo.testchannel-one` has been added to the Kafka cluster `my-cluster` in the namespace `kafka`; you can verify it by running
40 |
41 | ```oc get kafkatopic -n kafka```
42 |
43 |
44 | ## Add an event source
45 |
46 | Now let us create an event source that posts events to this channel. We will add a ping source by running
47 |
48 | ```
49 | kn source ping create my-ping-source --data="from pingsource" \
50 | --schedule="* * * * *" \
51 | --sink=http://testchannel-one-kn-channel.kn-demo.svc.cluster.local
52 | ```
53 | * `schedule` shows that an event is posted every minute
54 | * `data` is the data that will be sent with the event
55 | * `sink` is configured with the channel URL we noted above to post the events to the kafka channel
56 |
57 | Verify that the ping source is created
58 | ```
59 | % kn source ping list
60 | NAME SCHEDULE SINK AGE CONDITIONS READY REASON
61 | my-ping-source * * * * * 16s 6 OK / 6 True
62 | ```
63 |
64 | Also note that the ping source is deployed as a pod
65 | ```
66 | % oc get po| grep Running
67 | msgtxr-1-gg5bf 1/1 Running 0 113m
68 | pingsource-my-ping-source-38669b3c-352a-44ef-a7a7-8fe3a7b5m9xww 1/1 Running 0 44s
69 | ```
70 |
71 | This pod produces events that are sent to the kafka channel.
72 |
73 | ## Add a subscription
74 |
75 | Now let us create a subscription by adding the knative service `msgtxr-sl` as a subscriber to the Kafka channel `testchannel-one`. Here is the spec
76 |
77 | ```
78 | apiVersion: messaging.knative.dev/v1alpha1
79 | kind: Subscription
80 | metadata:
81 |   name: event-subscription
82 | spec:
83 |   channel:
84 |     apiVersion: messaging.knative.dev/v1alpha1
85 |     kind: KafkaChannel
86 |     name: testchannel-one
87 |   subscriber:
88 |     ref:
89 |       apiVersion: serving.knative.dev/v1alpha1
90 |       kind: Service
91 |       name: msgtxr-sl
92 | ```
93 |
94 | Add the subscription by running
95 |
96 | ```
97 | % oc create -f kafka/event-subscription.yaml
98 |
99 | subscription.messaging.knative.dev/event-subscription created
100 | ```
101 |
102 | Verify subscription is created
103 |
104 | ```
105 | % oc get subscriptions.messaging.knative.dev
106 | NAME READY REASON AGE
107 | event-subscription True 3m3s
108 | ```
109 |
110 | The knative service should now come up. Check the pod logs `oc logs -c user-container -f $(oc get po -l serving.knative.dev/service=msgtxr-sl -o=jsonpath='{.items[0].metadata.name}')` to see the events being delivered
111 |
112 | ```
113 | % oc logs -c user-container -f $(oc get po -l serving.knative.dev/service=msgtxr-sl -o=jsonpath='{.items[0].metadata.name}')
114 | __ ____ __ _____ ___ __ ____ ______
115 | --/ __ \/ / / / _ | / _ \/ //_/ / / / __/
116 | -/ /_/ / /_/ / __ |/ , _/ ,< / /_/ /\ \
117 | --\___\_\____/_/ |_/_/|_/_/|_|\____/___/
118 | 2020-05-15 22:56:08,242 INFO [io.quarkus] (main) getting-started 1.0-SNAPSHOT (powered by Quarkus 1.3.2.Final) started in 0.009s. Listening on: http://0.0.0.0:8080
119 | 2020-05-15 22:56:08,242 INFO [io.quarkus] (main) Profile prod activated.
120 | 2020-05-15 22:56:08,242 INFO [io.quarkus] (main) Installed features: [cdi, resteasy]
121 | 22:56:08.735 IN {"body":"from pingsource"} OUT {"body":"from pingsource"}
122 | 22:57:00.181 IN {"body":"from pingsource"} OUT {"body":"from pingsource"}
123 | 22:58:00.200 IN {"body":"from pingsource"} OUT {"body":"from pingsource"}
124 |
125 | ```
126 |
127 | ## Conclusion
128 | In this chapter we learned to set up a Kafka channel and create a subscription from this channel to the knative service. We set up a ping source to deliver events to the Kafka channel.
129 |
130 | ## Clean up
131 |
132 | * Delete subscription
133 |
134 | ```
135 | oc delete -f kafka/event-subscription.yaml
136 | ```
137 | * Delete event source
138 |
139 | ```
140 | kn source ping delete my-ping-source
141 | ```
142 | * Delete kafka channel
143 |
144 | ```
145 | oc delete -f kafka/kafka-channel.yaml
146 | ```
147 | * Delete kafka topic
148 |
149 | ```
150 | oc delete kafkatopic knative-messaging-kafka.kn-demo.testchannel-one -n kafka
151 | ```
152 |
153 |
154 |
--------------------------------------------------------------------------------
/13.SQSDirectSource.md:
--------------------------------------------------------------------------------
1 | # Adding AWS SQS Source as Direct Source to your Application
2 |
3 | ## Prerequisites
4 | * Create a queue using Simple Queue Service in AWS. Note the URL for your queue.
5 | * Cluster administrator should [install AWS SQS Controller](./1.InstallKnativeAndTekton.md#install-aws-sqs-controller) on your cluster
6 | * You will need your AWS credentials to configure as a secret
7 | * You have a Knative service deployed. If not, add one by running `kn service create msgtxr-sl --image=image-registry.openshift-image-registry.svc:5000/kn-demo/msgtxr -l app.openshift.io/runtime=quarkus --env format=none`
8 |
9 | ## Add a secret
10 |
11 | Create a file with your AWS credentials
12 |
13 | ```
14 | % cat credentials
15 | [default]
16 | aws_access_key_id =
17 | aws_secret_access_key =
18 | ```
19 |
20 | Create a secret referencing this file
21 |
22 | ```
23 | oc create secret generic aws-credentials --from-file=credentials=./credentials
24 | ```
25 |
26 | ## Add AWS SQS Source
27 |
28 | We will now create a source that reads messages posted to your AWS SQS Queue and sinks those messages to your knative service.
29 |
30 | `AwsSqsSource` is a custom resource definition added when the administrator installed the controller. We will create a custom resource for our application. The controller manages this custom resource.
31 |
32 | Let us understand the AwsSqsSource custom resource shown below. It reads data from the SQS queue on AWS and pushes it to the sink. In our case we are configuring the sink to be our knative service. Also note that this source references the secret created above.
33 |
34 | ```
35 | % cat awssqs/awssqs-source-direct.yaml
36 | # Replace the following before applying this file:
37 | # QUEUE_URL: Replace with the AWS SQS queue.
38 |
39 | apiVersion: sources.knative.dev/v1alpha1
40 | kind: AwsSqsSource
41 | metadata:
42 |   name: awssqs-sample-source
43 | spec:
44 |   awsCredsSecret:
45 |     name: aws-credentials
46 |     key: credentials
47 |   queueUrl: QUEUE_URL
48 |   sink:
49 |     apiVersion: serving.knative.dev/v1
50 |     kind: Service
51 |     name: msgtxr-sl
52 | ```
53 |
54 | Set the value of an environment variable to your SQS URL
55 |
56 | ```
57 | export QUEUE_URL=
58 | ```
59 |
60 | Let us create the source, replacing the value of QUEUE_URL, by running
61 |
62 | ```
63 | sed -e "s|QUEUE_URL|$QUEUE_URL|g" awssqs/awssqs-source-direct.yaml | oc create -f -
64 | ```
65 | and notice
66 |
67 | ```
68 | awssqssource.sources.knative.dev/awssqs-sample-source created
69 | ```
70 | You should see the awssqssource pod running.
71 |
72 | ```
73 | % oc get po
74 | NAME READY STATUS RESTARTS AGE
75 | awssqs-awssqs-sample-source-k9rcm-7d48dbb7f6-l2mld 1/1 Running 0 116s
76 | ```
77 |
78 | It will show up on your developer console as below:
79 |
80 | 
81 |
82 | ## Post Messages and Test
83 |
84 | Now post a message to the queue by running the following command from the command line. Alternatively, you can also post a message from the AWS Console
85 |
86 | ```
87 | aws sqs send-message --queue-url=$QUEUE_URL --message-body="hello world"
88 | ```
89 |
90 | Notice the knative service pod scales up and the logs show the following output.
91 |
92 |
93 | ```
94 | __ ____ __ _____ ___ __ ____ ______
95 | --/ __ \/ / / / _ | / _ \/ //_/ / / / __/
96 | -/ /_/ / /_/ / __ |/ , _/ ,< / /_/ /\ \
97 | --\___\_\____/_/ |_/_/|_/_/|_|\____/___/
98 | 2020-06-24 20:34:54,779 INFO [io.quarkus] (main) getting-started 1.0-SNAPSHOT (powered by Quarkus 1.3.2.Final) started in 0.017s. Listening on: http://0.0.0.0:8080
99 | 2020-06-24 20:34:54,779 INFO [io.quarkus] (main) Profile prod activated.
100 | 2020-06-24 20:34:54,779 INFO [io.quarkus] (main) Installed features: [cdi, resteasy]
101 | 20:34:55.864 IN {"Attributes":{"SentTimestamp":"1593030891645"},"Body":"hello world","MD5OfBody":"5eb63bbbe01eeed093cb22bb8f5acdc3","MD5OfMessageAttributes":null,"MessageAttributes":null,"MessageId":"149e8578-ccc5-4e80-911e-d898dbf8905d","ReceiptHandle":"AQEBdEhLPsrD6TdESml95/OVJYeFPUvcfEsELxqaspiHN/ZlrDehmJdw9gQeycxud4KwTBXdelTmSPPR1Jfjn/nTVzvtlEDubGeH1mrPAzb4R4Du1n9FEZrGMJKGAwpp5TNaa7ynaoEJm7LFk3x9X0LQAucMZkolBL0NpW1zud9ouASb11Iqv/OEb087AnrhtKu6StCXqX9sxkKL2scNUOhIExg9EKtX9Gr77VrX+ynSo2ZPfBCDlfDtQiQ1MTV6bY207/zej2mgjxUXVHqTfBWY/0wADwkM6W5niKaQCW59o92YSC4tOdJVuoZRjpamT79WLPjdf6N6hR6uAM3230VBzxeDTKzFRwb6x0J3++Lc2jFUHlJ5W4rPd7CmB9+LHHJMYM9JzNhGbR9eaVeXqPe6mg=="} OUT {"Attributes":{"SentTimestamp":"1593030891645"},"Body":"hello world","MD5OfBody":"5eb63bbbe01eeed093cb22bb8f5acdc3","MD5OfMessageAttributes":null,"MessageAttributes":null,"MessageId":"149e8578-ccc5-4e80-911e-d898dbf8905d","ReceiptHandle":"AQEBdEhLPsrD6TdESml95/OVJYeFPUvcfEsELxqaspiHN/ZlrDehmJdw9gQeycxud4KwTBXdelTmSPPR1Jfjn/nTVzvtlEDubGeH1mrPAzb4R4Du1n9FEZrGMJKGAwpp5TNaa7ynaoEJm7LFk3x9X0LQAucMZkolBL0NpW1zud9ouASb11Iqv/OEb087AnrhtKu6StCXqX9sxkKL2scNUOhIExg9EKtX9Gr77VrX+ynSo2ZPfBCDlfDtQiQ1MTV6bY207/zej2mgjxUXVHqTfBWY/0wADwkM6W5niKaQCW59o92YSC4tOdJVuoZRjpamT79WLPjdf6N6hR6uAM3230VBzxeDTKzFRwb6x0J3++Lc2jFUHlJ5W4rPd7CmB9+LHHJMYM9JzNhGbR9eaVeXqPe6mg=="}
102 | ```
103 | Post a few more messages and test.
104 |
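For example, a small shell loop can post several test messages at once, reusing the same `aws sqs send-message` command from above:

```
for i in 1 2 3 4 5; do
  aws sqs send-message --queue-url=$QUEUE_URL --message-body="test message $i"
done
```
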
105 | ## Conclusion
106 |
107 | In this chapter we have learnt to configure AWS SQS as a source for your knative service
108 |
109 |
110 | ## Cleanup
111 |
112 | Delete AWS SQS Source
113 |
114 | ```
115 | oc delete -f awssqs/awssqs-source-direct.yaml
116 | awssqssource.sources.knative.dev
117 | ```
118 | Delete Secret
119 |
120 | ```
121 | oc delete secret/aws-credentials
122 | ```
123 |
124 |
--------------------------------------------------------------------------------
/14.SQSBrokerTrigger.md:
--------------------------------------------------------------------------------
1 | # Using Broker Trigger with AWS SQS Source
2 |
3 | ## Prerequisites
4 | * Create a queue using Simple Queue Service in AWS. Note the URL for your queue.
5 | * Cluster administrator should [install AWS SQS Controller](./1.InstallKnativeAndTekton.md#install-aws-sqs-controller) on your cluster
6 | * You will need your AWS credentials to configure as a secret
7 | * You have a Knative service deployed. If not, add one by running `kn service create msgtxr-sl --image=image-registry.openshift-image-registry.svc:5000/kn-demo/msgtxr -l app.openshift.io/runtime=quarkus --env format=none`
8 |
9 | ## Add a secret
10 |
11 | Create a file with your AWS credentials
12 |
13 | ```
14 | % cat credentials
15 | [default]
16 | aws_access_key_id =
17 | aws_secret_access_key =
18 | ```
19 |
20 | Create a secret referencing this file
21 |
22 | ```
23 | oc create secret generic aws-credentials --from-file=credentials=./credentials
24 | ```
25 |
26 | ## Add a Trigger and Broker
27 |
28 | Now let us add a trigger that sinks to our knative service. The `--inject-broker` option will also add the broker at the same time.
29 |
30 | ```
31 | kn trigger create testevents-trigger \
32 | --inject-broker --broker default \
33 | --sink svc:msgtxr-sl
34 | ```
35 |
36 | Verify that the broker pods are running
37 |
38 | ```
39 | % oc get po | grep Running
40 | default-broker-filter-7d89b8d949-2s6k7 1/1 Running 0 54s
41 | default-broker-ingress-6b5d8cf558-5ptvf 1/1 Running 0 54s
42 | ```
43 |
44 | and check the trigger list
45 |
46 | ```
47 | % kn trigger list
48 | NAME BROKER SINK AGE CONDITIONS READY REASON
49 | testevents-trigger default svc:msgtxr-sl 43s 5 OK / 5 True
50 | ```
51 |
52 |
53 | ## Add AWS SQS Source
54 |
55 | We will now create a source that reads messages posted to your AWS SQS Queue and sends those messages to a broker.
56 |
57 | `AwsSqsSource` is a custom resource definition added when the administrator installed the controller. We will create a custom resource for our application. The controller manages this custom resource.
58 |
59 | Let us understand the AwsSqsSource shown below. It reads data from the SQS queue on AWS and pushes it to the sink. In our case we are configuring the sink to be a broker named `default`. Also note that this source references the secret created above.
60 |
61 | ```
62 | % cat awssqs/awssqs-source-broker.yaml
63 | # Replace the following before applying this file:
64 | # QUEUE_URL: Replace with the AWS SQS queue.
65 |
66 | apiVersion: sources.knative.dev/v1alpha1
67 | kind: AwsSqsSource
68 | metadata:
69 |   name: awssqs-sample-source
70 | spec:
71 |   awsCredsSecret:
72 |     name: aws-credentials
73 |     key: credentials
74 |   queueUrl: QUEUE_URL
75 |   sink:
76 |     apiVersion: eventing.knative.dev/v1alpha1
77 |     kind: Broker
78 |     name: default
79 | ```
80 |
81 | Set the value of an environment variable to your SQS URL
82 |
83 | ```
84 | export QUEUE_URL=
85 | ```
86 |
87 | Let us create the source, replacing the value of QUEUE_URL, by running
88 |
89 | ```
90 | sed -e "s|QUEUE_URL|$QUEUE_URL|g" awssqs/awssqs-source-broker.yaml | oc create -f -
91 | ```
92 | and notice
93 |
94 | ```
95 | awssqssource.sources.knative.dev/awssqs-sample-source created
96 | ```
97 | You should see the awssqssource pod running.
98 |
99 | ```
100 | % oc get po
101 | NAME READY STATUS RESTARTS AGE
102 | awssqs-awssqs-sample-source-kzvdl-5f67b6db8d-58j4w 1/1 Running 0 10s
103 | default-broker-filter-7d89b8d949-2s6k7 1/1 Running 0 84s
104 | default-broker-ingress-6b5d8cf558-5ptvf 1/1 Running 0 84s
105 | ```
106 |
107 | ## Post Messages and Test
108 |
109 | Now post a message to the queue by running the following command from the command line. Alternatively, you can also post a message from the AWS Console
110 |
111 | ```
112 | aws sqs send-message --queue-url=$QUEUE_URL --message-body="hello world"
113 | ```
114 |
115 | In a few seconds you will notice the knative service pod scales up and the logs show the following output.
116 |
117 |
118 | ```
119 | __ ____ __ _____ ___ __ ____ ______
120 | --/ __ \/ / / / _ | / _ \/ //_/ / / / __/
121 | -/ /_/ / /_/ / __ |/ , _/ ,< / /_/ /\ \
122 | --\___\_\____/_/ |_/_/|_/_/|_|\____/___/
123 | 2020-06-24 20:34:54,779 INFO [io.quarkus] (main) getting-started 1.0-SNAPSHOT (powered by Quarkus 1.3.2.Final) started in 0.017s. Listening on: http://0.0.0.0:8080
124 | 2020-06-24 20:34:54,779 INFO [io.quarkus] (main) Profile prod activated.
125 | 2020-06-24 20:34:54,779 INFO [io.quarkus] (main) Installed features: [cdi, resteasy]
126 | 20:34:55.864 IN {"Attributes":{"SentTimestamp":"1593030891645"},"Body":"hello world","MD5OfBody":"5eb63bbbe01eeed093cb22bb8f5acdc3","MD5OfMessageAttributes":null,"MessageAttributes":null,"MessageId":"149e8578-ccc5-4e80-911e-d898dbf8905d","ReceiptHandle":"AQEBdEhLPsrD6TdESml95/OVJYeFPUvcfEsELxqaspiHN/ZlrDehmJdw9gQeycxud4KwTBXdelTmSPPR1Jfjn/nTVzvtlEDubGeH1mrPAzb4R4Du1n9FEZrGMJKGAwpp5TNaa7ynaoEJm7LFk3x9X0LQAucMZkolBL0NpW1zud9ouASb11Iqv/OEb087AnrhtKu6StCXqX9sxkKL2scNUOhIExg9EKtX9Gr77VrX+ynSo2ZPfBCDlfDtQiQ1MTV6bY207/zej2mgjxUXVHqTfBWY/0wADwkM6W5niKaQCW59o92YSC4tOdJVuoZRjpamT79WLPjdf6N6hR6uAM3230VBzxeDTKzFRwb6x0J3++Lc2jFUHlJ5W4rPd7CmB9+LHHJMYM9JzNhGbR9eaVeXqPe6mg=="} OUT {"Attributes":{"SentTimestamp":"1593030891645"},"Body":"hello world","MD5OfBody":"5eb63bbbe01eeed093cb22bb8f5acdc3","MD5OfMessageAttributes":null,"MessageAttributes":null,"MessageId":"149e8578-ccc5-4e80-911e-d898dbf8905d","ReceiptHandle":"AQEBdEhLPsrD6TdESml95/OVJYeFPUvcfEsELxqaspiHN/ZlrDehmJdw9gQeycxud4KwTBXdelTmSPPR1Jfjn/nTVzvtlEDubGeH1mrPAzb4R4Du1n9FEZrGMJKGAwpp5TNaa7ynaoEJm7LFk3x9X0LQAucMZkolBL0NpW1zud9ouASb11Iqv/OEb087AnrhtKu6StCXqX9sxkKL2scNUOhIExg9EKtX9Gr77VrX+ynSo2ZPfBCDlfDtQiQ1MTV6bY207/zej2mgjxUXVHqTfBWY/0wADwkM6W5niKaQCW59o92YSC4tOdJVuoZRjpamT79WLPjdf6N6hR6uAM3230VBzxeDTKzFRwb6x0J3++Lc2jFUHlJ5W4rPd7CmB9+LHHJMYM9JzNhGbR9eaVeXqPe6mg=="}
127 | ```
128 | Post a few more messages and test.
129 |
130 | ## Conclusion
131 |
132 | In this chapter we have learnt to configure AWS SQS as a source that receives the messages sent to AWS SQS. We have configured the AwsSqsSource to deliver these messages to a broker and set up a trigger to deliver the messages to a knative service.
133 |
134 |
135 | ## Cleanup
136 |
137 | Remove trigger
138 | ```
139 | kn trigger delete testevents-trigger
140 | ```
141 |
142 | **Cluster Administrator** to relabel the namespace
143 | ```
144 | oc label namespace kn-demo knative-eventing-injection=disabled --overwrite=true
145 | ```
146 |
147 | Delete broker pods
148 | ```
149 | oc delete brokers.eventing.knative.dev --all
150 | ```
151 |
152 | Delete AWS SQS Source
153 |
154 | ```
155 | oc delete -f awssqs/awssqs-source-broker.yaml
156 |
157 | awssqssource.sources.knative.dev
158 | ```
159 |
160 | Delete Secret
161 |
162 | ```
163 | oc delete secret/aws-credentials
164 | ```
165 |
166 |
167 |
--------------------------------------------------------------------------------
/15.CamelDirectSource.md:
--------------------------------------------------------------------------------
1 | # Adding Camel Source as Direct Source to your Application
2 |
3 | ## Prerequisites
4 | * Cluster administrator should [install Knative Apache Camel](./1.InstallKnativeAndTekton.md#installknativeapachecamel) on your cluster
5 | * You have a Knative service deployed. If not, add one by running `kn service create msgtxr-sl --image=image-registry.openshift-image-registry.svc:5000/kn-demo/msgtxr -l app.openshift.io/runtime=quarkus --env format=none`
6 |
7 |
8 | ## Add a Camel Source
9 |
10 | We will now create a camel source that generates messages and sinks those messages to your knative service.
11 |
12 | Navigate to the `Add` menu option, choose the `Event Sources` tile, and then Camel Source.
13 |
14 | Paste the following yaml to create a camel source that sinks to knative service.
15 |
16 | ```
17 | apiVersion: sources.knative.dev/v1alpha1
18 | kind: CamelSource
19 | metadata:
20 |   name: camel-timer-source
21 |   namespace: kn-demo
22 | spec:
23 |   source:
24 |     flow:
25 |       from:
26 |         uri: 'timer:tick?period=3000'
27 |         steps:
28 |           - set-body:
29 |               constant: Hello World!
30 |   sink:
31 |     ref:
32 |       apiVersion: serving.knative.dev/v1
33 |       kind: Service
34 |       name: msgtxr-sl
35 | ```
36 | as shown below
37 |
38 | 
39 |
40 | Alternatively, you can also create this by running
41 | `oc apply -f camelk/camel-source.yaml`
42 |
43 | This will build and deploy a Camel K timer source that periodically sends messages to the knative service. It will show up on your developer console as below:
44 |
45 | 
46 |
47 |
48 | ## Verify
49 |
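Before checking the application logs, you can confirm that the CamelSource itself was created and reports ready; the resource name comes from the YAML above:

```
oc get camelsource camel-timer-source -n kn-demo
```
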
50 | Check the logs for the knative service to find the messages being posted
51 |
52 | ```
53 | __ ____ __ _____ ___ __ ____ ______
54 | --/ __ \/ / / / _ | / _ \/ //_/ / / / __/
55 | -/ /_/ / /_/ / __ |/ , _/ ,< / /_/ /\ \
56 | --\___\_\____/_/ |_/_/|_/_/|_|\____/___/
57 | 2020-07-06 20:41:23,372 INFO [io.quarkus] (main) getting-started 1.0-SNAPSHOT (powered by Quarkus 1.3.2.Final) started in 0.012s. Listening on: http://0.0.0.0:8080
58 | 2020-07-06 20:41:23,372 INFO [io.quarkus] (main) Profile prod activated.
59 | 2020-07-06 20:41:23,372 INFO [io.quarkus] (main) Installed features: [cdi, resteasy]
60 | 20:41:23.764 IN Hello World! OUT Hello World!
61 | 20:41:23.764 IN Hello World! OUT Hello World!
62 | 20:41:26.488 IN Hello World! OUT Hello World!
63 | 20:41:47.613 IN Hello World! OUT Hello World!
64 | 20:41:50.376 IN Hello World! OUT Hello World!
65 | 20:41:53.371 IN Hello World! OUT Hello World!
66 | ```
67 |
68 |
69 | ## Conclusion
70 |
71 | In this chapter we have learned to configure a Camel K source as a direct source for your knative service
72 |
73 |
74 | ## Cleanup
75 |
76 | Delete Camel K Source
77 |
78 | ```
79 | oc delete -f camelk/camel-source.yaml
80 | ```
81 |
--------------------------------------------------------------------------------
/2.BuildUsingOpenShiftPipelines.md:
--------------------------------------------------------------------------------
1 | # Build using OpenShift Pipeline
2 |
3 |
4 | ## Pre-requisites
5 | * OCP4 cluster with OpenShift Pipelines installed
6 | * `git clone` this repository. We have some samples here.
7 |   * Clone the project from https://github.com/RedHatWorkshops/knative-on-ocp4
8 |   * `cd knative-on-ocp4`
9 |
10 | ## Steps
11 |
12 | ### Set up a new project and create application objects
13 |
14 | Create a new project
15 |
16 | ```
17 | oc new-project kn-demo
18 | ```
19 |
20 | We will build and deploy the application from this [github repository](https://github.com/VeerMuchandi/mesgtxformer). This application is written in Quarkus, Kubernetes-native Java.
21 |
22 | We will first create the application objects `imagestream`, `deploymentconfig`, `service` and `route`.
23 |
24 | ```
25 | $ oc create -f https://raw.githubusercontent.com/VeerMuchandi/mesgtxformer/master/openshift-objects.yaml
26 |
27 |
28 | imagestream.image.openshift.io/msgtxr created
29 | deploymentconfig.apps.openshift.io/msgtxr created
30 | service/msgtxr created
31 | route.route.openshift.io/msgtxr created
32 | ```
33 |
34 | ### Set up a Tekton Pipeline
35 |
36 | Your OpenShift cluster should have a bunch of Tekton tasks configured as cluster tasks, which you can list by running:
37 |
38 | `tkn clustertask list`
39 |
40 | **Note:** List of S2I build tasks available for different languages can be found here:
41 | [https://github.com/openshift/pipelines-catalog](https://github.com/openshift/pipelines-catalog)
42 |
43 | However, we currently don't have a cluster task to build a Quarkus application. We will use OpenShift S2I to run a native build, so let us create a custom task in our namespace. Feel free to review the task at [https://raw.githubusercontent.com/RedHatWorkshops/knative-on-ocp4/master/pipeline/s2i-quarkus-native.yaml](https://raw.githubusercontent.com/RedHatWorkshops/knative-on-ocp4/master/pipeline/s2i-quarkus-native.yaml)
44 |
45 | ```
46 | $ oc create -f https://raw.githubusercontent.com/RedHatWorkshops/knative-on-ocp4/master/pipeline/s2i-quarkus-native.yaml
47 |
48 | task.tekton.dev/s2i-quarkus-native created
49 | ```
50 | Now list the tasks in the project and you will see the task that we just added.
51 |
52 | ```
53 | $ tkn tasks list
54 | NAME DESCRIPTION AGE
55 | s2i-quarkus-native 16 minutes ago
56 | ```
57 |
58 | Now it is time to create a pipeline. You can either create it from the command line or from the OpenShift devconsole.
59 |
60 | The Pipeline menu option in the devconsole allows you to interactively create or edit a pipeline. You are encouraged to try that option as it is intuitive. In this guide, the rest of the steps will show the CLI approach.
61 |
62 | 
63 |
64 |
65 | Let us look at the pipeline, which lists the tasks to create the application image using the `s2i-quarkus-native` task and deploy it using the `openshift-client` task. Note that this pipeline depends on source code coming from the `app-git` resource and pushes the image to the `app-image` resource.
66 |
67 | ```
68 | apiVersion: tekton.dev/v1alpha1
69 | kind: Pipeline
70 | metadata:
71 |   name: deploy-pipeline
72 | spec:
73 |   resources:
74 |     - name: app-git
75 |       type: git
76 |     - name: app-image
77 |       type: image
78 |   tasks:
79 |     - name: build
80 |       taskRef:
81 |         kind: Task
82 |         name: s2i-quarkus-native
83 |       params:
84 |         - name: TLSVERIFY
85 |           value: "false"
86 |       resources:
87 |         inputs:
88 |           - name: source
89 |             resource: app-git
90 |         outputs:
91 |           - name: image
92 |             resource: app-image
93 |     - name: deploy
94 |       taskRef:
95 |         kind: ClusterTask
96 |         name: openshift-client
97 |       runAfter:
98 |         - build
99 |       params:
100 |         - name: ARGS
101 |           value:
102 |             - rollout
103 |             - latest
104 |             - msgtxr
105 | ```
106 |
107 | Let us create the pipeline by running
108 |
109 | ```
110 | $ oc create -f https://raw.githubusercontent.com/RedHatWorkshops/knative-on-ocp4/master/pipeline/pipeline.yaml
111 |
112 | pipeline.tekton.dev/deploy-pipeline created
113 | ```
114 |
115 | The resources used by this pipeline are defined in the `pipeline-resources` file as shown below:
116 |
117 | ```
118 | apiVersion: tekton.dev/v1alpha1
119 | kind: PipelineResource
120 | metadata:
121 |   name: application-image
122 | spec:
123 |   type: image
124 |   params:
125 |     - name: url
126 |       value: image-registry.openshift-image-registry.svc:5000/kn-demo/msgtxr
127 | ---
128 | apiVersion: tekton.dev/v1alpha1
129 | kind: PipelineResource
130 | metadata:
131 |   name: sourcecode-git
132 | spec:
133 |   type: git
134 |   params:
135 |     - name: url
136 |       value: https://github.com/VeerMuchandi/mesgtxformer
137 |
138 | ```
139 |
140 | Now let us add these pipeline resources by running:
141 |
142 | ```
143 | $ oc create -f https://raw.githubusercontent.com/RedHatWorkshops/knative-on-ocp4/master/pipeline/pipeline-resources.yaml
144 |
145 | pipelineresource.tekton.dev/application-image created
146 | pipelineresource.tekton.dev/sourcecode-git created
147 | ```
148 |
149 | Let us use `tkn` to verify all the objects we just created as shown below:
150 |
151 | ```
152 | $ tkn pipeline list
153 | NAME AGE LAST RUN STARTED DURATION STATUS
154 | deploy-pipeline 2 hours ago --- --- --- ---
155 |
156 | $ tkn resource list
157 | NAME TYPE DETAILS
158 | sourcecode-git git url: https://github.com/VeerMuchandi/mesgtxformer
159 | application-image image url: image-registry.openshift-image-registry.svc:5000/kn-demo/msgtxr
160 |
161 | ```
162 |
163 | Now let us run the pipeline using `tkn`. This pipeline can also be started from the devconsole.
164 | * The `-s` option passes the Service Account used to run this pipeline.
165 | * The `-r` option passes the values for resources. Our pipeline above uses 2 resources, and their values are already defined in the pipeline resources, so we are just passing those pipeline resources as inputs while running the pipeline
166 |
167 | `tkn pipeline start` will create a new `pipelinerun` object to start the pipeline.
168 |
169 | ```
170 | $ tkn pipeline start deploy-pipeline -s pipeline -r app-git=sourcecode-git -r app-image=application-image
171 |
172 | Pipelinerun started: deploy-pipeline-run-pkth8
173 |
174 | In order to track the pipelinerun progress run:
175 | tkn pipelinerun logs deploy-pipeline-run-pkth8 -f -n kn-demo
176 | ```
177 | Note the name of the `pipelinerun` created above. We will use this to track the pipeline logs next. Run the following command to watch the logs. The pipeline run will clone the source code, generate a Dockerfile, run a build using buildah, push the resultant image and then deploy that container image.
178 |
179 | ```
180 | $ tkn pipelinerun logs deploy-pipeline-run-pkth8 -f -n kn-demo
181 | ```
182 |
183 | This will output the logs as below:
184 |
185 | ```
186 | [build : git-source-sourcecode-git-wcm7n] {"level":"info","ts":1589407307.4596121,"caller":"git/git.go:105","msg":"Successfully cloned https://github.com/VeerMuchandi/mesgtxformer @ master in path /workspace/source"}
187 | [build : git-source-sourcecode-git-wcm7n] {"level":"warn","ts":1589407307.4596806,"caller":"git/git.go:152","msg":"Unexpected error: creating symlink: symlink /tekton/home/.ssh /root/.ssh: file exists"}
188 | [build : git-source-sourcecode-git-wcm7n] {"level":"info","ts":1589407307.5098634,"caller":"git/git.go:133","msg":"Successfully initialized and updated submodules in path /workspace/source"}
189 |
190 | [build : generate] Application dockerfile generated in /gen-source/Dockerfile.gen
191 |
192 | [build : build] STEP 1: FROM quay.io/quarkus/ubi-quarkus-native-s2i:20.0.0-java8
193 | [build : build] Getting image source signatures
194 | [build : build] Copying blob sha256:cf0f3ebe9f536c782ab3835049cfbd9a663761ded9370791ef6ea3965c823aad
195 |
196 |
197 | ...
198 | ...
199 |
200 | [build : push] Writing manifest to image destination
201 | [build : push] Storing signatures
202 |
203 | [build : image-digest-exporter-tkzj7] {"level":"info","ts":1589407598.3208406,"logger":"fallback-logger","caller":"logging/config.go:76","msg":"Fetch GitHub commit ID from kodata failed: \"KO_DATA_PATH\" does not exist or is empty"}
204 | [build : image-digest-exporter-tkzj7] {"level":"info","ts":1589407598.3209352,"logger":"fallback-logger","caller":"imagedigestexporter/main.go:59","msg":"No index.json found for: application-image"}
205 |
206 | [deploy : oc] + oc rollout latest msgtxr
207 | [deploy : oc] I0513 22:06:52.626401 13 request.go:621] Throttling request took 1.156705194s, request: GET:https://172.30.0.1:443/apis/cloudcredential.openshift.io/v1?timeout=32s
208 | [deploy : oc] deploymentconfig.apps.openshift.io/msgtxr rolled out
209 |
210 | ```
211 |
212 | You can also observe the running pipeline on the devconsole. Navigate to your project `kn-demo` -> `Pipelines` from the menu -> select the pipelinerun that is currently in progress
213 |
214 | Note: you will need the developer console to do the following steps.
215 |
216 | You will see a screen like this:
217 |
218 | 
219 |
220 |
221 | You will observe that the deployment is also successful. You can look at the topology view of the running app.
222 |
223 | 
224 |
225 |
226 | Click on the URL link at the top right corner of the application icon, or run `oc get route` to find your application route, and bring the running application up in the browser.
227 |
228 | You will see output like this:
229 |
230 | ```
231 | MESSAGE TITLE CASED : "This Is Default Prod Message"
232 |
233 | ```
234 |
235 | Congratulations!! You have successfully built and deployed the application on OCP 4.x using OpenShift Pipelines.
236 |
237 | ## Closing
238 |
239 | We are going to deploy this application with knative-serving in the next lab. Let us idle this application for now.
240 |
241 | **Note:** Scale to zero has been in OpenShift for years now (via idling).
242 |
243 | ```
244 | $ oc idle msgtxr
245 | The service "kn-demo/msgtxr" has been marked as idled
246 | The service will unidle DeploymentConfig "kn-demo/msgtxr" to 1 replicas once it receives traffic
247 | DeploymentConfig "kn-demo/msgtxr" has been idled
248 | ```
249 |
250 | Note that the app now shows without a blue circle on the screen, indicating that the pod is not running. But if you try to access it, it will scale up.
251 |
252 | 
253 |
--------------------------------------------------------------------------------
/3.DeployServerlessApp.md:
--------------------------------------------------------------------------------
1 | # Deploy Serverless App using Knative Serving
2 |
3 | ## Prerequisites
4 | * Application was built earlier
5 | * `kn` CLI installed
6 |
7 | ## Install the Application
8 |
9 | * Get the image repository name by running `oc get is`. This image was created using the OpenShift build in the last lab.
10 |
11 | ```
12 | $ oc get is -n kn-demo
13 | NAME IMAGE REPOSITORY TAGS UPDATED
14 | msgtxr image-registry.openshift-image-registry.svc:5000/kn-demo/msgtxr latest 28 minutes ago
15 |
16 | ```
17 | Copy the name of the `image repository` from above.
18 |
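If you prefer not to copy it by hand, you can capture the repository into a shell variable; this sketch assumes the imagestream is named `msgtxr`, and `$IMAGE` can then replace the literal registry path used below:

```
IMAGE=$(oc get is msgtxr -n kn-demo -o jsonpath='{.status.dockerImageRepository}')
echo $IMAGE
```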
19 |
20 | Create a knative service by running
21 |
22 | ```
23 | kn service create msgtxr-sl --image=image-registry.openshift-image-registry.svc:5000/kn-demo/msgtxr -l app.openshift.io/runtime=quarkus
24 | ```
25 |
26 | This command runs synchronously. It takes a few seconds for the app to be deployed as a knative application.
27 |
28 | ```
29 | Creating service 'msgtxr-sl' in namespace 'kn-demo':
30 |
31 | 0.329s The Route is still working to reflect the latest desired specification.
32 | 0.466s Configuration "msgtxr-sl" is waiting for a Revision to become ready.
33 | 4.334s ...
34 | 4.457s Ingress has not yet been reconciled.
35 | 4.926s Ready to serve.
36 |
37 | Service 'msgtxr-sl' created to latest revision 'msgtxr-sl-bkfyg-1' is available at URL:
38 | http://msgtxr-sl-kn-demo.YOURDOMAIN
39 | ```
40 |
41 | Note that the output also displays the service URL.
42 |
43 | Access the URL in the browser and watch the running application. This is the same application as in the last lab. But it now works with Knative Serving.
44 |
45 | 
46 |
47 | If you wait for a minute without using the application, you will observe that it automagically scales down to 0.
48 |
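You can also watch the scale-down from the CLI; the pod for the latest revision disappears once the idle window passes:

```
oc get po -n kn-demo -w
```
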
49 | 
50 |
51 | Yay!! We have now deployed and tested our application using Knative Serving.
52 |
53 | But wait, what does this `kn service create` command actually create? Let us explore now.
54 |
55 | ## Explore the Knative Serving Objects
56 |
57 | List all Knative services:
58 |
59 | ```
60 | $ kn service list
61 | NAME URL LATEST AGE CONDITIONS READY REASON
62 | msgtxr-sl http://msgtxr-sl-kn-demo.apps.ocp4.home.ocpcloud.com msgtxr-sl-bkfyg-1 4m45s 3 OK / 3 True
63 | ```
64 |
65 | List all Knative revisions:
66 |
67 | ```
68 | $ kn revision list
69 | NAME SERVICE TRAFFIC TAGS GENERATION AGE CONDITIONS READY REASON
70 | msgtxr-sl-bkfyg-1 msgtxr-sl 100% 1 5m8s 3 OK / 4 True
71 | ```
72 |
73 | List all Knative routes by running `kn route list`:
74 | ```
75 | NAME URL READY
76 | msgtxr-sl http://msgtxr-sl-kn-demo.YOURDOMAIN True
77 | ```
78 |
79 | Describe a Knative service:
80 |
81 | ```
82 | $ kn service describe msgtxr-sl
83 | Name: msgtxr-sl
84 | Namespace: kn-demo
85 | Labels: app.openshift.io/runtime=quarkus
86 | Age: 6m
87 | URL: http://msgtxr-sl-kn-demo.YOURDOMAIN
88 |
89 | Revisions:
90 | 100% @latest (msgtxr-sl-bkfyg-1) [1] (6m)
91 | Image: image-registry.openshift-image-registry.svc:5000/kn-demo/msgtxr (pinned to 476b55)
92 |
93 | Conditions:
94 | OK TYPE AGE REASON
95 | ++ Ready 6m
96 | ++ ConfigurationsReady 6m
97 | ++ RoutesReady 6m
98 | ```
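
Under the hood, the Knative Service manages a Configuration, one or more Revisions, and a Route. If you prefer `oc`, here is a quick sketch for listing those underlying objects directly (using the full CRD names so they don't clash with core Kubernetes services and routes):

```
oc get services.serving.knative.dev -n kn-demo
oc get configurations.serving.knative.dev -n kn-demo
oc get revisions.serving.knative.dev -n kn-demo
oc get routes.serving.knative.dev -n kn-demo
```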
99 |
100 | ### Cleaning up:
101 |
102 | Run the following command when you want to remove the knative service. If you are doing the next chapter, we will be using it again, so don't remove this service yet.
103 |
104 | ```
105 | $ kn service delete msgtxr-sl
106 | Service 'msgtxr-sl' successfully deleted in namespace 'kn-demo'.
107 | ```
--------------------------------------------------------------------------------
/4.Autoscaling.md:
--------------------------------------------------------------------------------
1 |
2 | # Autoscaling Serverless Application with Knative serving
3 |
4 | ## Prerequisites
5 | * This lab uses the `siege` command, so install it if you don't already have it. Run `siege -v` to check if it is already on your workstation.
6 |
7 | ## Create a knative service
8 |
9 | Create a knative service as explained in the previous chapter. If you have cleaned it up, create it again.
10 |
11 | `kn service create msgtxr-sl --image=image-registry.openshift-image-registry.svc:5000/kn-demo/msgtxr -l app.openshift.io/runtime=quarkus`
12 |
13 | ## Update the Service to provide upper limit for scaling
14 |
15 | Let us update our knative service to configure the `concurrency-target` value, which determines when to scale up based on the number of concurrent incoming requests. This target drives how the application auto-scales.
16 |
17 | ```
18 | $ kn service update msgtxr-sl --concurrency-target=5
19 | Service 'msgtxr-sl' updated in namespace 'kn-demo'.
20 | ```
21 |
22 | If you describe the service now, you will observe an annotation added to the service template ` autoscaling.knative.dev/target: "5"`.
23 |
24 | ```
25 | $ kn service describe msgtxr-sl -o yaml
26 | apiVersion: serving.knative.dev/v1
27 | kind: Service
28 | metadata:
29 | annotations:
30 | serving.knative.dev/creator: veer
31 | serving.knative.dev/lastModifier: veer
32 | creationTimestamp: "2020-05-14T02:52:03Z"
33 | generation: 2
34 | labels:
35 | app.openshift.io/runtime: quarkus
36 | name: msgtxr-sl
37 | namespace: kn-demo
38 | resourceVersion: "334296605"
39 | selfLink: /apis/serving.knative.dev/v1/namespaces/kn-demo/services/msgtxr-sl
40 | uid: cc517e12-7ec9-43b1-a2c5-f3d7c950e7e7
41 | spec:
42 | template:
43 | metadata:
44 | annotations:
45 | autoscaling.knative.dev/target: "5"
46 | client.knative.dev/user-image: image-registry.openshift-image-registry.svc:5000/kn-demo/msgtxr
47 | creationTimestamp: null
48 | labels:
49 | app.openshift.io/runtime: quarkus
50 | name: msgtxr-sl-xpgtc-2
51 | spec:
52 | containerConcurrency: 0
53 | containers:
54 | - image: image-registry.openshift-image-registry.svc:5000/kn-demo/msgtxr@sha256:476b5558516f70af94e42e61b73fee3c67cb6ee4e498c63f1ea29b88592d62bc
55 | name: user-container
56 | readinessProbe:
57 | successThreshold: 1
58 | tcpSocket:
59 | port: 0
60 | resources:
61 | limits:
62 | cpu: "1"
63 | memory: 200M
64 | requests:
65 | cpu: 400m
66 | memory: 100M
67 | timeoutSeconds: 300
68 | traffic:
69 | - latestRevision: true
70 | percent: 100
71 | status:
72 | ...
73 | ...
74 | ...
75 | ```
76 |
77 | The above command creates a new revision for the service. You can check there are two revisions now:
78 |
79 | ```
80 | $ kn revision list
81 |
82 | NAME SERVICE TRAFFIC TAGS GENERATION AGE CONDITIONS READY REASON
83 | msgtxr-sl-xpgtc-2 msgtxr-sl 100% 2 5m17s 3 OK / 4 True
84 | msgtxr-sl-xpzwg-1 msgtxr-sl 1 5m27s 3 OK / 4 True
85 | ```
86 |
87 | And it shows 100% traffic routing to the latest revision. If you check the routes, it has only one route like before.
88 |
89 | ```
90 | $ kn route list
91 | NAME URL READY
92 | msgtxr-sl http://msgtxr-sl-kn-demo.YOURDOMAIN True
93 | ```
94 |
95 |
96 |
97 | ## Load the application
98 |
99 | Get the URL for your Knative Route.
100 |
101 | ```
102 | export URL=$(kn route list | awk 'NR>1 {print $2}')
103 | ```
104 |
105 | Confirm that your application is scaled down to zero and that you have no pods running. The command below should give you no results. If not, wait a minute until your pods are scaled down to zero.
106 |
107 | ```
108 | $ oc get po | grep Running
109 | ```
110 |
111 | Let us load this application now by running siege. Have another window ready to watch the pods with `watch oc get po`
112 |
113 | ```
114 | siege -r 1 -c 50 -t 30S $URL
115 | ```
116 |
117 | You will quickly see on developer console that it spins up a bunch of pods.
118 |
119 | 
120 |
121 | and in a couple of minutes all these pods will autoscale down as the siege ends and the workload goes down.
122 |
123 | But there were just too many pods coming up at the same time. Should we limit the number of pods that can come up? Let us update the service again to set `max-scale`, the upper limit on the number of pods, to `5`.
124 |
125 | ```
126 | $ kn service update msgtxr-sl --max-scale=5
127 | Updating Service 'msgtxr-sl' in namespace 'kn-demo':
128 |
129 | 5.227s Traffic is not yet migrated to the latest revision.
130 | 5.352s Ingress has not yet been reconciled.
131 | 5.689s Ready to serve.
132 |
133 | Service 'msgtxr-sl' updated to latest revision 'msgtxr-sl-mkzlf-3' is available at URL:
134 | http://msgtxr-sl-kn-demo.YOURDOMAIN
135 | ```
136 | Wait until the application scales down to 0. Now, try running `siege` again and this time it will only scale to a maximum of `5` instances.
137 |
138 | 
139 |
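For reference, the `--concurrency-target` and `--max-scale` flags map to autoscaling annotations on the revision template. A minimal sketch of the relevant part of the Service spec, assuming the values used in this lab:

```
spec:
  template:
    metadata:
      annotations:
        # scale out when concurrent requests per pod exceed this target
        autoscaling.knative.dev/target: "5"
        # never run more than 5 pods for this revision
        autoscaling.knative.dev/maxScale: "5"
```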
140 | ## Conclusion
141 |
142 | In this lab we have learnt to set up our Knative Service for autoscaling.
143 |
144 | # Cleanup
145 |
146 | ```
147 | $ kn service delete msgtxr-sl
148 | Service 'msgtxr-sl' successfully deleted in namespace 'kn-demo'.
149 | ```
150 |
--------------------------------------------------------------------------------
/5.BlueGreen.md:
--------------------------------------------------------------------------------
1 | # Blue Green Deployments with Knative serving
2 | In this chapter we will learn blue-green deployments with Knative Serving
3 |
4 | ## Prerequisites
5 | * Application Image was built earlier
6 | * `kn` CLI installed
7 |
8 | ## Deploy Knative Service
9 |
10 | ### Blue Revision
11 | Let us deploy application version 1 using the container image we created earlier. Note the parameters:
12 | * `revision-name` names the revision `blue`, which will be suffixed to the service name
13 | * environment variable `format` configures this app to not change the messages
14 | * environment variable `color` configures this app to print the message in blue
15 | * environment variable `message` is the message that is output by default
16 |
17 | ```
18 | kn service create msgtxr-sl \
19 | --image=image-registry.openshift-image-registry.svc:5000/kn-demo/msgtxr \
20 | --revision-name=blue \
21 | -l app.openshift.io/runtime=quarkus \
22 | --env format=none \
23 | --env color=blue \
24 | --env message="Version 1 prints this message in BLUE"
25 | ```
26 | Output
27 | ```
28 | Creating service 'msgtxr-sl' in namespace 'kn-demo':
29 |
30 | 0.340s The Route is still working to reflect the latest desired specification.
31 | 0.568s Configuration "msgtxr-sl" is waiting for a Revision to become ready.
32 | 5.171s ...
33 | 5.306s Ingress has not yet been reconciled.
34 | 5.421s Ready to serve.
35 |
36 | Service 'msgtxr-sl' created to latest revision 'msgtxr-sl-blue' is available at URL:
37 | http://msgtxr-sl-kn-demo.YOURDOMAIN
38 | ```
39 | Get the revision list and notice how `blue` is suffixed to the revision name.
40 | ```
41 | % kn revision list
42 | NAME SERVICE TRAFFIC TAGS GENERATION AGE CONDITIONS READY REASON
43 | msgtxr-sl-blue msgtxr-sl 100% 1 45s 4 OK / 4 True
44 | ```
45 |
46 | Get URL for the application
47 |
48 | ```
49 | export URL=$(kn route list | awk 'NR>1 {print $2}')
50 | ```
51 | Verify by calling this URL `curl -w "\n" $URL`
52 |
53 | 
54 |
55 | ### Green Revision
56 |
57 | Let us now update the service to create a new GREEN revision, but not send any traffic to this new revision yet. Notice the parameters:
58 | * `revision-name` that suffixes `green` to the service name
59 | * `traffic` that keeps 100% of the traffic on the `blue` revision for now
60 |
61 | ```
62 | kn service update msgtxr-sl \
63 | --revision-name=green \
64 | --env format=none \
65 | --env color=green \
66 | --env message="Version 2 prints this message in GREEN" \
67 | --traffic msgtxr-sl-blue=100
68 | ```
69 |
70 | Output
71 | ```
72 | Updating Service 'msgtxr-sl' in namespace 'kn-demo':
73 |
74 | 0.456s The Route is still working to reflect the latest desired specification.
75 | 3.917s Ready to serve.
76 |
77 | Service 'msgtxr-sl' updated to latest revision 'msgtxr-sl-green' is available at URL:
78 | http://msgtxr-sl-kn-demo.YOURDOMAIN
79 | ```
80 |
81 | Check the revision list again. The output shows that traffic still goes to the blue revision.
82 |
83 | ```
84 | % kn revision list
85 |
86 | NAME SERVICE TRAFFIC TAGS GENERATION AGE CONDITIONS READY REASON
87 | msgtxr-sl-green msgtxr-sl 2 39s 4 OK / 4 True
88 | msgtxr-sl-blue msgtxr-sl 100% 1 46m 3 OK / 4 True
89 | ```
90 |
91 | You can verify by running `curl -w "\n" $URL` that the traffic is still going to the blue revision.
92 | 
93 |
94 | ### Switch from Blue to Green
95 |
96 | Let us update the knative service again to send all the traffic to the green revision.
97 |
98 | `kn service update msgtxr-sl --traffic msgtxr-sl-green=100`
99 |
100 | Verify the revision list to see that 100% of traffic is now going to the green revision.
101 |
102 | ```
103 | kn revision list
104 | NAME SERVICE TRAFFIC TAGS GENERATION AGE CONDITIONS READY REASON
105 | msgtxr-sl-green msgtxr-sl 100% 2 5m13s 3 OK / 4 True
106 | msgtxr-sl-blue msgtxr-sl 1 51m 3 OK / 4 True
107 | ```
108 |
109 | and test again by running `curl -w "\n" $URL` and the output should be
110 | 
111 |
112 | You can try switching between blue and green revisions by updating the traffic again.
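For example, to send all traffic back to the blue revision:

```
kn service update msgtxr-sl --traffic msgtxr-sl-blue=100
```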
113 |
114 | ### Conclusion
115 |
116 | In this chapter we have learnt to do BlueGreen deployments with knative services.
117 |
118 | ### Cleanup
119 |
120 | Delete the knative service by running
121 |
122 | ```
123 | kn service delete msgtxr-sl
124 | ```
125 |
126 |
127 |
128 |
--------------------------------------------------------------------------------
/6.TrafficSplitting.md:
--------------------------------------------------------------------------------
1 |
2 | # Traffic Splitting with Knative serving
3 | In this chapter we will learn traffic splitting with Knative Serving
4 |
5 | ## Prerequisites
6 | * Application Image was built earlier
7 | * `kn` CLI installed
8 |
9 | ## Deploy Knative Service
10 |
11 | ### Revision 1
12 | Let us deploy application version 1 using the container image we created earlier. Note the parameters:
13 | * `revision-name` names the revision `v1`, which will be suffixed to the service name
14 | * environment variable `format` configures this app to `capitalize` the messages
15 | * environment variable `message` is the message that is output by default
16 |
17 | ```
18 | kn service create msgtxr-sl \
19 | --image=image-registry.openshift-image-registry.svc:5000/kn-demo/msgtxr \
20 | --revision-name=v1 \
21 | -l app.openshift.io/runtime=quarkus \
22 | --env format=capitalize \
23 | --env message="version1 will capitalize the message"
24 | ```
25 | Output
26 | ```
27 | Creating service 'msgtxr-sl' in namespace 'kn-demo':
28 |
29 | 0.260s The Route is still working to reflect the latest desired specification.
30 | 0.498s Configuration "msgtxr-sl" is waiting for a Revision to become ready.
31 | 4.601s ...
32 | 4.740s Ingress has not yet been reconciled.
33 | 5.176s Ready to serve.
34 |
35 | Service 'msgtxr-sl' created to latest revision 'msgtxr-sl-hgdyx-1' is available at URL:
36 | http://msgtxr-sl-kn-demo.YOURDOMAIN
37 | ```
38 | Get the revision list and notice how `v1` is suffixed to the revision name.
39 | ```
40 | % kn revision list
41 | NAME SERVICE TRAFFIC TAGS GENERATION AGE CONDITIONS READY REASON
42 | msgtxr-sl-v1 msgtxr-sl 100% 1 13s 4 OK / 4 True
43 | ```
44 |
45 | You can also see the knative service in the developer console, where 100% of traffic is going to revision `v1`.
46 | 
47 |
48 | Get URL for the application
49 |
50 | ```
51 | export URL=$(kn route list | awk 'NR>1 {print $2}')
52 | ```
53 | Verify by calling this URL
54 |
55 | ```
56 | % curl $URL
57 | MESSAGE CAPITALIZED : "VERSION1 WILL CAPITALIZE THE MESSAGE"
58 | ```
59 | ### Revision 2
60 |
61 | Let us now update the service to create a new revision and introduce it as a canary that takes 10% of the traffic. Notice the parameters:
62 | * `revision-name` that suffixes `v2` to the service name
63 | * environment variable `format` that configures the application to display `wordcount`
64 | * `traffic` that shows the split of traffic between revisions `v1` and `v2`
65 |
66 | ```
67 | kn service update msgtxr-sl \
68 | --revision-name=v2 \
69 | --env format=wordcount \
70 | --env message="version2 displays word count for this message" \
71 | --traffic msgtxr-sl-v1=90,msgtxr-sl-v2=10
72 | ```
73 |
74 | Output
75 | ```
76 | Updating Service 'msgtxr-sl' in namespace 'kn-demo':
77 |
78 | 0.406s The Route is still working to reflect the latest desired specification.
79 | 0.543s Revision "msgtxr-sl-v2" is not yet ready.
80 | 5.041s ...
81 | 5.130s Ingress has not yet been reconciled.
82 | 5.231s Ready to serve.
83 |
84 | Service 'msgtxr-sl' updated to latest revision 'msgtxr-sl-v2' is available at URL:
85 | http://msgtxr-sl-kn-demo.YOURDOMAIN
86 | ```
87 |
88 | Check revision list again. The output shows how traffic is split between the two revisions.
89 |
90 | ```
91 | % kn revision list
92 |
93 | NAME SERVICE TRAFFIC TAGS GENERATION AGE CONDITIONS READY REASON
94 | msgtxr-sl-v2 msgtxr-sl 10% 2 47s 4 OK / 4 True
95 | msgtxr-sl-v1 msgtxr-sl 90% 1 5m10s 3 OK / 4 True
96 | ```
97 |
98 | You can see the change in the developer console, which shows the traffic split between revisions `v1` and `v2` as requested.
99 |
100 | 
101 |
102 | Now let us test by curling the service a few times:
103 |
104 | ```
105 | % for i in {1..20}; do curl -w "\n" $URL; done
106 | MESSAGE CAPITALIZED : "VERSION1 WILL CAPITALIZE THE MESSAGE"
107 | MESSAGE CAPITALIZED : "VERSION1 WILL CAPITALIZE THE MESSAGE"
108 | MESSAGE CAPITALIZED : "VERSION1 WILL CAPITALIZE THE MESSAGE"
109 | MESSAGE CAPITALIZED : "VERSION1 WILL CAPITALIZE THE MESSAGE"
110 | MESSAGE CAPITALIZED : "VERSION1 WILL CAPITALIZE THE MESSAGE"
111 | MESSAGE CAPITALIZED : "VERSION1 WILL CAPITALIZE THE MESSAGE"
112 | MESSAGE CAPITALIZED : "VERSION1 WILL CAPITALIZE THE MESSAGE"
113 | MESSAGE CAPITALIZED : "VERSION1 WILL CAPITALIZE THE MESSAGE"
114 | MESSAGE CAPITALIZED : "VERSION1 WILL CAPITALIZE THE MESSAGE"
115 | MESSAGE CAPITALIZED : "VERSION1 WILL CAPITALIZE THE MESSAGE"
116 | MESSAGE CAPITALIZED : "VERSION1 WILL CAPITALIZE THE MESSAGE"
117 | MESSAGE WORD COUNT: 7 SENTENCE: "version2 displays word count for this message"
118 | MESSAGE CAPITALIZED : "VERSION1 WILL CAPITALIZE THE MESSAGE"
119 | MESSAGE CAPITALIZED : "VERSION1 WILL CAPITALIZE THE MESSAGE"
120 | MESSAGE CAPITALIZED : "VERSION1 WILL CAPITALIZE THE MESSAGE"
121 | MESSAGE WORD COUNT: 7 SENTENCE: "version2 displays word count for this message"
122 | MESSAGE CAPITALIZED : "VERSION1 WILL CAPITALIZE THE MESSAGE"
123 | MESSAGE CAPITALIZED : "VERSION1 WILL CAPITALIZE THE MESSAGE"
124 | MESSAGE CAPITALIZED : "VERSION1 WILL CAPITALIZE THE MESSAGE"
125 | MESSAGE CAPITALIZED : "VERSION1 WILL CAPITALIZE THE MESSAGE"
126 | ```
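
A quick way to tally the split over a larger sample (a small sketch using standard shell tools; the counts will only approximate 90/10):

```
for i in {1..100}; do curl -s -w "\n" $URL; done | sort | uniq -c
```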
127 |
128 | ## Update the traffic split for AB testing
129 |
130 | Now let us update the traffic percentage to 50-50
131 |
132 | ```
133 | kn service update msgtxr-sl \
134 | --traffic msgtxr-sl-v1=50,msgtxr-sl-v2=50
135 | ```
136 |
137 | Verify the percentages have taken effect
138 | ```
139 | % kn revision list
140 |
141 |
142 | NAME SERVICE TRAFFIC TAGS GENERATION AGE CONDITIONS READY REASON
143 | msgtxr-sl-v2 msgtxr-sl 50% 2 7m44s 3 OK / 4 True
144 | msgtxr-sl-v1 msgtxr-sl 50% 1 12m 3 OK / 4 True
145 | ```
146 | 
147 |
148 | Test again with `for i in {1..10}; do curl -w "\n" $URL; done` and notice the output.
149 |
150 | **Note** that the traffic split will stabilize after pods for both the revisions are up. If the pods are scaled down, the results may not be consistent.
151 |
152 | ```
153 | % for i in {1..10}; do curl -w "\n" $URL; done
154 | MESSAGE WORD COUNT: 7 SENTENCE: "version2 displays word count for this message"
155 | MESSAGE CAPITALIZED : "VERSION1 WILL CAPITALIZE THE MESSAGE"
156 | MESSAGE CAPITALIZED : "VERSION1 WILL CAPITALIZE THE MESSAGE"
157 | MESSAGE WORD COUNT: 7 SENTENCE: "version2 displays word count for this message"
158 | MESSAGE WORD COUNT: 7 SENTENCE: "version2 displays word count for this message"
159 | MESSAGE WORD COUNT: 7 SENTENCE: "version2 displays word count for this message"
160 | MESSAGE WORD COUNT: 7 SENTENCE: "version2 displays word count for this message"
161 | MESSAGE CAPITALIZED : "VERSION1 WILL CAPITALIZE THE MESSAGE"
162 | MESSAGE CAPITALIZED : "VERSION1 WILL CAPITALIZE THE MESSAGE"
163 | MESSAGE CAPITALIZED : "VERSION1 WILL CAPITALIZE THE MESSAGE"
164 | ```
165 |
166 | ## Conclusion
167 | We have tested traffic splitting with a canary and changed the percentage of the traffic split.
168 |
169 | ## Cleanup
170 |
171 | ```
172 | $ kn service delete msgtxr-sl
173 | Service 'msgtxr-sl' successfully deleted in namespace 'kn-demo'.
174 | ```
175 |
176 |
177 |
178 |
--------------------------------------------------------------------------------
/7.AddingDirectEventSource.md:
--------------------------------------------------------------------------------
1 | # Add an Event Source to the App
2 |
3 | **Event Sources** are the components that receive the external events and forward them onto **Sinks**.
4 |
5 | **Note:** `kn` support for Knative Eventing is still limited, so we will fall back to yaml files where needed.
6 |
7 | Different types of event sources are possible, as [listed here](https://knative.dev/docs/eventing/sources/). In this example we will add an event source to test Direct delivery.
8 |
9 | Direct delivery sends events from a source to a single Service (an Addressable endpoint, such as a Knative Service or a core Kubernetes Service). In this case, the Source is responsible for retrying or queueing events if the destination Service is not available.
10 |
11 |
12 | ## Prerequisites
13 | * Knative Eventing is installed
14 | * You have a Knative service deployed. If not, create one by running
15 | ```kn service create msgtxr-sl --image=image-registry.openshift-image-registry.svc:5000/kn-demo/msgtxr -l app.openshift.io/runtime=quarkus --env format=none```
16 |
17 | ## Add a Direct Event Source
18 |
19 | Let us create an event source based on a `PingSource` that generates an event every `1 minute`. In this example, we will use our previously deployed application as the sink.
20 |
21 | ```
22 | kn source ping create msgtxr-pingsource \
23 | --schedule="* * * * *" \
24 | --data="This message is from PingSource" \
25 | --sink svc:msgtxr-sl
26 |
27 | Ping source 'msgtxr-pingsource' created in namespace 'kn-demo'.
28 | ```
29 | Alternatively, you can add this from the developer console by choosing the `+Add` button on the left menu
30 | * Event Sources tile
31 | 
32 | * Select Ping Source, fill in the rest of the details, and press the **Create** button
33 | 
34 |
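If you prefer yaml, the repository includes an equivalent definition in `eventing/pingsource.yaml` that you can create with `oc apply -f eventing/pingsource.yaml`:

```
apiVersion: sources.knative.dev/v1alpha2
kind: PingSource
metadata:
  name: msgtxr-pingsource
spec:
  jsonData: This message is from PingSource
  schedule: '* * * * *'
  sink:
    ref:
      apiVersion: serving.knative.dev/v1
      kind: Service
      name: msgtxr-sl
      namespace: kn-demo
```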
35 | You can confirm the source is created by running
36 |
37 | ```
38 | $ kn source ping list
39 |
40 | NAME SCHEDULE SINK AGE CONDITIONS READY REASON
41 | msgtxr-pingsource * * * * * svc:msgtxr-sl 4m30s 6 OK / 6 True
42 | ```
43 |
44 | Now, the `PingSource` is set up to send events to the knative service and you will observe that the pod is up and running.
45 |
46 | ```
47 | $ oc get po | grep Running
48 | pingsource-msgtxr-pingsour-48373f6a-da18-4484-b309-2b88c29bjcg6 1/1 Running 0 31s
49 | ```
50 |
51 | In about a minute, you will notice two pods running:
52 | * PingEventSource will run as a pod
53 | * Knative service for our application is a separate pod.
54 |
55 |
56 | ```
57 | % oc get po | grep Running
58 | msgtxr-sl-fzpfp-1-deployment-d55996b47-r4n52 2/2 Running 0 4m54s
59 | pingsource-msgtxr-pingsour-48373f6a-da18-4484-b309-2b88c29bjcg6 1/1 Running 0 5m25s
60 |
61 | ```
62 |
63 | and with this view in the developer console
64 | 
65 |
66 | Note the application's pod name. You can watch the logs for the application to verify that the app is being called continuously by running
67 |
68 | ```oc logs -f $(oc get po | grep msgtxr-sl | awk '{print $1}') -c user-container```
69 |
70 | and you should see logs like below:
71 |
72 | ```
73 | __ ____ __ _____ ___ __ ____ ______
74 | --/ __ \/ / / / _ | / _ \/ //_/ / / / __/
75 | -/ /_/ / /_/ / __ |/ , _/ ,< / /_/ /\ \
76 | --\___\_\____/_/ |_/_/|_/_/|_|\____/___/
77 | 2020-05-14 04:16:07,134 INFO [io.quarkus] (main) getting-started 1.0-SNAPSHOT (powered by Quarkus 1.3.2.Final) started in 0.010s. Listening on: http://0.0.0.0:8080
78 | 2020-05-14 04:16:07,134 INFO [io.quarkus] (main) Profile prod activated.
79 | 2020-05-14 04:16:07,134 INFO [io.quarkus] (main) Installed features: [cdi, resteasy]
80 | 04:16:07.868 IN {"body":"This message is from PingSource"} OUT {"body":"This message is from PingSource"}
81 | 04:17:00.007 IN {"body":"This message is from PingSource"} OUT {"body":"This message is from PingSource"}
82 | 04:18:00.006 IN {"body":"This message is from PingSource"} OUT {"body":"This message is from PingSource"}
83 | ...
84 | ...
85 | ```
86 |
87 | You will notice from the logs that events come in every minute from the ping source's cron schedule.
88 |
89 | ## Cleanup the Event Source
90 |
91 | Delete the event source
92 |
93 | ```
94 | % kn source ping delete msgtxr-pingsource
95 |
96 | Ping source 'msgtxr-pingsource' deleted in namespace 'kn-demo'.
97 | ```
98 | This should remove the event source pod. In a minute or so, the application pod should scale down to 0.
99 |
100 |
101 | ## Conclusion
102 | We saw a simple example of a direct event source in this lab.
103 |
104 |
105 |
106 |
107 |
108 |
109 |
--------------------------------------------------------------------------------
/8.AddingChannelsAndSubscriptions.md:
--------------------------------------------------------------------------------
1 | # Adding Channels and Subscriptions
2 |
3 | We can fan-out delivery from an Event source to multiple endpoints using Channels and Subscriptions. In this case, the Channel implementation ensures that messages are delivered to the requested destinations and should buffer the events if the destination service is unavailable.
4 |
5 | ## Create a Channel
6 |
7 | Different types of [channels](https://knative.dev/docs/eventing/channels/) are possible, such as Apache Kafka, GCPPubSub, InMemoryChannel, etc. In this example we will create an InMemoryChannel with the following yaml. This is a best-effort channel intended for development usage.
8 |
9 | ```
10 | apiVersion: messaging.knative.dev/v1alpha1
11 | kind: InMemoryChannel
12 | metadata:
13 | name: imc-msgtxr
14 | ```
15 |
16 | Let us create the channel.
17 |
18 | ```
19 | $ oc create -f eventing/in-memory-channel.yaml
20 |
21 | inmemorychannel.messaging.knative.dev/imc-msgtxr created
22 | ```
23 |
24 | Verify
25 |
26 | ```
27 | $ oc get inmemorychannel
28 |
29 | NAME READY REASON URL AGE
30 | imc-msgtxr True http://imc-msgtxr-kn-channel.kn-demo.svc.cluster.local 24s
31 | ```
32 |
33 | Note the URL from the output as we will use it while adding the event source in the next step.
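
If you would rather capture the channel URL in a variable than copy it by hand, something like this should work (a sketch; the URL is exposed under the channel's Addressable status):

```
export CHANNEL_URL=$(oc get inmemorychannel imc-msgtxr -o jsonpath='{.status.address.url}')
echo $CHANNEL_URL
```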
34 |
35 | ## Add an Event Source with Channel as the Sink
36 |
37 | This time we will again add an [event source](https://knative.dev/docs/eventing/sources/). But unlike last direct delivery example, the event source in this case will use the above channel as the sink.
38 |
39 | ```
40 | kn source ping create msgtxr-pingsource \
41 | --schedule="* * * * *" \
42 | --data="This message is from PingSource" \
43 | --sink=http://imc-msgtxr-kn-channel.kn-demo.svc.cluster.local
44 | ```
45 |
46 | * We are using PingSource as the event source in this example as well
47 | * **Note** the `sink` here points to the channel we created above. Look at the URL.
48 |
49 |
50 | Verify
51 |
52 | ```
53 | % kn source ping list
54 | NAME SCHEDULE SINK AGE CONDITIONS READY REASON
55 | msgtxr-pingsource * * * * * 13m 6 OK / 6 True
56 |
57 | ```
58 |
59 | At this point the events are being generated but no data should be flowing into the application yet. Run
60 |
61 | ```
62 | % oc get po | grep Running
63 | pingsource-msgtxr-pingsour-e574f4cc-1d71-4d6f-b1b9-47f18c6kl9pq 1/1 Running 0 41s
64 | ```
65 |
66 | You should see the event source pod running. It is pushing events to its sink, which is the InMemory channel. But since no one has subscribed to the channel, the events are not getting anywhere.
67 |
68 | ## Add an Event Subscriber
69 |
70 | Event subscription allows an event destination to subscribe to a channel. In our case we will configure our service as the destination.
71 |
72 |
73 | ```
74 | apiVersion: messaging.knative.dev/v1alpha1
75 | kind: Subscription
76 | metadata:
77 | name: msgtxr-subscriber
78 | spec:
79 | channel:
80 | apiVersion: messaging.knative.dev/v1alpha1
81 | kind: InMemoryChannel
82 | name: imc-msgtxr
83 | subscriber:
84 | ref:
85 | apiVersion: serving.knative.dev/v1alpha1
86 | kind: Service
87 | name: msgtxr-sl
88 | ```
89 |
90 | Let us create the subscription
91 |
92 | ```
93 | $ oc create -f eventing/event-subscription.yaml
94 |
95 |
96 | subscription.messaging.knative.dev/msgtxr-subscriber created
97 | ```
98 |
99 | Verify
100 |
101 | ```
102 | $ oc get subscription.messaging.knative.dev
103 | NAME READY REASON AGE
104 | msgtxr-subscriber True 42s
105 | ```
106 |
107 | Wait a minute or so and you should see the serverless pod coming up:
108 |
109 | ```
110 | % oc get po | grep Running
111 | msgtxr-sl-fzpfp-1-deployment-d55996b47-chxhh 2/2 Running 0 3m3s
112 | pingsource-msgtxr-pingsour-e574f4cc-1d71-4d6f-b1b9-47f18c6kl9pq 1/1 Running 0 10m
113 | ```
114 |
115 | Check the serverless pod logs for events received from the event source via channel
116 |
117 | ```
118 | __ ____ __ _____ ___ __ ____ ______
119 | --/ __ \/ / / / _ | / _ \/ //_/ / / / __/
120 | -/ /_/ / /_/ / __ |/ , _/ ,< / /_/ /\ \
121 | --\___\_\____/_/ |_/_/|_/_/|_|\____/___/
122 | 2020-05-14 04:56:02,354 INFO [io.quarkus] (main) getting-started 1.0-SNAPSHOT (powered by Quarkus 1.3.2.Final) started in 0.013s. Listening on: http://0.0.0.0:8080
123 | 2020-05-14 04:56:02,354 INFO [io.quarkus] (main) Profile prod activated.
124 | 2020-05-14 04:56:02,354 INFO [io.quarkus] (main) Installed features: [cdi, resteasy]
125 | 04:56:03.350 IN {"body":"This message is from PingSource"} OUT {"body":"This message is from PingSource"}
126 | 04:57:00.011 IN {"body":"This message is from PingSource"} OUT {"body":"This message is from PingSource"}
127 | ```
128 |
129 | ## Summary
130 |
131 | In this lab, we have learnt to add a Channel that receives events from an Event Source, and then we added our Knative service as the destination that subscribes to the Channel.
132 |
133 | ## Cleanup
134 |
135 | Let us delete the event source.
136 |
137 | ```
138 | $ kn source ping delete msgtxr-pingsource
139 |
140 | Ping source 'msgtxr-pingsource' deleted in namespace 'kn-demo'.
141 | ```
142 |
143 | You should see the event source pod go away immediately. If you wait a minute, our serverless service also scales down to zero.
144 |
145 | Delete the channel
146 |
147 | ```
148 | $ oc delete -f eventing/in-memory-channel.yaml
149 |
150 | inmemorychannel.messaging.knative.dev "imc-msgtxr" deleted
151 |
152 | ```
153 |
154 | Delete subscription
155 |
156 | ```
157 | $ oc delete -f eventing/event-subscription.yaml
158 | ```
159 |
160 |
161 |
162 |
163 |
164 |
165 |
166 |
167 |
--------------------------------------------------------------------------------
/9.UsingBrokersAndTriggers.md:
--------------------------------------------------------------------------------
1 | # Using Brokers and Triggers
2 |
3 | Event Sources are the components that receive external events and forward them onto Sinks, and this delivery can be mediated by a Trigger. Triggers offer an opportunity to filter events by attributes. Triggers are used to connect a Knative Serving application to a Broker so that it can respond to the events that the Broker emits.
4 |
5 |
6 | ## Create an Event Source
7 |
8 | This time we will create a ContainerSource that uses a heartbeat container, which generates events at a configurable rate and forwards them to the given sink. That sink is a broker that will be created shortly.
9 |
10 | ```
11 | apiVersion: sources.eventing.knative.dev/v1alpha1
12 | kind: ContainerSource
13 | metadata:
14 | name: heartbeat-event-source
15 | spec:
16 | image: quay.io/openshift-knative/knative-eventing-sources-heartbeats:v0.13.2
17 | args:
18 | - '--label="from heartbeat source"'
19 | - '--period=20'
20 | env:
21 | - name: POD_NAME
22 | valueFrom:
23 | fieldRef:
24 | fieldPath: metadata.name
25 | - name: POD_NAMESPACE
26 | valueFrom:
27 | fieldRef:
28 | fieldPath: metadata.namespace
29 | sink:
30 | apiVersion: eventing.knative.dev/v1alpha1
31 | kind: Broker
32 | name: default
33 | ```
34 |
35 | * The above source will generate an event once every 20s.
36 |
37 | Let us create the event source
38 |
39 | ```
40 | $ oc create -f eventing/event-source-heartbeat.yaml
41 | containersource.sources.eventing.knative.dev/heartbeat-event-source created
42 | ```
43 |
44 | Verify
45 |
46 | ```
47 | $ oc get containersources
48 | NAME AGE
49 | heartbeat-event-source 49s
50 | ```
51 |
52 | ## Add a Broker
53 |
54 | In order to create a broker, we label the namespace with `knative-eventing-injection=enabled`, and a broker named `default` is added automatically.
55 |
56 | ```
57 | $ oc label namespace kn-demo knative-eventing-injection=enabled
58 | namespace/kn-demo labeled
59 | ```
60 |
61 | ```
62 | $ oc get po -w
63 | NAME READY STATUS RESTARTS AGE
64 | default-broker-filter-59b5bc56-fjq75 1/1 Running 0 4m5s
65 | default-broker-ingress-74759d5995-76lj5 1/1 Running 0 4m5s
66 | ```
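
```
# the default broker should report READY=True once its filter and ingress pods are up
oc get brokers.eventing.knative.dev
```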
67 |
68 |
69 | ## Create a Trigger and Broker
70 |
71 | Let us create a trigger that connects the source to our Knative service. With `kn` this command can also add a broker at the same time.
72 |
73 | ```
74 | kn trigger create msgtxr-trigger \
75 | --inject-broker --broker default \
76 | --filter type=dev.knative.eventing.samples.heartbeat \
77 | --sink svc:msgtxr-sl
78 | ```
79 |
80 |
81 | * The event type `dev.knative.eventing.samples.heartbeat` is the one used by the ContainerSource image we deployed earlier.
82 | * Subscriber is our knative service
83 | * `--inject-broker` will inject the broker into the namespace. Alternatively, we could have asked a cluster administrator to add this broker to the namespace by running
84 |   `oc label namespace kn-demo knative-eventing-injection=enabled`,
85 |   since labeling a namespace requires a privileged user. But luckily we have `kn` handling this for us.
86 |
87 | Verify
88 |
89 | ```
90 | $ kn trigger list
91 |
92 | NAME BROKER SINK AGE CONDITIONS READY REASON
93 | msgtxr-trigger default svc:msgtxr-sl 24m 5 OK / 5 True
94 | ```
95 |
96 | ## Observe the Events in Action
97 |
98 | List the pods running now. You'll see a heartbeat event source pod generating events, the broker's filter and ingress pods, and your Knative service acting as the destination via the broker.
99 |
100 | ```
101 | $ oc get po | grep Running
102 | containersource-heartbeat--282bc5ef-c409-4319-9c98-177988558vg7 1/1 Running 0 21m
103 | default-broker-filter-7d89b8d949-kw5z2 1/1 Running 0 25m
104 | default-broker-ingress-6b5d8cf558-2f87x 1/1 Running 0 25m
105 | msgtxr-sl-fzpfp-1-deployment-d55996b47-r9c4k 2/2 Running 0 20m
106 | ```
107 |
108 |
109 | Look at your Knative service logs, and you will see the heartbeats received from the heartbeat event source once every `20s`.
110 |
111 | ```
112 | $ oc logs -f $(oc get po | grep msgtxr-sl | awk '{print $1}') -c user-container
113 | __ ____ __ _____ ___ __ ____ ______
114 | --/ __ \/ / / / _ | / _ \/ //_/ / / / __/
115 | -/ /_/ / /_/ / __ |/ , _/ ,< / /_/ /\ \
116 | --\___\_\____/_/ |_/_/|_/_/|_|\____/___/
117 | 2020-05-14 14:25:24,363 INFO [io.quarkus] (main) getting-started 1.0-SNAPSHOT (powered by Quarkus 1.3.2.Final) started in 0.011s. Listening on: http://0.0.0.0:8080
118 | 2020-05-14 14:25:24,363 INFO [io.quarkus] (main) Profile prod activated.
119 | 2020-05-14 14:25:24,363 INFO [io.quarkus] (main) Installed features: [cdi, resteasy]
120 | 14:25:25.127 IN {"id":1,"label":"from heartbeat source"} OUT {"id":1,"label":"from heartbeat source"}
121 | 14:25:36.838 IN {"id":2,"label":"from heartbeat source"} OUT {"id":2,"label":"from heartbeat source"}
122 | 14:25:56.836 IN {"id":3,"label":"from heartbeat source"} OUT {"id":3,"label":"from heartbeat source"}
123 | ```
124 |
125 | Let us edit the event source to increase the period to 180s.
126 |
127 | ```
128 | $ cat eventing/event-source-heartbeat-180s.yaml
129 | apiVersion: sources.eventing.knative.dev/v1alpha1
130 | kind: ContainerSource
131 | metadata:
132 | name: heartbeat-event-source
133 | spec:
134 | image: quay.io/openshift-knative/knative-eventing-sources-heartbeats:v0.7.1
135 | args:
136 | - '--label="from heartbeat source"'
137 | - '--period=180'
138 | env:
139 | - name: POD_NAME
140 | valueFrom:
141 | fieldRef:
142 | fieldPath: metadata.name
143 | - name: POD_NAMESPACE
144 | valueFrom:
145 | fieldRef:
146 | fieldPath: metadata.namespace
147 | sink:
148 | apiVersion: eventing.knative.dev/v1alpha1
149 | kind: Broker
150 | name: default
151 | ```
152 |
153 | Let us apply this change
154 |
155 | ```
156 | $ oc replace -f eventing/event-source-heartbeat-180s.yaml
157 | containersource.sources.eventing.knative.dev/heartbeat-event-source replaced
158 | ```
159 |
160 | Now observe your Knative destination service by running `watch oc get po` or in the developer console.
161 |
162 | You will notice that the Knative application scales down to zero after a minute of inactivity, and that a new pod comes up every 180s when the next heartbeat arrives.
163 |
164 | ## Extra Points
165 |
166 | * Deploy an additional knative service that capitalizes messages
167 |
168 | ```
169 | kn service create msgtxr-capitalizer \
170 | --image=image-registry.openshift-image-registry.svc:5000/kn-demo/msgtxr \
171 | -l app.openshift.io/runtime=quarkus \
172 | --env format=capitalize
173 | ```
174 |
175 | * Add a new PingSource that sinks into the Broker with the message `data from pingsource`
176 | * Create a new trigger that filters the events by `type=dev.knative.sources.ping` and sinks into the new `msgtxr-capitalizer` service
177 | * Watch that the messages from the ping source go to the `msgtxr-capitalizer` service, whereas the ones from the heartbeat container source continue to go to the other knative service, even though they are all posted to the same broker. A rough sketch of these steps follows below.
178 |
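A rough sketch for these extra points. The PingSource below sinks into the `default` broker (the source name `msgtxr-pingsource-broker` is just an example):

```
apiVersion: sources.knative.dev/v1alpha2
kind: PingSource
metadata:
  name: msgtxr-pingsource-broker    # example name
spec:
  jsonData: data from pingsource
  schedule: '* * * * *'
  sink:
    ref:
      apiVersion: eventing.knative.dev/v1alpha1
      kind: Broker
      name: default
```

and the trigger mirrors the one created earlier in this lab:

```
kn trigger create capitalizer-trigger \
  --broker default \
  --filter type=dev.knative.sources.ping \
  --sink svc:msgtxr-capitalizer
```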
179 | 
180 |
181 | ## Conclusion
182 |
183 | In this lab we have learnt to configure a container based event source, a broker and a trigger that subscribes the event source to a destination.
184 |
185 | ## Cleanup
186 |
187 | Delete Eventsource
188 |
189 | ```
190 | $ oc delete -f eventing/event-source-heartbeat.yaml
191 | containersource.sources.eventing.knative.dev "heartbeat-event-source" deleted
192 | ```
193 |
194 | Delete Trigger
195 |
196 | ```
197 | $ kn trigger delete msgtxr-trigger
198 |
199 | Trigger 'msgtxr-trigger' deleted in namespace 'kn-demo'.
200 | ```
201 |
202 | Remove Broker
203 |
204 | Currently you will have to approach your cluster admin to label your namespace this way
205 | ```
206 | $ oc label namespace kn-demo knative-eventing-injection=disabled --overwrite=true
207 |
208 | namespace/kn-demo labeled
209 | ```
210 |
211 | and you can remove the broker pods in your namespace this way
212 | ```
213 | $ oc delete brokers.eventing.knative.dev --all
214 |
215 | broker.eventing.knative.dev "default" deleted
216 |
217 | ```
218 |
219 | Now there should be no running pods
220 |
221 | ```
222 | $ oc get po | grep Running
223 | ```
224 |
225 |
226 |
227 |
228 |
229 |
230 |
231 |
232 |
233 |
234 |
235 |
236 |
237 |
--------------------------------------------------------------------------------
/Issues.txt:
--------------------------------------------------------------------------------
1 |
2 | Cloudevents Failing;
3 | ====================
4 |
5 | Hi Ben for my demos today, I stood up a different cluster on AWS with only knative serving and knative eventing running on it. Nothing else was installed.
6 |
7 | Veer 18 hours ago
8 | First time everything went fine when I tested.
9 |
10 | Veer 18 hours ago
11 | Then again when I was demoing I hit the issue of posting to service
12 |
13 | Veer 18 hours ago
14 | {"level":"error","ts":1568081760.0051727,"logger":"fallback","caller":"cronjobevents/adapter.go:97","msg":"failed to send cloudeventPost http://ch-event-dumpy-channel-xjtzr.kn-demo.svc.cluster.local: dial tcp: lookup ch-event-dumpy-channel-xjtzr.kn-demo.svc.cluster.local: no such host","stacktrace":"github.com/knative/eventing/pkg/adapter/cronjobevents.(*Adapter).cronTick\n\t/go/src/github.com/knative/eventing/pkg/adapter/cronjobevents/adapter.go:97\ngithub.com/knative/eventing/vendor/github.com/robfig/cron.FuncJob.Run\n\t/go/src/github.com/knative/eventing/vendor/github.com/robfig/cron/cron.go:92\ngithub.com/knative/eventing/vendor/github.com/robfig/cron.(*Cron).runWithRecovery\n\t/go/src/github.com/knative/eventing/vendor/github.com/robfig/cron/cron.go:165"}
15 |
16 | Veer 18 hours ago
17 | So this seems to have nothing to do with service mesh installation.
18 |
19 | Veer 4 hours ago
20 | @Ben Browning I have the second cluster intact in case you want to see. The above issue is purely knative.. nothing to do with servicemesh.
21 |
22 | Ben Browning 3 hours ago
23 | @Veer sure - is the cloudevent not posting to your channel the only issue? serving works fine?
24 |
25 | Veer 3 hours ago
26 | yes serving works fine
27 |
28 | Veer 3 hours ago
29 | it is issue with cloudevents not posting both for channels and brokers
30 |
31 | Veer 3 hours ago
32 | @Ben Browning
33 |
34 | Ben Browning 3 hours ago
35 | ok - I'll take a look :slightly_smiling_face:
36 |
37 | Veer 3 hours ago
38 | I have sent you credentials on GoogleChat
39 |
40 | Veer 3 hours ago
41 | @Ben Browning
42 |
43 | Ben Browning 3 hours ago
44 | yep - just logged in there to grab them :slightly_smiling_face:
45 |
46 | Veer 3 hours ago
47 | thank you
48 |
49 | Ben Browning 3 hours ago
50 | I see a ton of 2019/09/10 17:10:35 failed to send the request: Post http://zipkin.istio-system.svc.cluster.local:9411/api/v2/spans: dial tcp: lookup zipkin.istio-system.svc.cluster.local: no such host in your cronjob pod in the kn-demo namespace...
51 |
52 | Ben Browning 3 hours ago
53 | something is expecting zipkin to exist there?
54 |
55 | Veer 3 hours ago
56 | yeah.. that is harmless
57 |
58 | Veer 3 hours ago
59 | that dumpy app was written for zipkin, but it works without that
60 |
61 | Ben Browning 3 hours ago
62 | so how do I reproduce your issue posting events?
63 |
64 | Veer 3 hours ago
65 | it should be already there in the logs
66 |
67 | Veer 3 hours ago
68 | oh.. it seems to be working now somehow
69 |
70 | Ben Browning 3 hours ago
71 | yeah everything seems to be working
72 |
73 | Veer 3 hours ago
74 | yesterday it failed at the demo and it seems to have fixed itself later
75 |
76 | Veer 3 hours ago
77 | I think if I clean it up and create again, I will start hitting that issue again.
78 |
79 | Veer 3 hours ago
80 | at least for sometime.
81 |
82 | Veer 3 hours ago
83 | those cloudevent posting failures will exist for sometime and will look bad. Should I try to recreate the issue?
84 |
85 | Ben Browning 3 hours ago
86 | sure - if there's a bug here we need to track down what's up
87 |
88 | Veer 3 hours ago
89 | will do
90 |
91 | Veer 3 hours ago
92 | I will ping you back
93 |
94 | Ben Browning 3 hours ago
95 | thanks!
96 |
97 | Veer 1 hour ago
98 | @Ben Browning The issue came back again. You can see it now on the same cluster
99 |
100 | Veer 1 hour ago
101 | $ oc logs -f cronjob-event-dumpy-cronjob-source-l85s6-7cf56577ff-vbjhq
102 | (edited)
103 |
104 | Veer 1 hour ago
105 | {"level":"error","ts":1568142120.0054426,"logger":"fallback","caller":"cronjobevents/adapter.go:97","msg":"failed to send cloudeventPost http://ch-event-dumpy-channel-rf6mp.kn-demo.svc.cluster.local: dial tcp: lookup ch-event-dumpy-channel-rf6mp.kn-demo.svc.cluster.local: no such host","stacktrace":"github.com/knative/eventing/pkg/adapter/cronjobevents.(*Adapter).cronTick\n\t/go/src/github.com/knative/eventing/pkg/adapter/cronjobevents/adapter.go:97\ngithub.com/knative/eventing/vendor/github.com/robfig/cron.FuncJob.Run\n\t/go/src/github.com/knative/eventing/vendor/github.com/robfig/cron/cron.go:92\ngithub.com/knative/eventing/vendor/github.com/robfig/cron.(*Cron).runWithRecovery\n\t/go/src/github.com/knative/eventing/vendor/github.com/robfig/cron/cron.go:165"}
106 |
107 | Ben Browning 1 hour ago
108 | looking now
109 |
110 | Ben Browning 1 hour ago
111 | Something seems up with your install - I don't see any ClusterChannelProvisioners installed
112 |
113 | Ben Browning 45 minutes ago
114 | This is knative eventing 0.7.1? It seems like somehow eventing and the in-memory-channel are not installed right
115 |
116 | Ben Browning 38 minutes ago
117 | @lberk @matzew either if you may have more input here - something is up with this eventing install and it's not clear to me what it is
118 |
119 | Ben Browning 37 minutes ago
120 | @Veer this is a vanilla knative eventing install via our operator? nothing manually modified here?
121 |
122 | Veer 37 minutes ago
123 | yes
124 |
125 | Veer 37 minutes ago
126 | vanilla install. This is the exact same thing that was working before (edited)
127 |
128 | Lukas Berk 36 minutes ago
129 | reading now
130 |
131 | Lukas Berk 36 minutes ago
132 | https://github.com/openshift/knative-eventing/pull/271 related perhaps?
133 |
134 | Veer 34 minutes ago
135 | @Ben Browning The only thing running on the cluster is knative
136 |
137 | Veer 34 minutes ago
138 | and tekton of course
139 |
140 | Veer 34 minutes ago
141 | I created it for demo yesterday
142 |
143 | Veer 32 minutes ago
144 | One observation - First time it all works fine. Once you delete the app and try to do it again, the problem shows up. I had the same issue on my other cluster. But since we suspected servicemesh, I did set up this separate cluster.
145 |
146 |
147 |
148 | Ben Browning 32 minutes ago
149 | how are you deleting the app?
150 |
151 | Veer 31 minutes ago
152 | like here https://github.com/RedHatWorkshops/knative-on-ocp4/blob/master/6.AddingChannelsAndSubscriptions.md#cleanup
153 |
154 | Veer 31 minutes ago
155 | and then oc delete all --all -n kn-demo (edited)
156 |
157 | Veer 31 minutes ago
158 | and then oc delete project kn-demo
159 |
160 | Veer 30 minutes ago
161 | sometimes the project gets stuck in Terminating state for ever. If that happens, I remove the finalizer to clean it up
162 |
163 | Ben Browning 22 minutes ago
164 | that's your problem - oc delete all --all is deleting the clusterchannelprovision
165 |
166 | Ben Browning 22 minutes ago
167 | the change @lberk shared above removes clusterchannelprovisioner from the all category to fix this, but for now you can't do that :smile:
168 |
169 | Ben Browning 21 minutes ago
170 | that's why it works the 1st time and is broken the 2nd
171 |
172 | Veer 15 minutes ago
173 | ah.. so do you have any thoughts on how it fixed itself overnight?
174 |
175 | Veer 14 minutes ago
176 | But wait, I am deleting the project and creating again from scratch (everything)
177 |
178 | Ben Browning 14 minutes ago
179 | that provisioner is a cluster-scoped resource
180 |
181 | Veer 13 minutes ago
182 | oh.. so cluster-scoped resource is getting deleted with delete all --all ? Then it makes sense on why it doesnt work
183 |
184 | Veer 13 minutes ago
185 | but I am not sure how it fixed itself though
186 |
187 | Ben Browning 13 minutes ago
188 | it fixed itself because the period global resync of the in-memory-channel-controller pod noticed at some point the provisioner was gone and recreated it
189 |
190 | Ben Browning 12 minutes ago
191 | or something did - at least that's my guess
192 |
193 | Ben Browning 10 minutes ago
194 | actually it looks like our eventing operator did that for you
195 |
196 | Ben Browning 10 minutes ago
197 | a bug in our eventing operator would be that it didn't notice immediately you deleted the provisioner - ideally it would notice immediately and recreate it for you :slightly_smiling_face:
198 |
199 | Veer 9 minutes ago
200 | yeah.. operator noticing and adding it back makes sense
201 |
202 | Veer 8 minutes ago
203 | I can confirm looking at the results of oc delete all --all that the clusterchannelprovisioner for in-memory channel was deleted.
204 |
205 | Veer 7 minutes ago
206 | I think if I scale the eventing operator to zero and start it again, it should add it back
207 |
208 | Veer 5 minutes ago
209 | Yep it worked. I had to scale in-memory-channel-controller to 0 and scale it back to 1
210 |
211 | Ben Browning 5 minutes ago
212 | cool
213 |
214 | Veer 4 minutes ago
215 | how did you find what the issue was?
216 |
217 | Ben Browning 4 minutes ago
218 | Well, I saw there was no ClusterChannelProvisioner in your cluster at all
219 |
220 | Ben Browning 4 minutes ago
221 | and there should be 1 for each type of channel you're using
222 |
223 | Veer 4 minutes ago
224 | ok
225 |
226 | Ben Browning 3 minutes ago
227 | so then it was just figuring out what caused it to go away
228 |
229 | Veer 3 minutes ago
230 | what should be there for broker?
231 |
232 | Ben Browning 2 minutes ago
233 | I'm not sure - @lberk do you know what specific broker artifacts to look for to ensure the broker is properly enabled for a namespace?
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Knative Serving, Eventing and OpenShift Pipelines Tutorial
2 |
3 | ## Table of Contents
4 |
5 | ### Installing Knative Components on OCP4
6 | * [Installing Knative Serving, Knative Eventing, OpenShift Pipeline and Client tools](./1.InstallKnativeAndTekton.md)
7 |
8 | ### OpenShift Pipelines
9 | * [Build an application using OpenShift Pipelines](./2.BuildUsingOpenShiftPipelines.md)
10 |
11 | ### Knative Serving
12 | * [Deploy Serverless Application](./3.DeployServerlessApp.md)
13 | * [Autoscaling](./4.Autoscaling.md)
14 | * [Blue Green Deployments](./5.BlueGreen.md)
15 | * [Traffic Splitting](./6.TrafficSplitting.md)
16 |
17 | ### Knative Eventing
18 | * [Add an Event Source for Direct Delivery](./7.AddingDirectEventSource.md)
19 | * [Eventing with Channels and Subscriptions](./8.AddingChannelsAndSubscriptions.md)
20 | * [Eventing with Brokers and Trigger based Subscriptions](./9.UsingBrokersAndTriggers.md)
21 |
22 |
23 | #### Using Kafka with Knative Eventing
24 | * [Configure Kafka Source to receive events from a Kafka Topic](10.KafkaSource.md)
25 | * [Eventing with Kafka Channel and Subscription](12.KafkaChannelSubscription.md)
26 | * [Using API Event Source with KafkaChannel Broker and Trigger](11.BrokerTriggerWithKafka.md)
27 |
28 |
29 | #### Using AWS SQS as Event Source
30 |
31 | * [Configure AWS SQS Source as Direct Source for Knative Service ](13.SQSDirectSource.md)
32 | * [Using AWS SQS Source with Broker and Trigger](14.SQSBrokerTrigger.md)
--------------------------------------------------------------------------------
/awssqs/awssqs-source-broker.yaml:
--------------------------------------------------------------------------------
1 | # Replace the following before applying this file:
2 | # QUEUE_URL: Replace with the AWS SQS queue.
3 |
4 | apiVersion: sources.knative.dev/v1alpha1
5 | kind: AwsSqsSource
6 | metadata:
7 | name: awssqs-sample-source
8 | spec:
9 | awsCredsSecret:
10 | name: aws-credentials
11 | key: credentials
12 | queueUrl: QUEUE_URL
13 | sink:
14 | apiVersion: eventing.knative.dev/v1alpha1
15 | kind: Broker
16 | name: default
17 |
--------------------------------------------------------------------------------
/awssqs/awssqs-source-direct.yaml:
--------------------------------------------------------------------------------
1 | # Replace the following before applying this file:
2 | # QUEUE_URL: Replace with the AWS SQS queue.
3 |
4 | apiVersion: sources.knative.dev/v1alpha1
5 | kind: AwsSqsSource
6 | metadata:
7 | name: awssqs-sample-source
8 | spec:
9 | awsCredsSecret:
10 | name: aws-credentials
11 | key: credentials
12 | queueUrl: QUEUE_URL
13 | sink:
14 | apiVersion: serving.knative.dev/v1
15 | kind: Service
16 | name: msgtxr-sl
17 |
--------------------------------------------------------------------------------
/camelk/camel-source.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: sources.knative.dev/v1alpha1
2 | kind: CamelSource
3 | metadata:
4 | name: camel-timer-source
5 | namespace: kn-demo
6 | spec:
7 | source:
8 | flow:
9 | from:
10 | uri: 'timer:tick?period=3000'
11 | steps:
12 | - set-body:
13 | constant: Hello World!
14 | sink:
15 | ref:
16 | apiVersion: serving.knative.dev/v1
17 | kind: Service
18 | name: msgtxr-sl
19 |
--------------------------------------------------------------------------------
/camelk/camel-source1.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: sources.knative.dev/v1alpha1
2 | kind: CamelSource
3 | metadata:
4 | name: camel-timer-source
5 | namespace: kn-demo
6 | spec:
7 | source:
8 | flow:
9 | from:
10 | uri: 'rest:get:hello'
11 | steps:
12 | - set-body:
13 | constant: Hello World!
14 | sink:
15 | ref:
16 | apiVersion: serving.knative.dev/v1
17 | kind: Service
18 | name: msgtxr-sl
19 |
--------------------------------------------------------------------------------
/eventing/event-source-heartbeat-180s.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: sources.eventing.knative.dev/v1alpha1
2 | kind: ContainerSource
3 | metadata:
4 | name: heartbeat-event-source
5 | spec:
6 | image: quay.io/openshift-knative/knative-eventing-sources-heartbeats:v0.7.1
7 | args:
8 | - '--label="from heartbeat source"'
9 | - '--period=180'
10 | env:
11 | - name: POD_NAME
12 | valueFrom:
13 | fieldRef:
14 | fieldPath: metadata.name
15 | - name: POD_NAMESPACE
16 | valueFrom:
17 | fieldRef:
18 | fieldPath: metadata.namespace
19 | sink:
20 | apiVersion: eventing.knative.dev/v1alpha1
21 | kind: Broker
22 | name: default
23 |
--------------------------------------------------------------------------------
/eventing/event-source-heartbeat.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: sources.eventing.knative.dev/v1alpha1
2 | kind: ContainerSource
3 | metadata:
4 | name: heartbeat-event-source
5 | spec:
6 | image: quay.io/openshift-knative/knative-eventing-sources-heartbeats:v0.13.2
7 | args:
8 | - '--label="from heartbeat source"'
9 | - '--period=20'
10 | env:
11 | - name: POD_NAME
12 | valueFrom:
13 | fieldRef:
14 | fieldPath: metadata.name
15 | - name: POD_NAMESPACE
16 | valueFrom:
17 | fieldRef:
18 | fieldPath: metadata.namespace
19 | sink:
20 | apiVersion: eventing.knative.dev/v1alpha1
21 | kind: Broker
22 | name: default
23 |
--------------------------------------------------------------------------------
/eventing/event-source-to-channel.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: sources.knative.dev/v1alpha2
2 | kind: PingSource
3 | metadata:
4 | name: msgtxr-pingsource-channel
5 | spec:
6 | jsonData: This message is from PingSource
7 | schedule: '* * * * *'
8 | sink:
9 | apiVersion: messaging.knative.dev/v1alpha2
10 | kind: InMemoryChannel
11 | name: imc-msgtxr
12 |
--------------------------------------------------------------------------------
/eventing/event-source-to-channel.yaml.bak:
--------------------------------------------------------------------------------
1 | apiVersion: sources.eventing.knative.dev/v1alpha1
2 | kind: CronJobSource
3 | metadata:
4 | name: event-dumpy-cronjob-source
5 | spec:
6 | schedule: "* * * * *"
7 | sink:
8 | apiVersion: eventing.knative.dev/v1alpha1
9 | kind: Channel
10 | name: ch-event-dumpy
11 |
--------------------------------------------------------------------------------
/eventing/event-source.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: sources.eventing.knative.dev/v1alpha1
2 | kind: CronJobSource
3 | metadata:
4 | name: event-dumpy-cronjob-source
5 | spec:
6 | schedule: "* * * * *"
7 | sink:
8 | apiVersion: serving.knative.dev/v1alpha1
9 | kind: Service
10 | name: dumpy-serverless
11 |
--------------------------------------------------------------------------------
/eventing/event-subscription.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: messaging.knative.dev/v1alpha1
2 | kind: Subscription
3 | metadata:
4 | name: msgtxr-subscriber
5 | spec:
6 | channel:
7 | apiVersion: messaging.knative.dev/v1alpha1
8 | kind: InMemoryChannel
9 | name: imc-msgtxr
10 | subscriber:
11 | ref:
12 | apiVersion: serving.knative.dev/v1alpha1
13 | kind: Service
14 | name: msgtxr-sl
15 |
--------------------------------------------------------------------------------
/eventing/event-subscription.yaml.bak:
--------------------------------------------------------------------------------
1 | apiVersion: eventing.knative.dev/v1alpha1
2 | kind: Subscription
3 | metadata:
4 | name: event-dumpy-subscriber
5 | spec:
6 | channel:
7 | apiVersion: eventing.knative.dev/v1alpha1
8 | kind: Channel
9 | name: ch-event-dumpy
10 | subscriber:
11 | ref:
12 | apiVersion: serving.knative.dev/v1alpha1
13 | kind: Service
14 | name: dumpy-serverless
15 |
--------------------------------------------------------------------------------
/eventing/event-trigger.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: eventing.knative.dev/v1alpha1
2 | kind: Trigger
3 | metadata:
4 | name: dumpy-trigger
5 | spec:
6 | filter:
7 | sourceAndType:
8 | type: dev.knative.eventing.samples.heartbeat
9 | subscriber:
10 | ref:
11 | apiVersion: serving.knative.dev/v1alpha1
12 | kind: Service
13 | name: dumpy-serverless
14 |
--------------------------------------------------------------------------------
/eventing/event-trigger.yaml.bak:
--------------------------------------------------------------------------------
1 | apiVersion: eventing.knative.dev/v1alpha1
2 | kind: Trigger
3 | metadata:
4 | name: dumpy-trigger
5 | spec:
6 | filter:
7 | sourceAndType:
8 | type: dev.knative.eventing.samples.heartbeat
9 | subscriber:
10 | ref:
11 | apiVersion: serving.knative.dev/v1alpha1
12 | kind: Service
13 | name: dumpy-serverless
14 |
--------------------------------------------------------------------------------
/eventing/in-memory-channel.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: messaging.knative.dev/v1alpha1
2 | kind: InMemoryChannel
3 | metadata:
4 | name: imc-msgtxr
5 |
--------------------------------------------------------------------------------
/eventing/in-memory-channel.yaml.bak:
--------------------------------------------------------------------------------
1 | apiVersion: eventing.knative.dev/v1alpha1
2 | kind: Channel
3 | metadata:
4 | name: ch-event-dumpy
5 | spec:
6 | provisioner:
7 | apiVersion: eventing.knative.dev/v1alpha1
8 | kind: ClusterChannelProvisioner
9 | name: in-memory
10 |
--------------------------------------------------------------------------------
/eventing/pingsource.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: sources.knative.dev/v1alpha2
2 | kind: PingSource
3 | metadata:
4 |   name: msgtxr-pingsource
5 | spec:
6 |   jsonData: This message is from PingSource
7 |   schedule: '* * * * *'
8 |   sink:
9 |     ref:
10 |       apiVersion: serving.knative.dev/v1
11 |       kind: Service
12 |       name: msgtxr-sl
13 |       namespace: kn-demo
14 |
--------------------------------------------------------------------------------
/images/1.clidownload.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RedHatWorkshops/knative-on-ocp4/f6351106d27bd8b5a3b50bc5384c589f889f66af/images/1.clidownload.png
--------------------------------------------------------------------------------
/images/awssqs1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RedHatWorkshops/knative-on-ocp4/f6351106d27bd8b5a3b50bc5384c589f889f66af/images/awssqs1.png
--------------------------------------------------------------------------------
/images/awssqs2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RedHatWorkshops/knative-on-ocp4/f6351106d27bd8b5a3b50bc5384c589f889f66af/images/awssqs2.png
--------------------------------------------------------------------------------
/images/bluegreen1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RedHatWorkshops/knative-on-ocp4/f6351106d27bd8b5a3b50bc5384c589f889f66af/images/bluegreen1.png
--------------------------------------------------------------------------------
/images/bluegreen2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RedHatWorkshops/knative-on-ocp4/f6351106d27bd8b5a3b50bc5384c589f889f66af/images/bluegreen2.png
--------------------------------------------------------------------------------
/images/brokertrigger1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RedHatWorkshops/knative-on-ocp4/f6351106d27bd8b5a3b50bc5384c589f889f66af/images/brokertrigger1.png
--------------------------------------------------------------------------------
/images/camelsource1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RedHatWorkshops/knative-on-ocp4/f6351106d27bd8b5a3b50bc5384c589f889f66af/images/camelsource1.png
--------------------------------------------------------------------------------
/images/camelsource2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RedHatWorkshops/knative-on-ocp4/f6351106d27bd8b5a3b50bc5384c589f889f66af/images/camelsource2.png
--------------------------------------------------------------------------------
/images/devconsole1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RedHatWorkshops/knative-on-ocp4/f6351106d27bd8b5a3b50bc5384c589f889f66af/images/devconsole1.png
--------------------------------------------------------------------------------
/images/devconsole2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RedHatWorkshops/knative-on-ocp4/f6351106d27bd8b5a3b50bc5384c589f889f66af/images/devconsole2.png
--------------------------------------------------------------------------------
/images/devconsole3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RedHatWorkshops/knative-on-ocp4/f6351106d27bd8b5a3b50bc5384c589f889f66af/images/devconsole3.png
--------------------------------------------------------------------------------
/images/directsource1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RedHatWorkshops/knative-on-ocp4/f6351106d27bd8b5a3b50bc5384c589f889f66af/images/directsource1.png
--------------------------------------------------------------------------------
/images/directsource2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RedHatWorkshops/knative-on-ocp4/f6351106d27bd8b5a3b50bc5384c589f889f66af/images/directsource2.png
--------------------------------------------------------------------------------
/images/directsource3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RedHatWorkshops/knative-on-ocp4/f6351106d27bd8b5a3b50bc5384c589f889f66af/images/directsource3.png
--------------------------------------------------------------------------------
/images/eventing1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RedHatWorkshops/knative-on-ocp4/f6351106d27bd8b5a3b50bc5384c589f889f66af/images/eventing1.png
--------------------------------------------------------------------------------
/images/kafkacluster.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RedHatWorkshops/knative-on-ocp4/f6351106d27bd8b5a3b50bc5384c589f889f66af/images/kafkacluster.png
--------------------------------------------------------------------------------
/images/pipeline1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RedHatWorkshops/knative-on-ocp4/f6351106d27bd8b5a3b50bc5384c589f889f66af/images/pipeline1.png
--------------------------------------------------------------------------------
/images/scaling1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RedHatWorkshops/knative-on-ocp4/f6351106d27bd8b5a3b50bc5384c589f889f66af/images/scaling1.png
--------------------------------------------------------------------------------
/images/scaling2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RedHatWorkshops/knative-on-ocp4/f6351106d27bd8b5a3b50bc5384c589f889f66af/images/scaling2.png
--------------------------------------------------------------------------------
/images/serving1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RedHatWorkshops/knative-on-ocp4/f6351106d27bd8b5a3b50bc5384c589f889f66af/images/serving1.png
--------------------------------------------------------------------------------
/images/serving2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RedHatWorkshops/knative-on-ocp4/f6351106d27bd8b5a3b50bc5384c589f889f66af/images/serving2.png
--------------------------------------------------------------------------------
/images/trafficsplit1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RedHatWorkshops/knative-on-ocp4/f6351106d27bd8b5a3b50bc5384c589f889f66af/images/trafficsplit1.png
--------------------------------------------------------------------------------
/images/trafficsplit2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RedHatWorkshops/knative-on-ocp4/f6351106d27bd8b5a3b50bc5384c589f889f66af/images/trafficsplit2.png
--------------------------------------------------------------------------------
/images/trafficsplit3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RedHatWorkshops/knative-on-ocp4/f6351106d27bd8b5a3b50bc5384c589f889f66af/images/trafficsplit3.png
--------------------------------------------------------------------------------
/kafka/channel.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: messaging.knative.dev/v1alpha1
2 | kind: Channel
3 | metadata:
4 |   name: testchannel-one
5 |
--------------------------------------------------------------------------------
/kafka/event-subscription.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: messaging.knative.dev/v1alpha1
2 | kind: Subscription
3 | metadata:
4 |   name: event-subscription
5 | spec:
6 |   channel:
7 |     apiVersion: messaging.knative.dev/v1alpha1
8 |     kind: KafkaChannel
9 |     name: testchannel-one
10 |   subscriber:
11 |     ref:
12 |       apiVersion: serving.knative.dev/v1alpha1
13 |       kind: Service
14 |       name: msgtxr-sl
15 |
--------------------------------------------------------------------------------
/kafka/kafka-channel.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: messaging.knative.dev/v1alpha1
2 | kind: KafkaChannel
3 | metadata:
4 |   name: testchannel-one
5 |
--------------------------------------------------------------------------------
/kafka/kafka-source.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: sources.knative.dev/v1alpha1
2 | kind: KafkaSource
3 | metadata:
4 |   name: kafka-source
5 | spec:
6 |   bootstrapServers:
7 |   - my-cluster-kafka-bootstrap.kafka:9092
8 |   topics:
9 |   - knative-demo-topic
10 |   sink:
11 |     ref:
12 |       apiVersion: serving.knative.dev/v1
13 |       kind: Service
14 |       name: msgtxr-sl
15 |
--------------------------------------------------------------------------------
/kafka/kafka-topic.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kafka.strimzi.io/v1beta1
2 | kind: KafkaTopic
3 | metadata:
4 |   name: knative-demo-topic
5 |   labels:
6 |     strimzi.io/cluster: my-cluster
7 | spec:
8 |   partitions: 3
9 |   replicas: 1
10 |   config:
11 |     retention.ms: 7200000
12 |     segment.bytes: 1073741824
13 |
--------------------------------------------------------------------------------
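The Kafka manifests above assume an AMQ Streams (Strimzi) cluster named `my-cluster` running in the `kafka` namespace, which is what the bootstrap address `my-cluster-kafka-bootstrap.kafka:9092` points at. As a rough sketch only (the exact steps live in 10.KafkaSource.md through 12.KafkaChannelSubscription.md), they could be applied like this:

```
# the topic is created against the Strimzi cluster in the kafka namespace
oc apply -f kafka/kafka-topic.yaml -n kafka

# the channel, subscription and source go into the demo project
oc apply -f kafka/kafka-channel.yaml -n kn-demo
oc apply -f kafka/event-subscription.yaml -n kn-demo
oc apply -f kafka/kafka-source.yaml -n kn-demo
```

Both the subscription and the source deliver events to the `msgtxr-sl` Knative service, so that service needs to exist in `kn-demo` before these objects are created.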
/knative-serving-ingress.json:
--------------------------------------------------------------------------------
1 | {
2 | "apiVersion": "v1",
3 | "kind": "Namespace",
4 | "metadata": {
5 | "annotations": {
6 | "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Namespace\",\"metadata\":{\"annotations\":{\"serving.knative.openshift.io/ownerName\":\"knative-serving\",\"serving.knative.openshift.io/ownerNamespace\":\"knative-serving\"},\"name\":\"knative-serving-ingress\"}}\n",
7 | "manifestival": "new",
8 | "openshift.io/sa.scc.mcs": "s0:c25,c20",
9 | "openshift.io/sa.scc.supplemental-groups": "1000640000/10000",
10 | "openshift.io/sa.scc.uid-range": "1000640000/10000",
11 | "serving.knative.openshift.io/ownerName": "knative-serving",
12 | "serving.knative.openshift.io/ownerNamespace": "knative-serving"
13 | },
14 | "creationTimestamp": "2020-06-01T20:17:11Z",
15 | "deletionTimestamp": "2020-06-10T03:48:05Z",
16 | "name": "knative-serving-ingress",
17 | "resourceVersion": "12414467",
18 | "selfLink": "/api/v1/namespaces/knative-serving-ingress",
19 | "uid": "499bb2d0-9c07-4c6b-954a-b46b340b804e"
20 | },
21 | "spec": {
22 | "finalizers": []
23 | },
24 | "status": {
25 | "conditions": [
26 | {
27 | "lastTransitionTime": "2020-06-10T03:48:11Z",
28 | "message": "Discovery failed for some groups, 3 failing: unable to retrieve the complete list of server APIs: clusterregistry.k8s.io/v1alpha1: the server is currently unable to handle the request, mcm.ibm.com/v1alpha1: the server is currently unable to handle the request, mcm.ibm.com/v1beta1: the server is currently unable to handle the request",
29 | "reason": "DiscoveryFailed",
30 | "status": "True",
31 | "type": "NamespaceDeletionDiscoveryFailure"
32 | },
33 | {
34 | "lastTransitionTime": "2020-06-10T03:48:23Z",
35 | "message": "All legacy kube types successfully parsed",
36 | "reason": "ParsedGroupVersions",
37 | "status": "False",
38 | "type": "NamespaceDeletionGroupVersionParsingFailure"
39 | },
40 | {
41 | "lastTransitionTime": "2020-06-10T03:48:23Z",
42 | "message": "All content successfully deleted, may be waiting on finalization",
43 | "reason": "ContentDeleted",
44 | "status": "False",
45 | "type": "NamespaceDeletionContentFailure"
46 | },
47 | {
48 | "lastTransitionTime": "2020-06-10T03:48:23Z",
49 | "message": "All content successfully removed",
50 | "reason": "ContentRemoved",
51 | "status": "False",
52 | "type": "NamespaceContentRemaining"
53 | },
54 | {
55 | "lastTransitionTime": "2020-06-10T03:48:23Z",
56 | "message": "All content-preserving finalizers finished",
57 | "reason": "ContentHasNoFinalizers",
58 | "status": "False",
59 | "type": "NamespaceFinalizersRemaining"
60 | }
61 | ],
62 | "phase": "Terminating"
63 | }
64 | }
65 |
--------------------------------------------------------------------------------
/network-policies/all-open.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 |   name: all-open
5 |   namespace: kn-demo
6 |   labels:
7 |     app: dumpy
8 | spec:
9 |   podSelector: {}
10 |   ingress:
11 |   - {}
12 |
13 |
14 |
--------------------------------------------------------------------------------
/network-policies/allow-openshift-ingress.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 |   name: allow-from-openshift-ingress
5 | spec:
6 |   ingress:
7 |   - from:
8 |     - namespaceSelector:
9 |         matchLabels:
10 |           network.openshift.io/policy-group: ingress
11 |   podSelector: {}
12 |   policyTypes:
13 |   - Ingress
14 |
--------------------------------------------------------------------------------
/network-policies/allowIngressToAppInSMMR.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 |   name: APPNAME           # replace APPNAME with your application name
5 |   namespace: NAMESPACE    # replace NAMESPACE with the namespace the app runs in
6 |   labels:
7 |     app: APPNAME
8 | spec:
9 |   podSelector:
10 |     matchLabels:
11 |       app: APPNAME
12 |   ingress:
13 |   - {}
14 |
15 |
16 |
--------------------------------------------------------------------------------
/network-policies/dumpy-network-policy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 |   name: dumpy
5 |   namespace: kn-demo
6 |   labels:
7 |     app: dumpy
8 | spec:
9 |   podSelector:
10 |     matchLabels:
11 |       app: dumpy
12 |   ingress:
13 |   - {}
14 |
15 |
16 |
--------------------------------------------------------------------------------
/network-policies/knative-eventing-network-policies.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 |   name: eventing-controller
5 |   namespace: knative-eventing
6 |   labels:
7 |     app: eventing-controller
8 | spec:
9 |   podSelector:
10 |     matchLabels:
11 |       app: eventing-controller
12 |   ingress:
13 |   - {}
14 | ---
15 | apiVersion: networking.k8s.io/v1
16 | kind: NetworkPolicy
17 | metadata:
18 |   name: eventing-webhook
19 |   namespace: knative-eventing
20 |   labels:
21 |     app: eventing-webhook
22 | spec:
23 |   podSelector:
24 |     matchLabels:
25 |       app: eventing-webhook
26 |   ingress:
27 |   - {}
28 | ---
29 | apiVersion: networking.k8s.io/v1
30 | kind: NetworkPolicy
31 | metadata:
32 |   name: sources-controller
33 |   namespace: knative-eventing
34 |   labels:
35 |     app: sources-controller
36 | spec:
37 |   podSelector:
38 |     matchLabels:
39 |       app: sources-controller
40 |   ingress:
41 |   - {}
42 |
--------------------------------------------------------------------------------
/oc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: tekton.dev/v1alpha1
2 | kind: Task
3 | metadata:
4 |   name: openshift-client
5 | spec:
6 |   inputs:
7 |     params:
8 |     - name: ARGS
9 |       description: The OpenShift CLI arguments to run
10 |       default: help
11 |   steps:
12 |   - name: oc
13 |     image: quay.io/openshiftlabs/openshift-cli-tekton-workshop:2.0
14 |     command: ["/usr/local/bin/oc"]
15 |     args:
16 |     - "${inputs.params.ARGS}"
17 |
--------------------------------------------------------------------------------
/pipeline/openshift-objects.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: image.openshift.io/v1
3 | kind: ImageStream
4 | metadata:
5 |   labels:
6 |     app: dumpy
7 |   name: dumpy
8 | ---
9 | apiVersion: apps.openshift.io/v1
10 | kind: DeploymentConfig
11 | metadata:
12 |   labels:
13 |     app: dumpy
14 |     app.openshift.io/runtime: golang
15 |   name: dumpy
16 | spec:
17 |   replicas: 1
18 |   revisionHistoryLimit: 10
19 |   selector:
20 |     app: dumpy
21 |     deploymentconfig: dumpy
22 |   strategy:
23 |     activeDeadlineSeconds: 21600
24 |     resources: {}
25 |     rollingParams:
26 |       intervalSeconds: 1
27 |       maxSurge: 25%
28 |       maxUnavailable: 25%
29 |       timeoutSeconds: 600
30 |       updatePeriodSeconds: 1
31 |     type: Rolling
32 |   template:
33 |     metadata:
34 |       labels:
35 |         app: dumpy
36 |         deploymentconfig: dumpy
37 |     spec:
38 |       containers:
39 |       - image: dumpy:latest
40 |         imagePullPolicy: Always
41 |         livenessProbe:
42 |           failureThreshold: 3
43 |           httpGet:
44 |             path: /
45 |             port: 8080
46 |             scheme: HTTP
47 |           initialDelaySeconds: 45
48 |           periodSeconds: 10
49 |           successThreshold: 1
50 |           timeoutSeconds: 1
51 |         name: dumpy
52 |         ports:
53 |         - containerPort: 8080
54 |           protocol: TCP
55 |         - containerPort: 8443
56 |           protocol: TCP
57 |         - containerPort: 8778
58 |           protocol: TCP
59 |         readinessProbe:
60 |           failureThreshold: 3
61 |           httpGet:
62 |             path: /
63 |             port: 8080
64 |             scheme: HTTP
65 |           initialDelaySeconds: 45
66 |           periodSeconds: 10
67 |           successThreshold: 1
68 |           timeoutSeconds: 5
69 |         resources: {}
70 |         terminationMessagePath: /dev/termination-log
71 |         terminationMessagePolicy: File
72 |       dnsPolicy: ClusterFirst
73 |       restartPolicy: Always
74 |       schedulerName: default-scheduler
75 |       securityContext: {}
76 |       terminationGracePeriodSeconds: 30
77 |   test: false
78 |   triggers:
79 |   - imageChangeParams:
80 |       containerNames:
81 |       - dumpy
82 |       from:
83 |         kind: ImageStreamTag
84 |         name: dumpy:latest
85 |     type: ImageChange
86 | ---
87 | apiVersion: v1
88 | kind: Service
89 | metadata:
90 |   labels:
91 |     app: dumpy
92 |   name: dumpy
93 | spec:
94 |   ports:
95 |   - name: 8080-tcp
96 |     port: 8080
97 |     protocol: TCP
98 |     targetPort: 8080
99 |   selector:
100 |     app: dumpy
101 |     deploymentconfig: dumpy
102 |   sessionAffinity: None
103 |   type: ClusterIP
104 | ---
105 | apiVersion: route.openshift.io/v1
106 | kind: Route
107 | metadata:
108 |   labels:
109 |     app: dumpy
110 |   name: dumpy
111 | spec:
112 |   port:
113 |     targetPort: 8080-tcp
114 |   to:
115 |     kind: Service
116 |     name: dumpy
117 |     weight: 100
118 |
--------------------------------------------------------------------------------
/pipeline/pipeline-resources.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: tekton.dev/v1alpha1
2 | kind: PipelineResource
3 | metadata:
4 |   name: application-image
5 | spec:
6 |   type: image
7 |   params:
8 |   - name: url
9 |     value: image-registry.openshift-image-registry.svc:5000/kn-demo/msgtxr
10 | ---
11 | apiVersion: tekton.dev/v1alpha1
12 | kind: PipelineResource
13 | metadata:
14 |   name: sourcecode-git
15 | spec:
16 |   type: git
17 |   params:
18 |   - name: url
19 |     value: https://github.com/VeerMuchandi/mesgtxformer
20 |
--------------------------------------------------------------------------------
/pipeline/pipeline.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: tekton.dev/v1alpha1
2 | kind: Pipeline
3 | metadata:
4 |   name: deploy-pipeline
5 | spec:
6 |   resources:
7 |   - name: app-git
8 |     type: git
9 |   - name: app-image
10 |     type: image
11 |   tasks:
12 |   - name: build
13 |     taskRef:
14 |       kind: Task
15 |       name: s2i-quarkus-native
16 |     params:
17 |     - name: TLSVERIFY
18 |       value: "false"
19 |     resources:
20 |       inputs:
21 |       - name: source
22 |         resource: app-git
23 |       outputs:
24 |       - name: image
25 |         resource: app-image
26 |   - name: deploy
27 |     taskRef:
28 |       kind: ClusterTask
29 |       name: openshift-client
30 |     runAfter:
31 |     - build
32 |     params:
33 |     - name: ARGS
34 |       value:
35 |       - rollout
36 |       - latest
37 |       - msgtxr
38 |
--------------------------------------------------------------------------------
/pipeline/s2i-quarkus-native.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: tekton.dev/v1alpha1
2 | kind: Task
3 | metadata:
4 |   name: s2i-quarkus-native
5 | spec:
6 |   inputs:
7 |     resources:
8 |     - name: source
9 |       type: git
10 |     params:
11 |     - name: PATH_CONTEXT
12 |       description: The location of the path to run s2i from.
13 |       default: .
14 |     - name: TLSVERIFY
15 |       description: Verify the TLS on the registry endpoint (for push/pull to a non-TLS registry)
16 |       default: "true"
17 |     - name: ARTIFACT_COPY_ARGS
18 |       default: " -r lib/ todoapp-runner.jar"
19 |     - name: JAVA_APP_JAR
20 |       default: "todoapp-runner.jar"
21 |   outputs:
22 |     resources:
23 |     - name: image
24 |       type: image
25 |   steps:
26 |   - name: generate
27 |     image: quay.io/openshift-pipeline/s2i
28 |     workingdir: /workspace/source
29 |     command: ['s2i', 'build', '$(inputs.params.PATH_CONTEXT)', 'quay.io/quarkus/ubi-quarkus-native-s2i:20.1.0-java8', '--image-scripts-url', 'image:///usr/libexec/s2i', '--as-dockerfile', '/gen-source/Dockerfile.gen', '-e', 'ARTIFACT_COPY_ARGS=$(inputs.params.ARTIFACT_COPY_ARGS)', '-e', 'JAVA_APP_JAR=$(inputs.params.JAVA_APP_JAR)']
30 |     volumeMounts:
31 |     - name: gen-source
32 |       mountPath: /gen-source
33 |   - name: build
34 |     image: quay.io/buildah/stable
35 |     workingdir: /gen-source
36 |     command: ['buildah', 'bud', '--tls-verify=$(inputs.params.TLSVERIFY)', '--layers', '-f', '/gen-source/Dockerfile.gen', '-t', '$(outputs.resources.image.url)', '.']
37 |     volumeMounts:
38 |     - name: varlibcontainers
39 |       mountPath: /var/lib/containers
40 |     - name: gen-source
41 |       mountPath: /gen-source
42 |     securityContext:
43 |       privileged: true
44 |   - name: push
45 |     image: quay.io/buildah/stable
46 |     command: ['buildah', 'push', '--tls-verify=$(inputs.params.TLSVERIFY)', '$(outputs.resources.image.url)', 'docker://$(outputs.resources.image.url)']
47 |     volumeMounts:
48 |     - name: varlibcontainers
49 |       mountPath: /var/lib/containers
50 |     securityContext:
51 |       privileged: true
52 |   volumes:
53 |   - name: varlibcontainers
54 |     emptyDir: {}
55 |   - name: gen-source
56 |     emptyDir: {}
57 |
--------------------------------------------------------------------------------
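The pipeline and eventing manifests above are applied step by step in the markdown chapters. As a quick reference only, a minimal sketch of creating them with `oc` might look like the following, assuming you are working from the repository root, are logged in to the cluster, and are using the `kn-demo` project referenced throughout the tutorial:

```
# Tekton task, pipeline and pipeline resources (roughly matching 2.BuildUsingOpenShiftPipelines.md)
oc apply -f pipeline/openshift-objects.yaml -n kn-demo
oc apply -f pipeline/s2i-quarkus-native.yaml -n kn-demo
oc apply -f pipeline/pipeline-resources.yaml -n kn-demo
oc apply -f pipeline/pipeline.yaml -n kn-demo

# Eventing objects (roughly matching 7.AddingDirectEventSource.md and 8.AddingChannelsAndSubscriptions.md)
oc apply -f eventing/pingsource.yaml -n kn-demo
oc apply -f eventing/in-memory-channel.yaml -n kn-demo
oc apply -f eventing/event-subscription.yaml -n kn-demo
```

The eventing objects all use the `msgtxr-sl` Knative service as their sink, so that serverless service has to be deployed first; the exact ordering and any chapter-specific parameters should be taken from the individual markdown files.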