├── .gitignore
├── resources
│   └── rhel-zip
├── screenshots
│   ├── builder-pipeline.png
│   ├── complex-pipeline.gif
│   ├── deploy-pipeline.png
│   ├── simple-pipeline.gif
│   ├── example-pipelinerun-complex.png
│   └── example-pipelinerun-simple.png
├── demo-pre-reqs
│   ├── images
│   │   ├── say_hello.png
│   │   ├── simple-demo.png
│   │   ├── sample_message_flow.png
│   │   └── sample-ace-application.png
│   ├── kafka
│   │   ├── kafka-connection.yaml
│   │   ├── topic.yaml
│   │   ├── ace-creds.yaml
│   │   └── eventstreams.yaml
│   ├── cp4i
│   │   └── platform-navigator.yaml
│   ├── ibm-catalog-source.yaml
│   ├── operators
│   │   ├── postgresql.yaml
│   │   ├── ibm-appconnect.yaml
│   │   ├── ibm-eventstreams.yaml
│   │   ├── tekton.yaml
│   │   └── ibm-integration.yaml
│   ├── postgresql
│   │   ├── db-data.yaml
│   │   └── database.yaml
│   ├── appconnect
│   │   └── dashboard.yaml
│   └── README.md
├── ace-projects
│   ├── .gitignore
│   ├── simple-demo
│   │   ├── application.descriptor
│   │   ├── say_hello_world.esql
│   │   ├── say_hello.msgflow
│   │   └── .project
│   ├── sample-ace-application
│   │   ├── application.descriptor
│   │   ├── todo-update.schema.json
│   │   ├── todo-item.schema.json
│   │   ├── get_id_from_update.map
│   │   ├── .project
│   │   └── sample_message_flow.msgflow
│   ├── sample-ace-config
│   │   ├── server.conf.yaml
│   │   ├── .project
│   │   └── setdbparms.txt
│   ├── TEST_SERVER
│   │   ├── .project
│   │   ├── run
│   │   │   └── sample-ace-policies
│   │   │       ├── policy.descriptor
│   │   │       └── eventstreams.policyxml
│   │   ├── config
│   │   │   └── connectors
│   │   │       └── loopback
│   │   │           ├── datasources.json
│   │   │           └── store
│   │   │               └── todos.json
│   │   ├── overrides
│   │   │   └── server.conf.yaml
│   │   └── server.conf.yaml
│   ├── sample-ace-policies
│   │   ├── policy.descriptor
│   │   ├── .project
│   │   └── eventstreams.policyxml
│   ├── sample-ace-data-sources
│   │   ├── .project
│   │   ├── datasources.json
│   │   └── store
│   │       └── todos.json
│   ├── simple-demo_Test
│   │   ├── testproject.descriptor
│   │   ├── .classpath
│   │   ├── .project
│   │   ├── simple-demo_Test.launch
│   │   └── src
│   │       └── main
│   │           └── java
│   │               └── test
│   │                   └── Simple_demo_say_hello_world_0001_Test.java
│   ├── sample-ace-application_Test
│   │   ├── testproject.descriptor
│   │   ├── .classpath
│   │   ├── .project
│   │   ├── sample-ace-application_Test.launch
│   │   └── src
│   │       └── main
│   │           └── java
│   │               └── test
│   │                   └── Sample_ace_application_sample_message_flow_get_id_from_update_message_0002_Test.java
│   └── sample-ace-application-java
│       ├── .classpath
│       ├── .project
│       └── uk
│           └── co
│               └── dalelane
│                   └── appconnect
│                       └── demo
│                           └── Base64EncodeTodoDescription.java
├── tekton
│   ├── permissions
│   │   ├── serviceaccount.yaml
│   │   ├── rolebinding.yaml
│   │   └── deployer.yaml
│   ├── tasks
│   │   ├── convert-p12-to-jks.yaml
│   │   ├── read-secret.yaml
│   │   ├── create-data-source.yaml
│   │   ├── create-config.yaml
│   │   ├── create-policy-project.yaml
│   │   ├── run-tests.yaml
│   │   ├── create-bar.yaml
│   │   ├── update-templates.yaml
│   │   ├── git-clone.yaml
│   │   └── create-integration-server.yaml
│   └── pipeline.yaml
├── 1-deploy-complex-integration-server.sh
├── 1-deploy-simple-integration-server.sh
├── LICENSE
├── 9-cleanup.sh
├── simple-pipelinerun.yaml
├── 0-setup.sh
├── complex-pipelinerun.yaml
└── README.md
/.gitignore:
--------------------------------------------------------------------------------
1 | github-credentials.yaml
2 | ibm-entitlement-key.yaml
3 | *.class
4 | .DS_Store
5 | ca.p12
--------------------------------------------------------------------------------
/resources/rhel-zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dalelane/app-connect-tekton-pipeline/HEAD/resources/rhel-zip
--------------------------------------------------------------------------------
/screenshots/builder-pipeline.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dalelane/app-connect-tekton-pipeline/HEAD/screenshots/builder-pipeline.png
--------------------------------------------------------------------------------
/screenshots/complex-pipeline.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dalelane/app-connect-tekton-pipeline/HEAD/screenshots/complex-pipeline.gif
--------------------------------------------------------------------------------
/screenshots/deploy-pipeline.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dalelane/app-connect-tekton-pipeline/HEAD/screenshots/deploy-pipeline.png
--------------------------------------------------------------------------------
/screenshots/simple-pipeline.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dalelane/app-connect-tekton-pipeline/HEAD/screenshots/simple-pipeline.gif
--------------------------------------------------------------------------------
/demo-pre-reqs/images/say_hello.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dalelane/app-connect-tekton-pipeline/HEAD/demo-pre-reqs/images/say_hello.png
--------------------------------------------------------------------------------
/demo-pre-reqs/images/simple-demo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dalelane/app-connect-tekton-pipeline/HEAD/demo-pre-reqs/images/simple-demo.png
--------------------------------------------------------------------------------
/screenshots/example-pipelinerun-complex.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dalelane/app-connect-tekton-pipeline/HEAD/screenshots/example-pipelinerun-complex.png
--------------------------------------------------------------------------------
/screenshots/example-pipelinerun-simple.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dalelane/app-connect-tekton-pipeline/HEAD/screenshots/example-pipelinerun-simple.png
--------------------------------------------------------------------------------
/demo-pre-reqs/images/sample_message_flow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dalelane/app-connect-tekton-pipeline/HEAD/demo-pre-reqs/images/sample_message_flow.png
--------------------------------------------------------------------------------
/demo-pre-reqs/images/sample-ace-application.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dalelane/app-connect-tekton-pipeline/HEAD/demo-pre-reqs/images/sample-ace-application.png
--------------------------------------------------------------------------------
/ace-projects/.gitignore:
--------------------------------------------------------------------------------
1 | /TEST_SERVER/config/common
2 | /TEST_SERVER/config/registry
3 | /TEST_SERVER/config/components
4 | /TEST_SERVER/server.conf.yaml
5 | /TEST_SERVER/console.log
6 | /TEST_SERVER/log
7 | /TEST_SERVER/run
8 |
--------------------------------------------------------------------------------
/tekton/permissions/serviceaccount.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 |   name: pipeline-deployer-serviceaccount
5 | secrets:
6 |   - name: github-credentials
7 | imagePullSecrets:
8 |   - name: ibm-entitlement-key
--------------------------------------------------------------------------------
/ace-projects/simple-demo/application.descriptor:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ace-projects/simple-demo/say_hello_world.esql:
--------------------------------------------------------------------------------
1 |
2 |
3 | CREATE COMPUTE MODULE say_hello_world
4 | CREATE FUNCTION Main() RETURNS BOOLEAN
5 | BEGIN
6 | SET OutputRoot.JSON.Data.hello = 'world';
7 | RETURN TRUE;
8 | END;
9 | END MODULE;
10 |
--------------------------------------------------------------------------------
/ace-projects/sample-ace-application/application.descriptor:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ace-projects/sample-ace-config/server.conf.yaml:
--------------------------------------------------------------------------------
1 |
2 | EnvironmentVariables:
3 |   # environment variable to add to the integration server when run
4 |   # used in the Java class uk.co.dalelane.appconnect.demo.Base64EncodeTodoDescription
5 |   TODO_TITLE_PREFIX: 'ENCODED:'
6 |
--------------------------------------------------------------------------------
/ace-projects/TEST_SERVER/.project:
--------------------------------------------------------------------------------
1 |
2 |
3 | TEST_SERVER
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/ace-projects/sample-ace-config/.project:
--------------------------------------------------------------------------------
1 |
2 |
3 | sample-ace-config
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/demo-pre-reqs/kafka/kafka-connection.yaml:
--------------------------------------------------------------------------------
1 | kind: Secret
2 | apiVersion: v1
3 | type: Opaque
4 | metadata:
5 |   name: kafka-connection-info
6 |   namespace: eventstreams
7 | stringData:
8 |   bootstrap-server: event-backbone-kafka-bootstrap.eventstreams.svc:9093
9 |   truststore-name: sample-truststore.jks
--------------------------------------------------------------------------------
/ace-projects/sample-ace-policies/policy.descriptor:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/ace-projects/sample-ace-data-sources/.project:
--------------------------------------------------------------------------------
1 |
2 |
3 | sample-ace-data-sources
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/ace-projects/TEST_SERVER/run/sample-ace-policies/policy.descriptor:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/ace-projects/TEST_SERVER/config/connectors/loopback/datasources.json:
--------------------------------------------------------------------------------
1 | {
2 | "store": {
3 | "name": "store",
4 | "connector": "postgresql",
5 | "protocol": "postgres",
6 | "host": "TEMPLATE_POSTGRES_HOST",
7 | "port": 1234,
8 | "database": "TEMPLATE_POSTGRES_DBNAME"
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/ace-projects/sample-ace-data-sources/datasources.json:
--------------------------------------------------------------------------------
1 | {
2 | "store": {
3 | "name": "store",
4 | "connector": "postgresql",
5 | "protocol": "postgres",
6 | "host": "TEMPLATE_POSTGRES_HOST",
7 | "port": TEMPLATE_POSTGRES_PORT,
8 | "database": "TEMPLATE_POSTGRES_DBNAME"
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/ace-projects/sample-ace-config/setdbparms.txt:
--------------------------------------------------------------------------------
1 | kafka::es-creds appconnect-kafka-user TEMPLATE_KAFKA_ES_PASSWORD
2 |
3 | truststore::es-truststore notused TEMPLATE_TRUSTSTORE_PASSWORD
4 |
5 | loopback::postgres-creds TEMPLATE_POSTGRES_USERNAME TEMPLATE_POSTGRES_PASSWORD
6 |
7 |
8 |
--------------------------------------------------------------------------------
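The TEMPLATE_ placeholders in this file are filled in by the pipeline before it is uploaded as a setdbparms-type Configuration. For local experimentation, the same credentials could be applied directly to an integration server work directory with mqsisetdbparms; a minimal sketch, assuming a standard /home/aceuser/ace-server work directory and placeholder values:

    # apply the Kafka, truststore and PostgreSQL credentials to a local work directory
    # (values are placeholders; the pipeline fills them in from Kubernetes secrets instead)
    mqsisetdbparms -w /home/aceuser/ace-server -n kafka::es-creds -u appconnect-kafka-user -p <kafka-password>
    mqsisetdbparms -w /home/aceuser/ace-server -n truststore::es-truststore -u notused -p <truststore-password>
    mqsisetdbparms -w /home/aceuser/ace-server -n loopback::postgres-creds -u <postgres-username> -p <postgres-password>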
/demo-pre-reqs/cp4i/platform-navigator.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: integration.ibm.com/v1beta1
2 | kind: PlatformNavigator
3 | metadata:
4 |   name: navigator
5 |   namespace: integration
6 | spec:
7 |   license:
8 |     accept: true
9 |     license: L-RJON-CD3JKX
10 |   mqDashboard: true
11 |   replicas: 1
12 |   storage:
13 |     class: ibmc-file-gold-gid
14 |   version: 2022.2.1
15 |
--------------------------------------------------------------------------------
/ace-projects/simple-demo_Test/testproject.descriptor:
--------------------------------------------------------------------------------
1 | simple-demo
--------------------------------------------------------------------------------
/demo-pre-reqs/kafka/topic.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: eventstreams.ibm.com/v1beta2
2 | kind: KafkaTopic
3 | metadata:
4 |   labels:
5 |     eventstreams.ibm.com/cluster: event-backbone
6 |   name: todo.updates
7 |   namespace: eventstreams
8 | spec:
9 |   config:
10 |     min.insync.replicas: '1'
11 |     retention.ms: '604800000'
12 |   partitions: 1
13 |   replicas: 1
14 |   topicName: TODO.UPDATES
15 |
--------------------------------------------------------------------------------
/ace-projects/sample-ace-application_Test/testproject.descriptor:
--------------------------------------------------------------------------------
1 | sample-ace-application
--------------------------------------------------------------------------------
/demo-pre-reqs/ibm-catalog-source.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: operators.coreos.com/v1alpha1
2 | kind: CatalogSource
3 | metadata:
4 |   name: ibm-operator-catalog
5 |   namespace: openshift-marketplace
6 | spec:
7 |   displayName: ibm-operator-catalog
8 |   publisher: IBM Content
9 |   sourceType: grpc
10 |   image: icr.io/cpopen/ibm-operator-catalog
11 |   updateStrategy:
12 |     registryPoll:
13 |       interval: 45m
14 |
--------------------------------------------------------------------------------
/tekton/permissions/rolebinding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 |   name: pipeline-deployer-aceflows-rolebinding
5 | subjects:
6 |   - kind: ServiceAccount
7 |     name: pipeline-deployer-serviceaccount
8 |     namespace: pipeline-ace
9 | roleRef:
10 |   apiGroup: rbac.authorization.k8s.io
11 |   kind: ClusterRole
12 |   name: pipeline-deployer-aceflows-role
13 |
--------------------------------------------------------------------------------
/ace-projects/simple-demo_Test/.classpath:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
--------------------------------------------------------------------------------
/ace-projects/sample-ace-application_Test/.classpath:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
--------------------------------------------------------------------------------
/demo-pre-reqs/operators/postgresql.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: operators.coreos.com/v1alpha1
2 | kind: Subscription
3 | metadata:
4 |   labels:
5 |     operators.coreos.com/postgresql.openshift-operators: ''
6 |   name: postgresql
7 |   namespace: openshift-operators
8 | spec:
9 |   channel: v5
10 |   installPlanApproval: Automatic
11 |   name: postgresql
12 |   source: community-operators
13 |   sourceNamespace: openshift-marketplace
14 |   startingCSV: postgresoperator.v5.2.0
15 |
--------------------------------------------------------------------------------
/demo-pre-reqs/operators/ibm-appconnect.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: operators.coreos.com/v1alpha1
2 | kind: Subscription
3 | metadata:
4 |   labels:
5 |     operators.coreos.com/ibm-appconnect.openshift-operators: ''
6 |   name: ibm-appconnect
7 |   namespace: openshift-operators
8 | spec:
9 |   channel: v6.0
10 |   installPlanApproval: Automatic
11 |   name: ibm-appconnect
12 |   source: ibm-operator-catalog
13 |   sourceNamespace: openshift-marketplace
14 |   startingCSV: ibm-appconnect.v6.0.0
15 |
--------------------------------------------------------------------------------
/demo-pre-reqs/operators/ibm-eventstreams.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: operators.coreos.com/v1alpha1
2 | kind: Subscription
3 | metadata:
4 |   labels:
5 |     operators.coreos.com/ibm-eventstreams.openshift-operators: ''
6 |   name: ibm-eventstreams
7 |   namespace: openshift-operators
8 | spec:
9 |   channel: v3.0
10 |   installPlanApproval: Automatic
11 |   name: ibm-eventstreams
12 |   source: ibm-operator-catalog
13 |   sourceNamespace: openshift-marketplace
14 |   startingCSV: ibm-eventstreams.v3.0.5
15 |
--------------------------------------------------------------------------------
/demo-pre-reqs/operators/tekton.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: operators.coreos.com/v1alpha1
2 | kind: Subscription
3 | metadata:
4 |   labels:
5 |     operators.coreos.com/openshift-pipelines-operator-rh.openshift-operators: ''
6 |   name: openshift-pipelines-operator-rh
7 |   namespace: openshift-operators
8 | spec:
9 |   channel: pipelines-1.8
10 |   installPlanApproval: Automatic
11 |   name: openshift-pipelines-operator-rh
12 |   source: redhat-operators
13 |   sourceNamespace: openshift-marketplace
14 |   startingCSV: openshift-pipelines-operator-rh.v1.8.0
15 |
--------------------------------------------------------------------------------
/demo-pre-reqs/operators/ibm-integration.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: operators.coreos.com/v1alpha1
2 | kind: Subscription
3 | metadata:
4 |   labels:
5 |     operators.coreos.com/ibm-integration-platform-navigator.openshift-operators: ''
6 |   name: ibm-integration-platform-navigator
7 |   namespace: openshift-operators
8 | spec:
9 |   channel: v6.0
10 |   installPlanApproval: Automatic
11 |   name: ibm-integration-platform-navigator
12 |   source: ibm-operator-catalog
13 |   sourceNamespace: openshift-marketplace
14 |   startingCSV: ibm-integration-platform-navigator.v6.0.3
15 |
--------------------------------------------------------------------------------
/demo-pre-reqs/postgresql/db-data.yaml:
--------------------------------------------------------------------------------
1 | kind: ConfigMap
2 | apiVersion: v1
3 | metadata:
4 |   name: pg-initial-data-cm
5 |   namespace: postgresql
6 | data:
7 |   setup_data.sql: |
8 |     /* switch database */
9 |     \c store
10 |     ALTER DEFAULT PRIVILEGES GRANT ALL ON TABLES TO pgadmin;
11 |     set schema 'public';
12 |
13 |     CREATE TABLE todos (
14 |         id INTEGER PRIMARY KEY,
15 |         user_id INTEGER NOT NULL,
16 |         title varchar(250) NOT NULL,
17 |         encoded_title varchar(500) NOT NULL,
18 |         is_completed boolean NOT NULL
19 |     );
20 |
--------------------------------------------------------------------------------
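For illustration, a row matching the table above could be inserted as follows (connection details and values are placeholders; the encoded_title value is the base64 of 'ENCODED: buy milk', mirroring what the Base64EncodeTodoDescription compute node produces):

    # add an illustrative row to the store database (placeholder host and credentials)
    psql "host=<pgbouncer-host> port=5432 dbname=store user=pgadmin" -c \
      "INSERT INTO todos (id, user_id, title, encoded_title, is_completed)
       VALUES (1, 5, 'buy milk', 'RU5DT0RFRDogYnV5IG1pbGs=', false);"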
/ace-projects/sample-ace-application-java/.classpath:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
--------------------------------------------------------------------------------
/ace-projects/TEST_SERVER/overrides/server.conf.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | RestAdminListener:
3 |   port: 7600
4 |   ipcSocket: '/tmp/ace-test-placeholder.uds'
5 | ResourceManagers:
6 |   HTTPConnector:
7 |     ListenerPort: 7800
8 |   JVM:
9 |     jvmDebugPort: 9997
10 |
11 | EnvironmentVariables:
12 |   # environment variable to add to the integration server when run
13 |   # used in the Java class uk.co.dalelane.appconnect.demo.Base64EncodeTodoDescription
14 |   TODO_TITLE_PREFIX: 'ENCODED:'
15 |
16 | Credentials:
17 |   ServerCredentials:
18 |     loopback:
19 |       postgres-creds:
20 |         username: 'dummy'
21 |         password: 'dummy'
22 |
--------------------------------------------------------------------------------
/ace-projects/sample-ace-policies/.project:
--------------------------------------------------------------------------------
1 |
2 |
3 | sample-ace-policies
4 |
5 |
6 |
7 |
8 |
9 | com.ibm.etools.mft.policy.ui.builder.PolicyBuilder
10 |
11 |
12 |
13 |
14 | com.ibm.etools.mft.policy.ui.policybuilder
15 |
16 |
17 |
18 |
19 |
20 | com.ibm.etools.mft.policy.ui.Nature
21 |
22 |
23 |
--------------------------------------------------------------------------------
/1-deploy-complex-integration-server.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # exit when any command fails
4 | set -e
5 |
6 | # Allow this script to be run from other locations,
7 | # despite the relative file paths
8 | if [[ $BASH_SOURCE = */* ]]; then
9 | cd -- "${BASH_SOURCE%/*}/" || exit
10 | fi
11 |
12 | # Common setup
13 | source 0-setup.sh
14 |
15 |
16 | print_bold "running the pipeline"
17 | PIPELINE_RUN_K8S_NAME=$(oc create -n pipeline-ace -f ./complex-pipelinerun.yaml -o name)
18 | echo $PIPELINE_RUN_K8S_NAME
19 | PIPELINE_RUN_NAME=${PIPELINE_RUN_K8S_NAME:23}
20 |
21 | print_bold "tailing pipeline logs"
22 | tkn pipelinerun logs -n pipeline-ace --follow $PIPELINE_RUN_NAME
23 |
24 | print_bold "pipeline complete"
25 |
--------------------------------------------------------------------------------
/1-deploy-simple-integration-server.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # exit when any command fails
4 | set -e
5 |
6 | # Allow this script to be run from other locations,
7 | # despite the relative file paths
8 | if [[ $BASH_SOURCE = */* ]]; then
9 | cd -- "${BASH_SOURCE%/*}/" || exit
10 | fi
11 |
12 | # Common setup
13 | source 0-setup.sh
14 |
15 |
16 | print_bold "running the pipeline"
17 | PIPELINE_RUN_K8S_NAME=$(oc create -n pipeline-ace -f ./simple-pipelinerun.yaml -o name)
18 | echo $PIPELINE_RUN_K8S_NAME
19 | PIPELINE_RUN_NAME=${PIPELINE_RUN_K8S_NAME:23}
20 |
21 | print_bold "tailing pipeline logs"
22 | tkn pipelinerun logs -n pipeline-ace --follow $PIPELINE_RUN_NAME
23 |
24 | print_bold "pipeline complete"
25 |
--------------------------------------------------------------------------------
/demo-pre-reqs/appconnect/dashboard.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: appconnect.ibm.com/v1beta1
2 | kind: Dashboard
3 | metadata:
4 |   name: ace-dashboard
5 |   namespace: ace-demo
6 | spec:
7 |   license:
8 |     accept: true
9 |     license: L-KSBM-CJ2KWU
10 |     use: CloudPakForIntegrationProduction
11 |   pod:
12 |     containers:
13 |       content-server:
14 |         resources:
15 |           limits:
16 |             cpu: 250m
17 |       control-ui:
18 |         resources:
19 |           limits:
20 |             cpu: 250m
21 |             memory: 250Mi
22 |   useCommonServices: true
23 |   version: '12.0'
24 |   storage:
25 |     class: 'ibmc-file-silver-gid'
26 |     size: 5Gi
27 |     type: persistent-claim
28 |   replicas: 1
29 |
--------------------------------------------------------------------------------
/ace-projects/sample-ace-application-java/.project:
--------------------------------------------------------------------------------
1 |
2 |
3 | sample-ace-application-java
4 |
5 |
6 |
7 |
8 |
9 | org.eclipse.jdt.core.javabuilder
10 |
11 |
12 |
13 |
14 | com.ibm.etools.mft.jcn.jcnbuilder
15 |
16 |
17 |
18 |
19 | com.ibm.etools.mft.bar.ext.barbuilder
20 |
21 |
22 |
23 |
24 |
25 | org.eclipse.jdt.core.javanature
26 | com.ibm.etools.mft.jcn.jcnnature
27 | com.ibm.etools.mft.bar.ext.barnature
28 |
29 |
30 |
--------------------------------------------------------------------------------
/ace-projects/TEST_SERVER/run/sample-ace-policies/eventstreams.policyxml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | TEMPLATE_BOOTSTRAP_SERVER
5 | PLAINTEXT
6 |
7 | TLSv1.2
8 |
9 | org.apache.kafka.common.security.scram.ScramLoginModule required;
10 |
11 |
12 |
13 |
14 |
15 | JKS
16 |
17 | false
18 |
19 |
20 |
21 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2022 Dale Lane
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/ace-projects/sample-ace-data-sources/store/todos.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "todos",
3 | "options": {
4 | "idInjection": false,
5 | "postgresql": {
6 | "table": "todos"
7 | }
8 | },
9 | "properties": {
10 | "id": {
11 | "id": true,
12 | "type": "number",
13 | "required": true
14 | },
15 | "userId": {
16 | "type": "number",
17 | "required": true,
18 | "postgresql": {
19 | "columnName": "user_id"
20 | }
21 | },
22 | "title": {
23 | "type": "string",
24 | "required": true
25 | },
26 | "encodedTitle": {
27 | "type": "string",
28 | "required": false,
29 | "postgresql": {
30 | "columnName": "encoded_title"
31 | }
32 | },
33 | "completed": {
34 | "type": "boolean",
35 | "required": true,
36 | "postgresql": {
37 | "columnName": "is_completed"
38 | }
39 | }
40 | }
41 | }
42 |
--------------------------------------------------------------------------------
/ace-projects/TEST_SERVER/config/connectors/loopback/store/todos.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "todos",
3 | "options": {
4 | "idInjection": false,
5 | "postgresql": {
6 | "table": "todos"
7 | }
8 | },
9 | "properties": {
10 | "id": {
11 | "id": true,
12 | "type": "number",
13 | "required": true
14 | },
15 | "userId": {
16 | "type": "number",
17 | "required": true,
18 | "postgresql": {
19 | "columnName": "user_id"
20 | }
21 | },
22 | "title": {
23 | "type": "string",
24 | "required": true
25 | },
26 | "encodedTitle": {
27 | "type": "string",
28 | "required": false,
29 | "postgresql": {
30 | "columnName": "encoded_title"
31 | }
32 | },
33 | "completed": {
34 | "type": "boolean",
35 | "required": true,
36 | "postgresql": {
37 | "columnName": "is_completed"
38 | }
39 | }
40 | }
41 | }
42 |
--------------------------------------------------------------------------------
/ace-projects/sample-ace-policies/eventstreams.policyxml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | TEMPLATE_BOOTSTRAP_SERVER
5 | SASL_SSL
6 | SCRAM-SHA-512
7 | TLSv1.2
8 | es-creds
9 | org.apache.kafka.common.security.scram.ScramLoginModule required;
10 |
11 |
12 |
13 |
14 | /home/aceuser/truststores/TEMPLATE_TRUSTSTORE_FILE
15 | JKS
16 | es-truststore
17 | false
18 |
19 |
20 |
21 |
--------------------------------------------------------------------------------
/ace-projects/sample-ace-application/todo-update.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "$schema": "https://json-schema.org/draft/2019-09/schema",
3 | "$id": "https://jsonplaceholder.typicode.com/todos/update.json",
4 | "type": "object",
5 | "title": "To-do update notification",
6 | "description": "A notification that a to-do item has been modified",
7 | "required": [
8 | "id"
9 | ],
10 | "properties": {
11 | "id": {
12 | "title": "todo id",
13 | "description": "The unique identifier for a to-do item that has been modified",
14 | "type": "integer",
15 | "examples": [
16 | 5
17 | ]
18 | },
19 | "message": {
20 | "title": "notification description",
21 | "description": "A free text description with background information about the modification",
22 | "type": "string",
23 | "default": "",
24 | "examples": [
25 | "set the completed state back to 'false' because it turns out I hadn't finished it after all"
26 | ]
27 | }
28 | },
29 | "examples": [
30 | {
31 | "id": 1,
32 | "message": "deleted this to-do item as it isn't needed any more"
33 | },
34 | {
35 | "id": 2,
36 | "message": "set completed to false as additional work is needed"
37 | },
38 | {
39 | "id": 3
40 | }
41 | ]
42 | }
--------------------------------------------------------------------------------
/ace-projects/simple-demo_Test/.project:
--------------------------------------------------------------------------------
1 |
2 |
3 | simple-demo_Test
4 |
5 |
6 | simple-demo
7 |
8 |
9 |
10 | org.eclipse.jdt.core.javabuilder
11 |
12 |
13 |
14 |
15 | com.ibm.etools.mft.bar.ext.barbuilder
16 |
17 |
18 |
19 |
20 | com.ibm.etools.mft.jcn.jcnbuilder
21 |
22 |
23 |
24 |
25 | com.ibm.etools.mft.msg.assembly.messageAssemblyBuilder
26 |
27 |
28 |
29 |
30 | com.ibm.etools.mft.applib.applibbuilder
31 |
32 |
33 |
34 |
35 |
36 | org.eclipse.jdt.core.javanature
37 | com.ibm.etools.mft.jcn.jcnnature
38 | com.ibm.etools.mft.bar.ext.barnature
39 | com.ibm.etools.msgbroker.tooling.testProjectNature
40 |
41 |
42 |
--------------------------------------------------------------------------------
/9-cleanup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # exit when any command fails
4 | set -e
5 |
6 | function print_bold {
7 | echo -e "\033[1m> ---------------------------------------------------------------\033[0m"
8 | echo -e "\033[1m> $1\033[0m"
9 | echo -e "\033[1m> ---------------------------------------------------------------\033[0m"
10 | }
11 |
12 |
13 | print_bold "removing github credentials"
14 | oc delete -n pipeline-ace --ignore-not-found=true -f ./github-credentials.yaml
15 |
16 | print_bold "removing docker credentials"
17 | oc delete -n pipeline-ace --ignore-not-found=true -f ./ibm-entitlement-key.yaml
18 |
19 | print_bold "removing image builder permissions"
20 | oc adm policy remove-scc-from-user privileged -z pipeline-deployer-serviceaccount -n pipeline-ace
21 |
22 | print_bold "removing deploy pipeline resources"
23 | oc delete -n pipeline-ace -l tekton.dev/pipeline=pipeline-ace-integration-server pipelineruns
24 | oc delete -n pipeline-ace --ignore-not-found=true -f ./tekton/pipeline.yaml
25 | oc delete -n pipeline-ace --ignore-not-found=true -f ./tekton/tasks
26 | oc delete -n pipeline-ace --ignore-not-found=true -f ./tekton/permissions
27 |
28 | print_bold "removing pipeline namespace"
29 | oc delete namespace --ignore-not-found=true pipeline-ace
30 |
31 | print_bold "pipeline removed"
--------------------------------------------------------------------------------
/ace-projects/sample-ace-application_Test/.project:
--------------------------------------------------------------------------------
1 |
2 |
3 | sample-ace-application_Test
4 |
5 |
6 | sample-ace-application
7 |
8 |
9 |
10 | org.eclipse.jdt.core.javabuilder
11 |
12 |
13 |
14 |
15 | com.ibm.etools.mft.bar.ext.barbuilder
16 |
17 |
18 |
19 |
20 | com.ibm.etools.mft.jcn.jcnbuilder
21 |
22 |
23 |
24 |
25 | com.ibm.etools.mft.msg.assembly.messageAssemblyBuilder
26 |
27 |
28 |
29 |
30 | com.ibm.etools.mft.applib.applibbuilder
31 |
32 |
33 |
34 |
35 |
36 | org.eclipse.jdt.core.javanature
37 | com.ibm.etools.mft.jcn.jcnnature
38 | com.ibm.etools.mft.bar.ext.barnature
39 | com.ibm.etools.msgbroker.tooling.testProjectNature
40 |
41 |
42 |
--------------------------------------------------------------------------------
/demo-pre-reqs/kafka/ace-creds.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: eventstreams.ibm.com/v1beta2
2 | kind: KafkaUser
3 | metadata:
4 |   labels:
5 |     app.kubernetes.io/instance: event-backbone
6 |     eventstreams.ibm.com/cluster: event-backbone
7 |   name: appconnect-kafka-user
8 |   namespace: eventstreams
9 | spec:
10 |   authentication:
11 |     type: scram-sha-512
12 |   authorization:
13 |     acls:
14 |       - host: '*'
15 |         operation: Read
16 |         resource:
17 |           name: '*'
18 |           patternType: literal
19 |           type: topic
20 |       - host: '*'
21 |         operation: Read
22 |         resource:
23 |           name: __schema_
24 |           patternType: prefix
25 |           type: topic
26 |       - host: '*'
27 |         operation: Write
28 |         resource:
29 |           name: '*'
30 |           patternType: literal
31 |           type: topic
32 |       - host: '*'
33 |         operation: Create
34 |         resource:
35 |           name: '*'
36 |           patternType: literal
37 |           type: topic
38 |       - host: '*'
39 |         operation: Alter
40 |         resource:
41 |           name: __schema_
42 |           patternType: prefix
43 |           type: topic
44 |       - host: '*'
45 |         operation: Read
46 |         resource:
47 |           name: '*'
48 |           patternType: literal
49 |           type: group
50 |     type: simple
51 |
--------------------------------------------------------------------------------
/ace-projects/simple-demo_Test/simple-demo_Test.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
--------------------------------------------------------------------------------
/tekton/tasks/convert-p12-to-jks.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: tekton.dev/v1beta1
2 | kind: Task
3 | metadata:
4 | name: convert-p12-to-jks
5 | spec:
6 | params:
7 | - name: p12-file
8 | description: pkcs12 file to convert
9 | - name: password
10 | description: password for the p12 file, also used for the JKS file that is created
11 |
12 |
13 | workspaces:
14 | - name: output
15 | description: workspace to write the jks to
16 |
17 |
18 | results:
19 | - name: file
20 | description: location of JKS file created
21 | - name: password
22 | description: password of JKS file created
23 |
24 |
25 | steps:
26 | - name: run-java
27 | image: openjdk:11
28 | script: |
29 | #!/bin/sh
30 |
31 | set -e
32 |
33 | echo "location of pkcs12 file to convert"
34 | ls -l $(params.p12-file)
35 |
36 | echo "location of jks file to create"
37 | JKS_FILE="$(params.p12-file).jks"
38 | echo $JKS_FILE
39 |
40 | echo "password to use"
41 | echo "$(params.password)"
42 |
43 | keytool -importkeystore \
44 | -srckeystore $(params.p12-file) -srcstoretype pkcs12 \
45 | -srcstorepass $(params.password) \
46 | -destkeystore $JKS_FILE -deststoretype jks \
47 | -deststorepass $(params.password)
48 |
49 | echo "writing results for later tasks to reuse"
50 | echo -n "$JKS_FILE" > $(results.file.path)
51 | echo -n "$(params.password)" > $(results.password.path)
52 |
--------------------------------------------------------------------------------
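Outside of Tekton, the conversion this task performs comes down to a single keytool invocation; a stand-alone sketch with placeholder file name and password:

    # convert a PKCS12 truststore to JKS, keeping the same password (placeholder values)
    P12_FILE=ca.p12
    STORE_PASSWORD=<truststore-password>
    keytool -importkeystore \
      -srckeystore "$P12_FILE" -srcstoretype pkcs12 -srcstorepass "$STORE_PASSWORD" \
      -destkeystore "$P12_FILE.jks" -deststoretype jks -deststorepass "$STORE_PASSWORD"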
/demo-pre-reqs/postgresql/database.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: postgres-operator.crunchydata.com/v1beta1
2 | kind: PostgresCluster
3 | metadata:
4 |   name: store
5 |   namespace: postgresql
6 | spec:
7 |   port: 5432
8 |   databaseInitSQL:
9 |     name: pg-initial-data-cm
10 |     key: setup_data.sql
11 |   proxy:
12 |     pgBouncer:
13 |       port: 5432
14 |       replicas: 1
15 |   backups:
16 |     pgbackrest:
17 |       repos:
18 |         - name: repo1
19 |           volume:
20 |             volumeClaimSpec:
21 |               accessModes:
22 |                 - "ReadWriteOnce"
23 |               resources:
24 |                 requests:
25 |                   storage: 1Gi
26 |   openshift: true
27 |   patroni:
28 |     leaderLeaseDurationSeconds: 30
29 |     port: 8008
30 |     syncPeriodSeconds: 10
31 |     dynamicConfiguration:
32 |       postgresql:
33 |         pg_hba:
34 |           - "hostnossl all all all md5"
35 |           - "host all all all md5"
36 |         parameters:
37 |           shared_preload_libraries: timescaledb
38 |   instances:
39 |     - dataVolumeClaimSpec:
40 |         accessModes:
41 |           - ReadWriteOnce
42 |         resources:
43 |           requests:
44 |             storage: 1Gi
45 |       replicas: 1
46 |   postgresVersion: 13
47 |   users:
48 |     #
49 |     # credentials for use by ACE
50 |     - name: cp4i
51 |       databases:
52 |         - store
53 |       options: SUPERUSER
54 |     #
55 |     # credentials for interactive use
56 |     # (e.g. when using PgAdmin)
57 |     - name: pgadmin
58 |       databases:
59 |         - store
60 |
--------------------------------------------------------------------------------
/ace-projects/sample-ace-application_Test/sample-ace-application_Test.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
--------------------------------------------------------------------------------
/demo-pre-reqs/kafka/eventstreams.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: eventstreams.ibm.com/v1beta2
2 | kind: EventStreams
3 | metadata:
4 |   name: event-backbone
5 |   namespace: eventstreams
6 | spec:
7 |   adminApi: {}
8 |   adminUI: {}
9 |   apicurioRegistry: {}
10 |   license:
11 |     accept: true
12 |     use: CloudPakForIntegrationNonProduction
13 |   requestIbmServices:
14 |     iam: true
15 |     monitoring: true
16 |   restProducer: {}
17 |   strimziOverrides:
18 |     entityOperator:
19 |       topicOperator: {}
20 |     kafka:
21 |       authorization:
22 |         authorizerClass: com.ibm.eventstreams.runas.authorizer.RunAsAuthorizer
23 |         supportsAdminApi: true
24 |         type: custom
25 |       config:
26 |         num.network.threads: 3
27 |         inter.broker.protocol.version: '3.2'
28 |         log.cleaner.threads: 6
29 |         num.io.threads: 24
30 |         num.replica.fetchers: 1
31 |         min.insync.replicas: 1
32 |         log.message.format.version: '3.2'
33 |         offsets.topic.replication.factor: 1
34 |         default.replication.factor: 1
35 |       listeners:
36 |         - authentication:
37 |             type: scram-sha-512
38 |           name: external
39 |           port: 9094
40 |           tls: true
41 |           type: route
42 |         - authentication:
43 |             type: scram-sha-512
44 |           name: tls
45 |           port: 9093
46 |           tls: true
47 |           type: internal
48 |       replicas: 1
49 |       storage:
50 |         type: ephemeral
51 |     zookeeper:
52 |       replicas: 1
53 |       storage:
54 |         type: ephemeral
55 |   version: 11.0.4
56 |
--------------------------------------------------------------------------------
/ace-projects/sample-ace-application/todo-item.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "$schema": "https://json-schema.org/draft/2019-09/schema",
3 | "$id": "https://jsonplaceholder.typicode.com/todos/item.json",
4 | "type": "object",
5 | "title": "To-do item",
6 | "description": "Schema for stored todo items, based on the typicode REST API",
7 | "required": [
8 | "userId",
9 | "id",
10 | "title",
11 | "completed"
12 | ],
13 | "properties": {
14 | "userId": {
15 | "type": "integer",
16 | "title": "user id",
17 | "description": "The unique identifier for the user who owns the to-do item",
18 | "examples": [
19 | 5
20 | ]
21 | },
22 | "id": {
23 | "type": "integer",
24 | "title": "todo item id",
25 | "description": "The unique identifier for the to-do item",
26 | "examples": [
27 | 100
28 | ]
29 | },
30 | "title": {
31 | "type": "string",
32 | "title": "todo item text",
33 | "description": "A free-text description of the to-do item",
34 | "examples": [
35 | "excepturi a et neque qui expedita vel voluptate"
36 | ]
37 | },
38 | "completed": {
39 | "type": "boolean",
40 | "default": false,
41 | "title": "is the todo item completed",
42 | "description": "True if the to-do item has been completed, false if it is still pending",
43 | "examples": [
44 | true,
45 | false
46 | ]
47 | }
48 | },
49 | "examples": [
50 | {
51 | "userId": 1,
52 | "id": 1,
53 | "title": "delectus aut autem",
54 | "completed": false
55 | },
56 | {
57 | "userId": 3,
58 | "id": 50,
59 | "title": "cupiditate necessitatibus ullam aut quis dolor voluptate",
60 | "completed": true
61 | },
62 | {
63 | "userId": 5,
64 | "id": 100,
65 | "title": "excepturi a et neque qui expedita vel voluptate",
66 | "completed": false
67 | }
68 | ]
69 | }
--------------------------------------------------------------------------------
/tekton/permissions/deployer.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 |   name: pipeline-deployer-aceflows-role
5 | rules:
6 |   # ########################################################
7 |   # permissions for creating ACE resources
8 |   # ########################################################
9 |
10 |   - apiGroups:
11 |       - appconnect.ibm.com
12 |     resources:
13 |       - configurations
14 |       - integrationservers
15 |     verbs:
16 |       - create
17 |       - patch
18 |       - update
19 |       - get
20 |
21 |   # ########################################################
22 |   # permissions for creating the custom Docker image with
23 |   # the ACE bar baked into it
24 |   # ########################################################
25 |
26 |   - apiGroups:
27 |       - image.openshift.io
28 |     resources:
29 |       - imagestreams
30 |     verbs:
31 |       - create
32 |       - update
33 |       - patch
34 |       - get
35 |   - apiGroups:
36 |       - build.openshift.io
37 |     resources:
38 |       - builds
39 |     verbs:
40 |       - list
41 |       - get
42 |       - watch
43 |   - apiGroups:
44 |       - build.openshift.io
45 |     resources:
46 |       - buildconfigs
47 |     verbs:
48 |       - create
49 |       - update
50 |       - patch
51 |       - get
52 |   - apiGroups:
53 |       - build.openshift.io
54 |     resources:
55 |       - buildconfigs
56 |       - buildconfigs/instantiatebinary
57 |     verbs:
58 |       - get
59 |       - list
60 |       - create
61 |
62 |   # ########################################################
63 |   # permissions for reading the secrets that contain
64 |   # values for updating templates
65 |   # ########################################################
66 |
67 |   - apiGroups:
68 |       - ""
69 |     resources:
70 |       - secrets
71 |     verbs:
72 |       - get
73 |
--------------------------------------------------------------------------------
/tekton/tasks/read-secret.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: tekton.dev/v1beta1
2 | kind: Task
3 | metadata:
4 | name: read-secret
5 | spec:
6 | params:
7 | - name: name
8 | description: the name of the secret to download
9 | - name: namespace
10 | description: the namespace that the secret is in
11 | - name: key
12 | description: which value from the secret to download
13 |
14 |
15 | workspaces:
16 | - name: output
17 | description: workspace to write the secret to
18 |
19 |
20 | results:
21 | - name: file
22 | description: location of the file that the secret contents are written to
23 | - name: value
24 | description: contents of the secret
25 |
26 |
27 | steps:
28 | - name: run
29 | image: image-registry.openshift-image-registry.svc:5000/openshift/tools
30 | script: |
31 | #!/bin/sh
32 |
33 | set -e
34 |
35 | echo "preparing folder to store secrets in"
36 | SECRETS_FOLDER="/workspace/output/downloaded-secrets"
37 | mkdir -p $SECRETS_FOLDER
38 | echo $SECRETS_FOLDER
39 | ls -l $SECRETS_FOLDER
40 |
41 | echo "preparing name to store secret value in"
42 | SECRET_FILE="$SECRETS_FOLDER/$(params.namespace)-$(params.name)-$(params.key)"
43 |
44 | echo "retrieving value from the secret"
45 | ESCAPED_KEY=$(echo $(params.key) | sed s/\\./'\\'./)
46 | oc get secret -n $(params.namespace) $(params.name) -o jsonpath="{.data.$ESCAPED_KEY}" | base64 -d > $SECRET_FILE
47 | ls -l $SECRETS_FOLDER
48 |
49 | echo "writing results"
50 | echo -n $SECRET_FILE > $(results.file.path)
51 |
52 | if (( $(stat -c%s $SECRET_FILE) > 800 ))
53 | then
54 | echo "file contents too large to include in task results"
55 | else
56 | cat $SECRET_FILE > $(results.value.path)
57 | fi
58 |
--------------------------------------------------------------------------------
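The sed expression in this task escapes the first dot in the key name so that the oc jsonpath query treats it as part of the key rather than as a path separator. A small illustration with hypothetical secret and key names:

    # extract a key whose name contains a dot (hypothetical secret and key names)
    KEY="ca.p12"
    ESCAPED_KEY=$(echo $KEY | sed s/\\./'\\'./)    # ca.p12 -> ca\.p12
    oc get secret -n eventstreams <secret-name> -o jsonpath="{.data.$ESCAPED_KEY}" | base64 -d > /tmp/ca.p12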
/ace-projects/simple-demo_Test/src/main/java/test/Simple_demo_say_hello_world_0001_Test.java:
--------------------------------------------------------------------------------
1 | package test;
2 |
3 | import org.hamcrest.Matchers;
4 | import org.junit.jupiter.api.AfterEach;
5 | import org.junit.jupiter.api.Test;
6 |
7 | import com.ibm.integration.test.v1.NodeSpy;
8 | import com.ibm.integration.test.v1.SpyObjectReference;
9 | import com.ibm.integration.test.v1.TestMessageAssembly;
10 | import com.ibm.integration.test.v1.TestSetup;
11 | import com.ibm.integration.test.v1.exception.TestException;
12 |
13 | import static com.ibm.integration.test.v1.Matchers.*;
14 | import static org.hamcrest.MatcherAssert.assertThat;
15 | import static org.junit.jupiter.api.Assertions.assertEquals;
16 |
17 | public class Simple_demo_say_hello_world_0001_Test {
18 |
19 | /*
20 | * Simple_demo_say_hello_world_0001_Test Test generated by IBM App Connect
21 | * Enterprise Toolkit 12.0.6.0 on Oct 26, 2022 8:45:58 AM
22 | */
23 |
24 | @AfterEach
25 | public void cleanupTest() throws TestException {
26 | // Ensure any mocks created by a test are cleared after the test runs
27 | TestSetup.restoreAllMocks();
28 | }
29 |
30 | @Test
31 | public void simple_demo_say_hello_world_TestCase_001() throws TestException {
32 |
33 | // Define the SpyObjectReference
34 | SpyObjectReference nodeReference = new SpyObjectReference().application("simple-demo").messageFlow("say_hello")
35 | .node("world");
36 |
37 | // Initialise a NodeSpy
38 | NodeSpy nodeSpy = new NodeSpy(nodeReference);
39 |
40 | // Create a blank message assembly
41 | TestMessageAssembly inputMessageAssembly = new TestMessageAssembly();
42 |
43 | // Call the message flow node with the blank Message Assembly
44 | nodeSpy.evaluate(inputMessageAssembly, true, "in");
45 |
46 | /* Compare Output Message 1 at output terminal out */
47 | TestMessageAssembly actualMessageAssembly = nodeSpy.propagatedMessageAssembly("out", 1);
48 |
49 | // Make sure the JSON still exists
50 | assertEquals("world", actualMessageAssembly.messagePath("JSON.Data.hello").getStringValue());
51 | }
52 |
53 | }
54 |
--------------------------------------------------------------------------------
/ace-projects/sample-ace-application/get_id_from_update.map:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
--------------------------------------------------------------------------------
/simple-pipelinerun.yaml:
--------------------------------------------------------------------------------
1 | #
2 | # Sample pipelinerun for deploying an App Connect Enterprise integration server
3 | #
4 | # You need to modify this with details of your own App Connect application.
5 | #
6 | # This sample shows how to deploy a simple stand-alone App Connect application
7 |
8 | apiVersion: tekton.dev/v1beta1
9 | kind: PipelineRun
10 | metadata:
11 |   generateName: ace-deploy-
12 | spec:
13 |   serviceAccountName: pipeline-deployer-serviceaccount
14 |   params:
15 |     # ########################################################
16 |     # where to run the App Connect application
17 |     # ########################################################
18 |     # what to call the IntegrationServer that runs your application
19 |     - name: integration-server-name
20 |       value: "hello-world"
21 |     # where to run your application
22 |     - name: ace-namespace
23 |       value: "ace-demo"
24 |
25 |     # ########################################################
26 |     # where to find the projects to create
27 |     # ########################################################
28 |     # location of the git repository with your application
29 |     - name: git-repository
30 |       value: "https://github.com/dalelane/app-connect-tekton-pipeline"
31 |
32 |     # name of the App Connect project to deploy
33 |     # This should be the name of a folder in ace-projects
34 |     - name: ace-project-name
35 |       value: "simple-demo"
36 |
37 |     # name of an App Connect test project
38 |     # This is used to verify the bar before the new
39 |     # Integration Server is deployed
40 |     # This should be the name of a folder in ace-projects
41 |     - name: test-project-name
42 |       value: "simple-demo_Test"
43 |
44 |
45 |   pipelineRef:
46 |     name: pipeline-ace-integration-server
47 |
48 |
49 |
50 |   workspaces:
51 |     - name: pipeline-shared-workspace
52 |       volumeClaimTemplate:
53 |         spec:
54 |           storageClassName: ibmc-block-gold
55 |           accessModes:
56 |             - ReadWriteOnce
57 |           resources:
58 |             requests:
59 |               storage: 100Mi
60 |
61 |
--------------------------------------------------------------------------------
/ace-projects/simple-demo/say_hello.msgflow:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
--------------------------------------------------------------------------------
/ace-projects/sample-ace-application-java/uk/co/dalelane/appconnect/demo/Base64EncodeTodoDescription.java:
--------------------------------------------------------------------------------
1 | package uk.co.dalelane.appconnect.demo;
2 |
3 | import java.util.Base64;
4 |
5 | import com.ibm.broker.javacompute.MbJavaComputeNode;
6 | import com.ibm.broker.plugin.MbElement;
7 | import com.ibm.broker.plugin.MbException;
8 | import com.ibm.broker.plugin.MbMessage;
9 | import com.ibm.broker.plugin.MbMessageAssembly;
10 | import com.ibm.broker.plugin.MbOutputTerminal;
11 | import com.ibm.broker.plugin.MbUserException;
12 |
13 | public class Base64EncodeTodoDescription extends MbJavaComputeNode {
14 |
15 | public void evaluate(MbMessageAssembly inAssembly) throws MbException {
16 | MbMessage inMessage = inAssembly.getMessage();
17 | MbMessageAssembly outAssembly = null;
18 | try {
19 | MbMessage outMessage = new MbMessage(inMessage);
20 | MbElement outputRoot = outMessage.getRootElement();
21 |
22 | // get the todo list item title
23 | MbElement titleElement = outputRoot.getFirstElementByPath("JSON/Data/title");
24 | String title = titleElement.getValueAsString();
25 |
26 | // prefix the title with an environment variable
27 | // (just to demonstrate we have provided an env var)
28 | title = String.join(" ",
29 | System.getenv("TODO_TITLE_PREFIX"),
30 | title);
31 |
32 | // encode it
33 | byte[] encodedBytes = Base64.getEncoder().encode(title.getBytes());
34 | String encodedTitle = new String(encodedBytes);
35 |
36 | // add the encoded title to the output message
37 | titleElement.createElementAfter(MbElement.TYPE_NAME_VALUE, "encodedTitle", encodedTitle);
38 |
39 | // prepare the output message to return
40 | outAssembly = new MbMessageAssembly(inAssembly, outMessage);
41 | }
42 | catch (MbException e) {
43 | throw e;
44 | }
45 | catch (RuntimeException e) {
46 | throw e;
47 | }
48 | catch (Exception e) {
49 | throw new MbUserException(this, "evaluate()", "", "", e.toString(), null);
50 | }
51 |
52 | // write to the output terminal
53 | MbOutputTerminal out = getOutputTerminal("out");
54 | out.propagate(outAssembly);
55 | }
56 | }
57 |
--------------------------------------------------------------------------------
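The prefix-and-encode step performed by this compute node can be reproduced on the command line to see what ends up in the encodedTitle field (illustrative title; TODO_TITLE_PREFIX comes from the EnvironmentVariables shown in the server.conf.yaml files above):

    # reproduce the encodedTitle value for an example to-do title
    TODO_TITLE_PREFIX='ENCODED:'
    TITLE='buy milk'
    echo -n "$TODO_TITLE_PREFIX $TITLE" | base64    # -> RU5DT0RFRDogYnV5IG1pbGs=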
/0-setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # exit when any command fails
4 | set -e
5 |
6 | function print_bold {
7 | echo -e "\033[1m> ---------------------------------------------------------------\033[0m"
8 | echo -e "\033[1m> $1\033[0m"
9 | echo -e "\033[1m> ---------------------------------------------------------------\033[0m"
10 | }
11 |
12 | print_bold "checking for tekton CLI"
13 | if ! command -v tkn &> /dev/null
14 | then
15 | echo "ERROR! This script needs tkn. Install it using:"
16 | echo "ERROR! brew install tektoncd-cli"
17 | exit 1
18 | fi
19 | print_bold "checking for oc CLI"
20 | if ! command -v oc &> /dev/null
21 | then
22 | echo "ERROR! This script needs tkn. Install it using:"
23 | echo "ERROR! brew install openshift-cli"
24 | exit 1
25 | fi
26 | print_bold "checking for IBM entitlement key env var"
27 | if [[ -z "$IBM_ENTITLEMENT_KEY" ]]; then
28 | echo "You must set an IBM_ENTITLEMENT_KEY environment variable" 1>&2
29 | echo "This is needed to pull the container images used by the pipeline" 1>&2
30 | echo "Create your entitlement key at https://myibm.ibm.com/products-services/containerlibrary" 1>&2
31 | echo "Set it like this:" 1>&2
32 | echo " export IBM_ENTITLEMENT_KEY=..." 1>&2
33 | exit 1
34 | fi
35 |
36 | print_bold "creating namespace to run pipelines in"
37 | oc create namespace pipeline-ace --dry-run=client -o yaml | oc apply -f -
38 |
39 | print_bold "checking for github credentials for cloning the repo from a pipeline"
40 | if test -f "github-credentials.yaml"; then
41 | oc apply -n pipeline-ace -f ./github-credentials.yaml
42 | fi
43 |
44 | print_bold "storing docker credentials for pulling image for BAR file builder and tester"
45 | oc create secret docker-registry ibm-entitlement-key \
46 | --docker-username=cp \
47 | --docker-password=$IBM_ENTITLEMENT_KEY \
48 | --docker-server=cp.icr.io \
49 | --namespace=pipeline-ace --dry-run=client -o yaml | oc apply -f -
50 |
51 | print_bold "creating service account to run the pipelines as"
52 | oc apply -n pipeline-ace -f ./tekton/permissions/serviceaccount.yaml
53 |
54 | print_bold "setting up permissions for the deploy pipeline"
55 | oc apply -n pipeline-ace -f ./tekton/permissions
56 |
57 | print_bold "adding image builder permissions"
58 | oc adm policy add-scc-to-user privileged -z pipeline-deployer-serviceaccount -n pipeline-ace
59 |
60 | print_bold "creating tasks for the deployment pipeline"
61 | oc apply -n pipeline-ace -f ./tekton/tasks
62 |
63 | print_bold "creating deployment pipeline"
64 | oc apply -n pipeline-ace -f ./tekton/pipeline.yaml
65 |
--------------------------------------------------------------------------------
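0-setup.sh only applies github-credentials.yaml if the file exists; it is listed in the root .gitignore so it never gets committed. For a private fork, one way to generate it is sketched below (placeholder values; only needed when the pipeline must authenticate to clone the repository):

    # generate github-credentials.yaml before running 0-setup.sh (placeholder values)
    oc create secret generic github-credentials \
      --type=kubernetes.io/basic-auth \
      --from-literal=username=<github-username> \
      --from-literal=password=<github-access-token> \
      --dry-run=client -o yaml > github-credentials.yaml
    # Tekton also expects an annotation on the secret identifying the git host it applies to,
    # e.g. tekton.dev/git-0: https://github.com, which can be added to the generated file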
/tekton/tasks/create-data-source.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: tekton.dev/v1beta1
2 | kind: Task
3 | metadata:
4 | name: ace-create-datasource
5 | spec:
6 | params:
7 | # --------------------------------------------------------------
8 | # where to create the configuration
9 | # --------------------------------------------------------------
10 | - name: datasources-name
11 | description: name to give the created data sources resource
12 | - name: ace-namespace
13 | description: namespace where the Configuration resource should be created
14 | # --------------------------------------------------------------
15 | # what to create
16 | # --------------------------------------------------------------
17 | - name: datasources-project-template
18 | description: folder containing the template data sources project
19 |
20 |
21 | workspaces:
22 | - name: output
23 | description: workspace with the data source project template
24 |
25 |
26 | results:
27 | - name: name
28 | description: name of the created configuration object
29 |
30 |
31 | steps:
32 | - name: run
33 | image: image-registry.openshift-image-registry.svc:5000/openshift/tools
34 | script: |
35 | #!/bin/sh
36 |
37 | set -e
38 |
39 | echo "getting zip executable from workspace"
40 | ZIP="/workspace/output/resources/rhel-zip"
41 |
42 | echo "identifying location of data sources template"
43 | DATASOURCES_FOLDER="/workspace/output/$(params.datasources-project-template)"
44 | echo $DATASOURCES_FOLDER
45 | ls -l $DATASOURCES_FOLDER
46 |
47 | echo "creating data sources zip file"
48 | cd "$DATASOURCES_FOLDER"
49 | $ZIP -r /tmp/datasources.zip *
50 | ls -l /tmp/
51 |
52 | echo "creating data sources Configuration spec"
53 | cat >datasources.yaml < $(results.name.path)
73 |
--------------------------------------------------------------------------------
/tekton/tasks/create-config.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: tekton.dev/v1beta1
2 | kind: Task
3 | metadata:
4 | name: ace-create-configuration
5 | spec:
6 | params:
7 | # --------------------------------------------------------------
8 | # where to create the configuration
9 | # --------------------------------------------------------------
10 | - name: config-name
11 | description: name to give the created Configuration resource
12 | - name: ace-namespace
13 | description: namespace where the Configuration resource should be created
14 | # --------------------------------------------------------------
15 | # what to create
16 | # --------------------------------------------------------------
17 | - name: config-file
18 | description: file containing configuration to deploy
19 | - name: config-type
20 | description: type of configuration (e.g. setdbparms, serverconf, truststore etc.)
21 |
22 |
23 | workspaces:
24 | - name: output
25 | description: workspace with the configuration file to deploy
26 |
27 |
28 | results:
29 | - name: name
30 | description: name of the created configuration object
31 |
32 |
33 | steps:
34 | - name: run
35 | image: image-registry.openshift-image-registry.svc:5000/openshift/tools
36 | script: |
37 | #!/bin/sh
38 |
39 | set -e
40 |
41 | echo "ensuring that the config file location is absolute"
42 | PARENT_FOLDER="/workspace/output"
43 | if [[ "$(params.config-file)" == $PARENT_FOLDER* ]]
44 | then
45 | CONFIG_FILE=$(params.config-file)
46 | else
47 | CONFIG_FILE="$PARENT_FOLDER/$(params.config-file)"
48 | fi
49 | echo $CONFIG_FILE
50 |
51 | echo "confirming presence of configuration file"
52 | ls -l $CONFIG_FILE
53 |
54 | echo "creating Configuration spec"
55 | cat >/tmp/configspec.yaml < $(results.name.path)
75 |
--------------------------------------------------------------------------------
/tekton/tasks/create-policy-project.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: tekton.dev/v1beta1
2 | kind: Task
3 | metadata:
4 | name: ace-create-policy-project
5 | spec:
6 | params:
7 | # --------------------------------------------------------------
8 | # where to create the configuration
9 | # --------------------------------------------------------------
10 | - name: policies-name
11 | description: name to give the created policy Configuration resource
12 | - name: ace-namespace
13 | description: namespace where the App Connect Operator is running
14 | # --------------------------------------------------------------
15 | # what to create
16 | # --------------------------------------------------------------
17 | - name: policies-project-folder
18 | description: folder containing the template policies project
19 |
20 |
21 | workspaces:
22 | - name: output
23 | description: workspace with the policy project template
24 |
25 |
26 | results:
27 | - name: name
28 | description: name of the created configuration object
29 |
30 |
31 | steps:
32 | - name: run
33 | image: image-registry.openshift-image-registry.svc:5000/openshift/tools
34 | script: |
35 | #!/bin/sh
36 |
37 | set -e
38 |
39 | echo "getting zip executable from workspace"
40 | ZIP="/workspace/output/resources/rhel-zip"
41 |
42 | echo "identifying location of policy project"
43 | POLICY_FOLDER="/workspace/output/$(params.policies-project-folder)"
44 | echo $POLICY_FOLDER
45 | ls -l $POLICY_FOLDER
46 |
47 | echo "creating policy project zip file"
48 | cd "$POLICY_FOLDER"
49 | POLICY_NAME="$(basename $(pwd))"
50 | cd ..
51 | $ZIP -r /tmp/policies.zip ./$POLICY_NAME
52 | ls -l /tmp/
53 |
54 | echo "creating policy Configuration spec"
55 | cat >/tmp/policiesproject.yaml < $(results.name.path)
75 |
--------------------------------------------------------------------------------
/tekton/tasks/run-tests.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: tekton.dev/v1beta1
2 | kind: Task
3 | metadata:
4 | name: ace-run-tests
5 | spec:
6 |
7 | params:
8 | - name: bar-file-location
9 | description: location of the bar file to be tested
10 |
11 | - name: test-project-name
12 | description: name of the project containing the tests to run
13 |
14 | workspaces:
15 | - name: output
16 | description: workspace with the ACE resources in
17 |
18 | steps:
19 | - name: make-test-project-writable
20 | image: image-registry.openshift-image-registry.svc:5000/openshift/tools
21 | script: |
22 | #!/bin/bash
23 |
24 | set -e
25 |
26 | echo "making test project writable to allow compiling"
27 | chmod -R a+wrx "/workspace/output/ace-projects/$(params.test-project-name)"
28 |
29 |
30 | - name: run-tests
31 | # see https://www.ibm.com/docs/en/app-connect/12.0?topic=cacerid-building-sample-app-connect-enterprise-image-using-docker
32 | # for alternate images and versions that can be used for this
33 | image: cp.icr.io/cp/appc/ace:12.0.6.0-r1@sha256:003ca6615312b0987fca1c09d676a77fc2c77ecc8607dfaa49f2e8e1976a6a15
34 | securityContext:
35 | # run as the aceuser user id
36 | runAsUser: 1001
37 | runAsGroup: 1001
38 | runAsNonRoot: false
39 | env:
40 | - name: LICENSE
41 | value: accept
42 | script: |
43 | #!/bin/bash
44 |
45 | set -e
46 |
47 | echo "setting up environment"
48 | source /opt/ibm/ace-12/server/bin/mqsiprofile
49 |
50 | echo "verifying bar output location"
51 | BAR_FOLDER="/workspace/output/bars"
52 | ls -l $BAR_FOLDER
53 |
54 | echo "confirming bar file location"
55 | BAR_FILE="$BAR_FOLDER/integration.bar"
56 | echo $BAR_FILE
57 |
58 | echo "deciding test bar file location"
59 | TEST_BAR_FILE="$BAR_FOLDER/integration-tests.bar"
60 | echo $TEST_BAR_FILE
61 |
62 | echo "confirming workspace location"
63 | ACE_PROJECTS_WORKSPACE="/workspace/output/ace-projects"
64 | ls -l $ACE_PROJECTS_WORKSPACE
65 |
66 | echo "confirming template for the test server to run"
67 | TEST_SERVER_TEMPLATE="$ACE_PROJECTS_WORKSPACE/TEST_SERVER"
68 | ls -l $TEST_SERVER_TEMPLATE
69 |
70 | echo "creating test server"
71 | TEST_SERVER=$(mktemp -d)
72 | cp -rf $TEST_SERVER_TEMPLATE/* $TEST_SERVER/.
73 | echo $TEST_SERVER
74 | ls -l $TEST_SERVER
75 | cd $TEST_SERVER
76 |
77 | echo "checking for required loopback plugins"
78 | [ -d "config/connectors/loopback" ] && npm install loopback-connector-postgresql --save
79 |
80 | echo "creating bar with tests to be run"
81 | ibmint package \
82 | --input-path $ACE_PROJECTS_WORKSPACE \
83 | --project $(params.test-project-name) \
84 | --output-bar-file $TEST_BAR_FILE
85 |
86 | echo "checking test bar"
87 | ls -l $TEST_BAR_FILE
88 |
89 | echo "installing bars into test server"
90 | mqsibar --working-directory $TEST_SERVER \
91 | --bar-file $BAR_FILE
92 | mqsibar --working-directory $TEST_SERVER \
93 | --bar-file $TEST_BAR_FILE
94 |
95 | echo "running tests"
96 | IntegrationServer --work-dir $TEST_SERVER \
97 | --test-project $(params.test-project-name) \
98 | --start-msgflows false
99 |
--------------------------------------------------------------------------------
/tekton/tasks/create-bar.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: tekton.dev/v1beta1
2 | kind: Task
3 | metadata:
4 | name: ace-create-bar-file
5 | spec:
6 |
7 | params:
8 | - name: ace-project-name
9 | description: name of the App Connect project to build the bar file for
10 |
11 | - name: java-project-name
12 | default: ""
13 | description: |+
14 | Name of a Java project with implementation of Java Compute nodes.
15 | Leave this blank if no Java project is needed
16 |
17 | results:
18 | - name: folder
19 | description: folder containing the created bar file
20 | - name: file
21 | description: name of the created bar file
22 |
23 | workspaces:
24 | - name: output
25 | description: workspace with the ACE resources in
26 |
27 | steps:
28 | - name: make-output-writable
29 | image: image-registry.openshift-image-registry.svc:5000/openshift/tools
30 | script: |
31 | #!/bin/bash
32 |
33 | set -e
34 |
35 | echo "preparing bar output location to be writable by aceuser"
36 | mkdir -p /workspace/output/bars
37 | chmod a+wrx /workspace/output/bars
38 |
39 | if [ -z "$(params.java-project-name)" ]
40 | then
41 | echo "No Java project to compile"
42 | else
43 | echo "making Java project writable to allow compiling"
44 | chmod -R a+wrx "/workspace/output/ace-projects/$(params.java-project-name)"
45 | fi
46 |
47 |
48 | - name: run-ace
49 | # see https://www.ibm.com/docs/en/app-connect/12.0?topic=cacerid-building-sample-app-connect-enterprise-image-using-docker
50 | # for alternate images and versions that can be used for this
51 | image: cp.icr.io/cp/appc/ace:12.0.6.0-r1@sha256:003ca6615312b0987fca1c09d676a77fc2c77ecc8607dfaa49f2e8e1976a6a15
52 | securityContext:
53 | # run as the aceuser user id
54 | runAsUser: 1001
55 | runAsGroup: 1001
56 | runAsNonRoot: false
57 | env:
58 | - name: LICENSE
59 | value: accept
60 | script: |
61 | #!/bin/bash
62 |
63 | set -e
64 |
65 | echo "building a bar file for use with App Connect"
66 |
67 | echo "setting up environment"
68 | source /opt/ibm/ace-12/server/bin/mqsiprofile
69 |
70 | echo "verifying bar output location"
71 | BAR_FOLDER="/workspace/output/bars"
72 | ls -l $BAR_FOLDER
73 |
74 | echo "confirming bar file location"
75 | BAR_FILE="$BAR_FOLDER/integration.bar"
76 | echo $BAR_FILE
77 |
78 | echo "confirming workspace location"
79 | ACE_PROJECTS_WORKSPACE="/workspace/output/ace-projects"
80 | ls -l $ACE_PROJECTS_WORKSPACE
81 |
82 | echo "checking Java project"
83 | if [ -n "$(params.java-project-name)" ]; then
84 | echo "$(params.java-project-name) needs to be built with the bar"
85 | JAVA_BUILD_OPTION="--project $(params.java-project-name)"
86 | else
87 | echo "no Java dependencies needed"
88 | JAVA_BUILD_OPTION=""
89 | fi
90 |
91 | echo "creating bar"
92 | ibmint package \
93 | --input-path $ACE_PROJECTS_WORKSPACE \
94 | --project $(params.ace-project-name) \
95 | --output-bar-file $BAR_FILE \
96 | $JAVA_BUILD_OPTION
97 |
98 | echo "checking bar"
99 | ls -l $BAR_FILE
100 |
101 | echo "writing results for later tasks to reuse"
102 | echo -n "$BAR_FOLDER" > $(results.folder.path)
103 | echo -n "integration.bar" > $(results.file.path)
104 |
--------------------------------------------------------------------------------
/tekton/tasks/update-templates.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: tekton.dev/v1beta1
2 | kind: Task
3 | metadata:
4 | name: ace-update-templates-from-secrets
5 | spec:
6 | params:
7 | # files in the git repository that need to be updated
8 | #
9 | # Each string in the array should contain a space-separated list of
10 | # strings that looks like this:
11 | # file-to-update value-to-update key secret namespace
12 | #
13 | # where:
14 | # file-to-update - relative location of the template file to be updated
15 | # value-to-update - the value in the template file that should be replaced
16 | # key - which value to retrieve from a secret to replace the template value with
17 | # secret - name of the secret to retrieve value from
18 | # namespace - namespace of the secret to retrieve value from
19 | - name: credentials
20 | type: array
21 |
22 |
23 | workspaces:
24 | - name: output
25 | description: workspace with the templates to update
26 |
27 | steps:
28 | - name: run
29 | image: image-registry.openshift-image-registry.svc:5000/openshift/tools
30 |
31 | # expand the array of credentials provided from the pipeline
32 | args: ["$(params.credentials[*])"]
33 |
34 | script: |
35 | #!/bin/sh
36 |
37 | set -e
38 |
39 | echo "checking if there is anything to do"
40 | if [[ "$@" == "" ]]
41 | then
42 | echo "no credentials to update"
43 | exit
44 | fi
45 |
46 | echo "identifying location of App Connect projects"
47 | TEMPLATES_FOLDER="/workspace/output/ace-projects"
48 | echo $TEMPLATES_FOLDER
49 | ls -l $TEMPLATES_FOLDER
50 |
51 | echo "applying modifications to the templates"
52 | for credentialsinfo in "$@"
53 | do
54 | # split the line into an array
55 | creditems=($credentialsinfo)
56 |
57 | # name of the template file to modify
58 | TEMPLATE_FILE=${creditems[0]}
59 | # absolute location of the template file
60 | TEMPLATE="$TEMPLATES_FOLDER/$TEMPLATE_FILE"
61 |
62 | # the placeholder value we expect to see in the template file
63 | PLACEHOLDER=${creditems[1]}
64 |
65 | # which value in the secret to replace the placeholder value with
66 | KEY=${creditems[2]}
67 |
68 | # which secret to get the value from
69 | SECRET_NAME=${creditems[3]}
70 |
71 | # the namespace where the secret to get the value from is
72 | SECRET_NAMESPACE=${creditems[4]}
73 |
74 | # confirm the credentials have been read in correctly
75 | echo "--------------------------------------------------"
76 | echo "replacing:"
77 | echo " placeholder value $PLACEHOLDER "
78 | echo " in template file $TEMPLATE "
79 | echo " using the $KEY value "
80 | echo " from the secret $SECRET_NAME in namespace $SECRET_NAMESPACE"
81 | echo ""
82 |
83 | echo "retrieving value from the secret"
84 | ESCAPED_KEY=$(echo $KEY | sed s/\\./'\\'./)
85 | SECRET_VALUE=$(oc get secret -n $SECRET_NAMESPACE $SECRET_NAME -o jsonpath="{.data.$ESCAPED_KEY}" | base64 -d)
86 |
87 | echo "editing template"
88 | sed -i "s~$PLACEHOLDER~$SECRET_VALUE~g" "$TEMPLATE"
89 |
90 | echo "updated template $TEMPLATE"
91 | echo "=================================================="
92 | cat "$TEMPLATE"
93 | echo "=================================================="
94 | done
95 |
96 |
--------------------------------------------------------------------------------
/ace-projects/simple-demo/.project:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <projectDescription>
3 | 	<name>simple-demo</name>
4 | 	<comment></comment>
5 | 	<projects>
6 | 	</projects>
7 | 	<buildSpec>
8 | 		<buildCommand>
9 | 			<name>com.ibm.etools.mft.applib.applibbuilder</name>
10 | 			<arguments>
11 | 			</arguments>
12 | 		</buildCommand>
13 | 		<buildCommand>
14 | 			<name>com.ibm.etools.mft.applib.applibresourcevalidator</name>
15 | 			<arguments>
16 | 			</arguments>
17 | 		</buildCommand>
18 | 		<buildCommand>
19 | 			<name>com.ibm.etools.mft.connector.policy.ui.PolicyBuilder</name>
20 | 			<arguments>
21 | 			</arguments>
22 | 		</buildCommand>
23 | 		<buildCommand>
24 | 			<name>com.ibm.etools.mft.applib.mbprojectbuilder</name>
25 | 			<arguments>
26 | 			</arguments>
27 | 		</buildCommand>
28 | 		<buildCommand>
29 | 			<name>com.ibm.etools.msg.validation.dfdl.mlibdfdlbuilder</name>
30 | 			<arguments>
31 | 			</arguments>
32 | 		</buildCommand>
33 | 		<buildCommand>
34 | 			<name>com.ibm.etools.mft.flow.adapters.adapterbuilder</name>
35 | 			<arguments>
36 | 			</arguments>
37 | 		</buildCommand>
38 | 		<buildCommand>
39 | 			<name>com.ibm.etools.mft.flow.sca.scabuilder</name>
40 | 			<arguments>
41 | 			</arguments>
42 | 		</buildCommand>
43 | 		<buildCommand>
44 | 			<name>com.ibm.etools.msg.validation.dfdl.mbprojectresourcesbuilder</name>
45 | 			<arguments>
46 | 			</arguments>
47 | 		</buildCommand>
48 | 		<buildCommand>
49 | 			<name>com.ibm.etools.mft.esql.lang.esqllangbuilder</name>
50 | 			<arguments>
51 | 			</arguments>
52 | 		</buildCommand>
53 | 		<buildCommand>
54 | 			<name>com.ibm.etools.mft.map.builder.mslmappingbuilder</name>
55 | 			<arguments>
56 | 			</arguments>
57 | 		</buildCommand>
58 | 		<buildCommand>
59 | 			<name>com.ibm.etools.mft.flow.msgflowxsltbuilder</name>
60 | 			<arguments>
61 | 			</arguments>
62 | 		</buildCommand>
63 | 		<buildCommand>
64 | 			<name>com.ibm.etools.mft.flow.msgflowbuilder</name>
65 | 			<arguments>
66 | 			</arguments>
67 | 		</buildCommand>
68 | 		<buildCommand>
69 | 			<name>com.ibm.etools.mft.decision.service.ui.decisionservicerulebuilder</name>
70 | 			<arguments>
71 | 			</arguments>
72 | 		</buildCommand>
73 | 		<buildCommand>
74 | 			<name>com.ibm.etools.mft.pattern.capture.PatternBuilder</name>
75 | 			<arguments>
76 | 			</arguments>
77 | 		</buildCommand>
78 | 		<buildCommand>
79 | 			<name>com.ibm.etools.mft.json.builder.JSONBuilder</name>
80 | 			<arguments>
81 | 			</arguments>
82 | 		</buildCommand>
83 | 		<buildCommand>
84 | 			<name>com.ibm.etools.mft.restapi.ui.restApiDefinitionsBuilder</name>
85 | 			<arguments>
86 | 			</arguments>
87 | 		</buildCommand>
88 | 		<buildCommand>
89 | 			<name>com.ibm.etools.mft.policy.ui.policybuilder</name>
90 | 			<arguments>
91 | 			</arguments>
92 | 		</buildCommand>
93 | 		<buildCommand>
94 | 			<name>com.ibm.etools.mft.msg.assembly.messageAssemblyBuilder</name>
95 | 			<arguments>
96 | 			</arguments>
97 | 		</buildCommand>
98 | 		<buildCommand>
99 | 			<name>com.ibm.etools.msg.validation.dfdl.dfdlqnamevalidator</name>
100 | 			<arguments>
101 | 			</arguments>
102 | 		</buildCommand>
103 | 		<buildCommand>
104 | 			<name>com.ibm.etools.mft.bar.ext.barbuilder</name>
105 | 			<arguments>
106 | 			</arguments>
107 | 		</buildCommand>
108 | 		<buildCommand>
109 | 			<name>com.ibm.etools.mft.unittest.ui.TestCaseBuilder</name>
110 | 			<arguments>
111 | 			</arguments>
112 | 		</buildCommand>
113 | 	</buildSpec>
114 | 	<natures>
115 | 		<nature>com.ibm.etools.msgbroker.tooling.applicationNature</nature>
116 | 		<nature>com.ibm.etools.msgbroker.tooling.messageBrokerProjectNature</nature>
117 | 	</natures>
118 | </projectDescription>
119 |
--------------------------------------------------------------------------------
/demo-pre-reqs/README.md:
--------------------------------------------------------------------------------
1 | # demo prep
2 |
3 | **You (almost certainly) don't need this.**
4 |
5 | These are the pre-reqs I used to demonstrate my sample App Connect Enterprise application. My App Connect application connects to a PostgreSQL database - so I need to set up a PostgreSQL database to demo it. My App Connect application receives messages from Kafka - so I need to create a Kafka cluster to demo it. And so on.
6 |
7 | I'm keeping this here as it'll be convenient when I need to recreate this demo from scratch, but as you'll be building and deploying your own App Connect Enterprise application, **you will have different pre-reqs from mine**.
8 |
9 | If you just follow these instructions on your existing OpenShift cluster, you will likely find that some of this clashes with what you already have set up.
10 |
11 | ## Add IBM software to Operator Hub
12 |
13 | ```sh
14 | oc apply -f ibm-catalog-source.yaml
15 | ```
16 |
17 | ## Install operators needed for the demo
18 |
19 | ```sh
20 | oc apply -f operators
21 | ```
22 |
23 | ## Setup Platform Navigator
24 |
25 | ```sh
26 | oc new-project integration
27 | oc apply -f ./ibm-entitlement-key.yaml -n integration
28 | oc apply -f ./cp4i
29 | ```
30 |
31 | ## Setup Event Streams
32 |
33 | ```sh
34 | oc new-project eventstreams
35 | oc apply -f ./ibm-entitlement-key.yaml -n eventstreams
36 | oc apply -f ./kafka
37 | ```
38 |
39 | ## Setup PostgreSQL
40 |
41 | ```sh
42 | oc new-project postgresql
43 | oc apply -f ./postgresql/db-data.yaml
44 | oc apply -f ./postgresql/database.yaml
45 | ```
46 |
47 | ## Setup the namespace where the sample ACE demo will run
48 |
49 | ```sh
50 | oc new-project ace-demo
51 | oc apply -f ./ibm-entitlement-key.yaml -n ace-demo
52 | ```
53 |
54 | ## Submit an HTTP request to the simple ACE flow
55 |
56 | ```sh
57 | curl "http://$(oc get route -nace-demo hello-world-http -o jsonpath='{.spec.host}')/hello"
58 | ```
59 |
60 | ## Produce a message to the Kafka topic that will trigger the complex ACE flow
61 |
62 | ```sh
63 | BOOTSTRAP=$(oc get eventstreams event-backbone -neventstreams -ojsonpath='{.status.kafkaListeners[1].bootstrapServers}')
64 | PASSWORD=$(oc get secret -neventstreams appconnect-kafka-user -ojsonpath='{.data.password}' | base64 -d)
65 | oc get secret -neventstreams event-backbone-cluster-ca-cert -ojsonpath='{.data.ca\.p12}' | base64 -d > ca.p12
66 | CA_PASSWORD=$(oc get secret -neventstreams event-backbone-cluster-ca-cert -ojsonpath='{.data.ca\.password}' | base64 -d)
67 |
68 | echo '{"id": 1, "message": "quick test"}' | kafka-console-producer.sh \
69 | --bootstrap-server $BOOTSTRAP \
70 | --topic TODO.UPDATES \
71 | --producer-property "security.protocol=SASL_SSL" \
72 | --producer-property "sasl.mechanism=SCRAM-SHA-512" \
73 | --producer-property "sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required username="appconnect-kafka-user" password="$PASSWORD";" \
74 | --producer-property "ssl.truststore.location=ca.p12" \
75 | --producer-property "ssl.truststore.type=PKCS12" \
76 | --producer-property "ssl.truststore.password=$CA_PASSWORD"
77 | ```
78 |
79 | ## Check that the ACE flow put something in PostgreSQL
80 |
81 | ```sh
82 | oc exec -it -n postgresql -c database \
83 | $(oc get pods -n postgresql --selector='postgres-operator.crunchydata.com/cluster=store,postgres-operator.crunchydata.com/role=master' -o name) \
84 | -- psql -d store
85 | ```
86 |
87 | ```sql
88 | store=# select * from todos;
89 | id | user_id | title | encoded_title | is_completed
90 | ----+---------+--------------------+--------------------------------------+--------------
91 | 1 | 1 | delectus aut autem | RU5DT0RFRDogZGVsZWN0dXMgYXV0IGF1dGVt | f
92 | (1 row)
93 | ```
--------------------------------------------------------------------------------
/ace-projects/sample-ace-application/.project:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <projectDescription>
3 | 	<name>sample-ace-application</name>
4 | 	<comment></comment>
5 | 	<projects>
6 | 		<project>sample-ace-application-java</project>
7 | 	</projects>
8 | 	<buildSpec>
9 | 		<buildCommand>
10 | 			<name>com.ibm.etools.mft.applib.applibbuilder</name>
11 | 			<arguments>
12 | 			</arguments>
13 | 		</buildCommand>
14 | 		<buildCommand>
15 | 			<name>com.ibm.etools.mft.applib.applibresourcevalidator</name>
16 | 			<arguments>
17 | 			</arguments>
18 | 		</buildCommand>
19 | 		<buildCommand>
20 | 			<name>com.ibm.etools.mft.connector.policy.ui.PolicyBuilder</name>
21 | 			<arguments>
22 | 			</arguments>
23 | 		</buildCommand>
24 | 		<buildCommand>
25 | 			<name>com.ibm.etools.mft.applib.mbprojectbuilder</name>
26 | 			<arguments>
27 | 			</arguments>
28 | 		</buildCommand>
29 | 		<buildCommand>
30 | 			<name>com.ibm.etools.msg.validation.dfdl.mlibdfdlbuilder</name>
31 | 			<arguments>
32 | 			</arguments>
33 | 		</buildCommand>
34 | 		<buildCommand>
35 | 			<name>com.ibm.etools.mft.flow.adapters.adapterbuilder</name>
36 | 			<arguments>
37 | 			</arguments>
38 | 		</buildCommand>
39 | 		<buildCommand>
40 | 			<name>com.ibm.etools.mft.flow.sca.scabuilder</name>
41 | 			<arguments>
42 | 			</arguments>
43 | 		</buildCommand>
44 | 		<buildCommand>
45 | 			<name>com.ibm.etools.msg.validation.dfdl.mbprojectresourcesbuilder</name>
46 | 			<arguments>
47 | 			</arguments>
48 | 		</buildCommand>
49 | 		<buildCommand>
50 | 			<name>com.ibm.etools.mft.esql.lang.esqllangbuilder</name>
51 | 			<arguments>
52 | 			</arguments>
53 | 		</buildCommand>
54 | 		<buildCommand>
55 | 			<name>com.ibm.etools.mft.map.builder.mslmappingbuilder</name>
56 | 			<arguments>
57 | 			</arguments>
58 | 		</buildCommand>
59 | 		<buildCommand>
60 | 			<name>com.ibm.etools.mft.flow.msgflowxsltbuilder</name>
61 | 			<arguments>
62 | 			</arguments>
63 | 		</buildCommand>
64 | 		<buildCommand>
65 | 			<name>com.ibm.etools.mft.flow.msgflowbuilder</name>
66 | 			<arguments>
67 | 			</arguments>
68 | 		</buildCommand>
69 | 		<buildCommand>
70 | 			<name>com.ibm.etools.mft.decision.service.ui.decisionservicerulebuilder</name>
71 | 			<arguments>
72 | 			</arguments>
73 | 		</buildCommand>
74 | 		<buildCommand>
75 | 			<name>com.ibm.etools.mft.pattern.capture.PatternBuilder</name>
76 | 			<arguments>
77 | 			</arguments>
78 | 		</buildCommand>
79 | 		<buildCommand>
80 | 			<name>com.ibm.etools.mft.json.builder.JSONBuilder</name>
81 | 			<arguments>
82 | 			</arguments>
83 | 		</buildCommand>
84 | 		<buildCommand>
85 | 			<name>com.ibm.etools.mft.restapi.ui.restApiDefinitionsBuilder</name>
86 | 			<arguments>
87 | 			</arguments>
88 | 		</buildCommand>
89 | 		<buildCommand>
90 | 			<name>com.ibm.etools.mft.policy.ui.policybuilder</name>
91 | 			<arguments>
92 | 			</arguments>
93 | 		</buildCommand>
94 | 		<buildCommand>
95 | 			<name>com.ibm.etools.mft.msg.assembly.messageAssemblyBuilder</name>
96 | 			<arguments>
97 | 			</arguments>
98 | 		</buildCommand>
99 | 		<buildCommand>
100 | 			<name>com.ibm.etools.msg.validation.dfdl.dfdlqnamevalidator</name>
101 | 			<arguments>
102 | 			</arguments>
103 | 		</buildCommand>
104 | 		<buildCommand>
105 | 			<name>com.ibm.etools.mft.bar.ext.barbuilder</name>
106 | 			<arguments>
107 | 			</arguments>
108 | 		</buildCommand>
109 | 		<buildCommand>
110 | 			<name>com.ibm.etools.mft.unittest.ui.TestCaseBuilder</name>
111 | 			<arguments>
112 | 			</arguments>
113 | 		</buildCommand>
114 | 	</buildSpec>
115 | 	<natures>
116 | 		<nature>com.ibm.etools.msgbroker.tooling.applicationNature</nature>
117 | 		<nature>com.ibm.etools.msgbroker.tooling.messageBrokerProjectNature</nature>
118 | 	</natures>
119 | </projectDescription>
120 |
--------------------------------------------------------------------------------
/ace-projects/sample-ace-application_Test/src/main/java/test/Sample_ace_application_sample_message_flow_get_id_from_update_message_0002_Test.java:
--------------------------------------------------------------------------------
1 | package test;
2 |
3 | import java.io.InputStream;
4 |
5 | import org.junit.jupiter.api.AfterEach;
6 | import org.junit.jupiter.api.Test;
7 |
8 | import com.ibm.integration.test.v1.NodeSpy;
9 | import com.ibm.integration.test.v1.NodeStub;
10 | import com.ibm.integration.test.v1.SpyObjectReference;
11 | import com.ibm.integration.test.v1.TestMessageAssembly;
12 | import com.ibm.integration.test.v1.TestSetup;
13 | import com.ibm.integration.test.v1.exception.TestException;
14 |
15 | import static com.ibm.integration.test.v1.Matchers.*;
16 | import static org.hamcrest.MatcherAssert.assertThat;
17 | import static org.junit.jupiter.api.Assertions.assertEquals;
18 | import static org.junit.jupiter.api.Assertions.fail;
19 |
20 | public class Sample_ace_application_sample_message_flow_get_id_from_update_message_0002_Test {
21 |
22 | /*
23 | * Sample_ace_application_sample_message_flow_get_id_from_update_message_0002_Test
24 | * Test generated by IBM App Connect Enterprise Toolkit 12.0.6.0 on Oct 26, 2022 8:41:15 AM
25 | */
26 |
27 | @AfterEach
28 | public void cleanupTest() throws TestException {
29 | // Ensure any mocks created by a test are cleared after the test runs
30 | TestSetup.restoreAllMocks();
31 | }
32 |
33 | @Test
34 | public void sample_ace_application_sample_message_flow_get_id_from_update_message_TestCase_001()
35 | throws TestException {
36 |
37 | // Define the SpyObjectReference for the node we're going to mock
38 | SpyObjectReference nodeToBeMockedRef = new SpyObjectReference().application("sample-ace-application").messageFlow("sample_message_flow")
39 | .node("retrieve current todo details");
40 |
41 | // Define the SpyObjectReference objects for the start and end of our test run
42 | SpyObjectReference kafkaInputObjRef = new SpyObjectReference().application("sample-ace-application").messageFlow("sample_message_flow")
43 | .node("Kafka consumer todo updates");
44 | SpyObjectReference postgresObjRef = new SpyObjectReference().application("sample-ace-application").messageFlow("sample_message_flow")
45 | .node("insert into database");
46 |
47 | // Initialise NodeSpy objects
48 | NodeSpy kafkaInputSpy = new NodeSpy(kafkaInputObjRef);
49 | NodeSpy postgresSpy = new NodeSpy(postgresObjRef);
50 |
51 |
52 | // Initialize the service stub to avoid needing an HTTP server for unit testing
53 | NodeStub serviceStub = new NodeStub(nodeToBeMockedRef);
54 |
55 | // Create a Message Assembly and load it with the HTTP result
56 | TestMessageAssembly serviceResultMessageAssembly = new TestMessageAssembly();
57 | // Directly create the JSON data; useful for small testcases. We could also record the message and use that instead.
58 | serviceResultMessageAssembly.buildJSONMessage("{\"userId\": 1, \"id\": 1, \"title\": \"delectus aut autem\", \"completed\": false}");
59 |
60 | // Program the stub to return this dummy result instead of calling the service
61 | serviceStub.onCall().propagatesMessage("in", "out", serviceResultMessageAssembly);
62 |
63 | // Configure the "in" terminal on the Loopback Request node not to propagate.
64 | // If we don't do this, then the node will throw exceptions when it realizes
65 | // we haven't actually configured it properly.
66 | postgresSpy.setStopAtInputTerminal("in");
67 |
68 | // Directly create the JSON data; useful for small testcases.
69 | TestMessageAssembly inputMessageAssembly = new TestMessageAssembly();
70 | inputMessageAssembly.messagePath("JSON.Data.id").setValue(1);
71 | inputMessageAssembly.messagePath("JSON.Data.message").setValue("quick test");
72 |
73 | // Now call propagate on the "out" terminal of the Kafka Input node.
74 | // This takes the place of an actual Kafka message: we simply hand the node
75 | // the message assembly and tell it to propagate that as if it came from an
76 | // actual client. This line is where the flow is actually run.
77 | kafkaInputSpy.propagate(inputMessageAssembly, "out");
78 |
79 | // Validate the results from the flow execution
80 | // We will now pick up the message that is propagated into the Loopback Request node and validate it
81 | TestMessageAssembly loopbackMessageAssembly = postgresSpy.receivedMessageAssembly("in", 1);
82 |
83 | // Should still have the title
84 | assertEquals("delectus aut autem", loopbackMessageAssembly.messagePath("JSON.Data.title").getStringValue());
85 | // and also have the encoded title
86 | assertEquals("RU5DT0RFRDogZGVsZWN0dXMgYXV0IGF1dGVt", loopbackMessageAssembly.messagePath("JSON.Data.encodedTitle").getStringValue());
87 | }
88 |
89 | }
90 |
--------------------------------------------------------------------------------
/ace-projects/sample-ace-application/sample_message_flow.msgflow:
--------------------------------------------------------------------------------
1 |
2 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
--------------------------------------------------------------------------------
/tekton/tasks/git-clone.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: tekton.dev/v1beta1
2 | kind: Task
3 | metadata:
4 | name: git-clone-home
5 | spec:
6 | description: >-
7 | These Tasks are Git tasks to work with repositories used by other tasks in
8 | your Pipeline.
9 |
10 | The git-clone-home Task will clone a repo from the provided url into the output
11 | Workspace. By default the repo will be cloned into the root of your
12 | Workspace. You can clone into a subdirectory by setting this Task's
13 | subdirectory param. This Task also supports sparse checkouts. To perform a
14 | sparse checkout, pass a list of comma separated directory patterns to this
15 | Task's sparseCheckoutDirectories param.
16 | params:
17 | - description: git url to clone
18 | name: url
19 | type: string
20 | - default: ''
21 | description: 'git revision to checkout (branch, tag, sha, ref…)'
22 | name: revision
23 | type: string
24 | - default: ''
25 | description: (optional) git refspec to fetch before checking out revision
26 | name: refspec
27 | type: string
28 | - default: 'true'
29 | description: defines if the resource should initialize and fetch the submodules
30 | name: submodules
31 | type: string
32 | - default: '1'
33 | description: >-
34 | performs a shallow clone where only the most recent commit(s) will be
35 | fetched
36 | name: depth
37 | type: string
38 | - default: 'true'
39 | description: >-
40 | defines if http.sslVerify should be set to true or false in the global
41 | git config
42 | name: sslVerify
43 | type: string
44 | - default: ''
45 | description: subdirectory inside the "output" workspace to clone the git repo into
46 | name: subdirectory
47 | type: string
48 | - default: ''
49 | description: >-
50 | defines which directories patterns to match or exclude when performing a
51 | sparse checkout
52 | name: sparseCheckoutDirectories
53 | type: string
54 | - default: 'true'
55 | description: >-
56 | clean out the contents of the repo's destination directory (if it
57 | already exists) before trying to clone the repo there
58 | name: deleteExisting
59 | type: string
60 | - default: ''
61 | description: git HTTP proxy server for non-SSL requests
62 | name: httpProxy
63 | type: string
64 | - default: ''
65 | description: git HTTPS proxy server for SSL requests
66 | name: httpsProxy
67 | type: string
68 | - default: ''
69 | description: git no proxy - opt out of proxying HTTP/HTTPS requests
70 | name: noProxy
71 | type: string
72 | - default: 'true'
73 | description: log the commands used during execution
74 | name: verbose
75 | type: string
76 | - default: >-
77 | registry.redhat.io/openshift-pipelines/pipelines-git-init-rhel8@sha256:afc5d3f9efe26c7042635d43b8ffd09d67936e3d0b6b901dc08a33e20313d361
78 | description: the image used where the git-init binary is
79 | name: gitInitImage
80 | type: string
81 | results:
82 | - description: The precise commit SHA that was fetched by this Task
83 | name: commit
84 | - description: The precise URL that was fetched by this Task
85 | name: url
86 | stepTemplate:
87 | env:
88 | - name: HOME
89 | value: "/tekton/home"
90 | steps:
91 | - image: $(params.gitInitImage)
92 | name: clone
93 | resources: {}
94 | script: |+
95 | #!/bin/sh
96 |
97 | set -eu -o pipefail
98 |
99 |
100 | if [[ "$(params.verbose)" == "true" ]] ; then
101 | set -x
102 | fi
103 |
104 |
105 | CHECKOUT_DIR="$(workspaces.output.path)/$(params.subdirectory)"
106 |
107 |
108 | cleandir() {
109 | # Delete any existing contents of the repo directory if it exists.
110 | #
111 | # We don't just "rm -rf $CHECKOUT_DIR" because $CHECKOUT_DIR might be "/"
112 | # or the root of a mounted volume.
113 | if [[ -d "$CHECKOUT_DIR" ]] ; then
114 | # Delete non-hidden files and directories
115 | rm -rf "$CHECKOUT_DIR"/*
116 | # Delete files and directories starting with . but excluding ..
117 | rm -rf "$CHECKOUT_DIR"/.[!.]*
118 | # Delete files and directories starting with .. plus any other character
119 | rm -rf "$CHECKOUT_DIR"/..?*
120 | fi
121 | }
122 |
123 |
124 | if [[ "$(params.deleteExisting)" == "true" ]] ; then
125 | cleandir
126 | fi
127 |
128 |
129 | test -z "$(params.httpProxy)" || export HTTP_PROXY=$(params.httpProxy)
130 |
131 | test -z "$(params.httpsProxy)" || export HTTPS_PROXY=$(params.httpsProxy)
132 |
133 |
134 | test -z "$(params.noProxy)" || export NO_PROXY=$(params.noProxy)
135 |
136 |
137 | /ko-app/git-init \
138 | -url "$(params.url)" \
139 | -revision "$(params.revision)" \
140 | -refspec "$(params.refspec)" \
141 | -path "$CHECKOUT_DIR" \
142 | -sslVerify="$(params.sslVerify)" \
143 | -submodules="$(params.submodules)" \
144 | -depth "$(params.depth)" \
145 | -sparseCheckoutDirectories "$(params.sparseCheckoutDirectories)"
146 | cd "$CHECKOUT_DIR"
147 |
148 | RESULT_SHA="$(git rev-parse HEAD)"
149 |
150 | EXIT_CODE="$?"
151 |
152 | if [ "$EXIT_CODE" != 0 ] ; then
153 | exit $EXIT_CODE
154 | fi
155 |
156 | # ensure we don't add a trailing newline to the result
157 |
158 | echo -n "$RESULT_SHA" > $(results.commit.path)
159 |
160 | echo -n "$(params.url)" > $(results.url.path)
161 | workspaces:
162 | - description: The git repo will be cloned onto the volume backing this workspace
163 | name: output
164 |
--------------------------------------------------------------------------------
/complex-pipelinerun.yaml:
--------------------------------------------------------------------------------
1 | #
2 | # Sample pipelinerun for deploying an App Connect Enterprise integration server
3 | #
4 | # You need to modify this with details of your own App Connect application.
5 | #
6 | # This sample shows how to deploy an App Connect application with
7 | # dependencies and additional configuration options.
8 |
9 | apiVersion: tekton.dev/v1beta1
10 | kind: PipelineRun
11 | metadata:
12 | generateName: ace-deploy-
13 | spec:
14 | serviceAccountName: pipeline-deployer-serviceaccount
15 | params:
16 | # ########################################################
17 | # where to run the App Connect application
18 | # ########################################################
19 | # what to call the IntegrationServer that runs your application
20 | - name: integration-server-name
21 | value: "my-ace-sample"
22 | # where to run your application
23 | - name: ace-namespace
24 | value: "ace-demo"
25 |
26 |
27 | # ########################################################
28 | # where to find the projects to create
29 | # ########################################################
30 | # location of the git repository with your application
31 | - name: git-repository
32 | value: "https://github.com/dalelane/app-connect-tekton-pipeline"
33 |
34 | # name of the App Connect project to deploy
35 | # This should be the name of a folder in ace-projects
36 | - name: ace-project-name
37 | value: "sample-ace-application"
38 |
39 | # name of an App Connect test project
40 | # This is used to verify the bar before the new
41 | # Integration Server is deployed
42 | # This should be the name of a folder in ace-projects
43 | - name: test-project-name
44 | value: "sample-ace-application_Test"
45 |
46 | # ########################################################
47 | # credentials to use
48 | #
49 | # space-separated list of strings containing:
50 | # file-to-update value-to-update key secret namespace
51 | #
52 | # where:
53 | # file-to-update - relative location of the template file to be updated
54 | # value-to-update - the value in the template file that should be replaced
55 | # key - which value to retrieve from a secret to replace the template value with
56 | # secret - name of the secret to retrieve value from
57 | # namespace - namespace of the secret to retrieve value from
58 | #
59 | # ########################################################
60 | - name: credentials-to-update
61 | value:
62 | # update connection information for the Kafka cluster
63 | - "sample-ace-policies/eventstreams.policyxml TEMPLATE_BOOTSTRAP_SERVER bootstrap-server kafka-connection-info eventstreams"
64 | - "sample-ace-policies/eventstreams.policyxml TEMPLATE_TRUSTSTORE_FILE truststore-name kafka-connection-info eventstreams"
65 | # update credentials for connecting to Kafka
66 | - "sample-ace-config/setdbparms.txt TEMPLATE_KAFKA_ES_PASSWORD password appconnect-kafka-user eventstreams"
67 | - "sample-ace-config/setdbparms.txt TEMPLATE_TRUSTSTORE_PASSWORD ca.password event-backbone-cluster-ca-cert eventstreams"
68 | # update connection information for the PostgreSQL database
69 | - "sample-ace-data-sources/datasources.json TEMPLATE_POSTGRES_HOST host store-pguser-cp4i postgresql"
70 | - "sample-ace-data-sources/datasources.json TEMPLATE_POSTGRES_PORT port store-pguser-cp4i postgresql"
71 | - "sample-ace-data-sources/datasources.json TEMPLATE_POSTGRES_DBNAME dbname store-pguser-cp4i postgresql"
72 | # update credentials for connecting to the PostgreSQL database
73 | - "sample-ace-config/setdbparms.txt TEMPLATE_POSTGRES_USERNAME user store-pguser-cp4i postgresql"
74 | - "sample-ace-config/setdbparms.txt TEMPLATE_POSTGRES_PASSWORD password store-pguser-cp4i postgresql"
75 |
76 |
77 | # ########################################################
78 | # optional supporting resources to deploy
79 | # ########################################################
80 |
81 | # Java project
82 | - name: java-project-name
83 | value: "sample-ace-application-java"
84 |
85 | # setdbparms.txt
86 | - name: setdbparms-file
87 | value: "ace-projects/sample-ace-config/setdbparms.txt"
88 | - name: setdbparms-name
89 | value: "sample-setdbparms"
90 |
91 | # server.conf.yaml
92 | - name: serverconf-file
93 | value: "ace-projects/sample-ace-config/server.conf.yaml"
94 | - name: serverconf-name
95 | value: "sample-serverconf"
96 |
97 | # Policy project
98 | - name: policies-project-folder
99 | value: "ace-projects/sample-ace-policies"
100 | - name: policies-project-name
101 | value: "sample-policies"
102 |
103 | # Loopback data sources
104 | - name: datasources-project-folder
105 | value: "ace-projects/sample-ace-data-sources"
106 | - name: datasources-project-name
107 | value: "sample-datasources"
108 |
109 | # truststore file
110 | - name: truststore-p12-secret
111 | value: "event-backbone-cluster-ca-cert"
112 | - name: truststore-p12-secret-namespace
113 | value: "eventstreams"
114 | - name: truststore-name
115 | value: "sample-truststore.jks"
116 |
117 |
118 |
119 | pipelineRef:
120 | name: pipeline-ace-integration-server
121 |
122 | workspaces:
123 | - name: pipeline-shared-workspace
124 | volumeClaimTemplate:
125 | spec:
126 | storageClassName: ibmc-block-gold
127 | accessModes:
128 | - ReadWriteOnce
129 | resources:
130 | requests:
131 | storage: 100Mi
132 |
133 |
--------------------------------------------------------------------------------
/tekton/tasks/create-integration-server.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: tekton.dev/v1beta1
2 | kind: Task
3 | metadata:
4 | name: ace-create-integration-server
5 | spec:
6 | params:
7 | # --------------------------------------------------------------
8 | # details of the IntegrationServer to create
9 | # --------------------------------------------------------------
10 | - name: integration-name
11 | description: name to give the integration server
12 | - name: ace-namespace
13 | description: namespace to create the IntegrationServer in
14 | # --------------------------------------------------------------
15 | # what flavour of App Connect to run
16 | # for details, see https://www.ibm.com/docs/en/app-connect/containers_cd?topic=obtaining-app-connect-enterprise-server-image-from-cloud-container-registry
17 | # --------------------------------------------------------------
18 | - name: ace-server-version
19 | description: version of App Connect Enterprise to use
20 | - name: ace-server-license
21 | description: ID of the license to use for the IntegrationServer
22 | # --------------------------------------------------------------
23 | # details of the Docker image that will be created to run the
24 | # integration server
25 | # --------------------------------------------------------------
26 | - name: ace-server-base-image
27 | description: |+
28 | Docker image to use as a base for the server.
29 | Example values for this can be found at
30 | https://www.ibm.com/docs/en/app-connect/containers_cd?topic=obtaining-app-connect-enterprise-server-image-from-cloud-container-registry
31 | - name: bar-file-location
32 | description: folder containing an integration.bar file to deploy
33 | - name: integration-tag
34 | description: docker tag to use for the integration image
35 | # --------------------------------------------------------------
36 | # optional arguments
37 | # scaling and resource limits
38 | # --------------------------------------------------------------
39 | - name: cpu-limit
40 | default: 2
41 | - name: memory-limit
42 | default: "1Gi"
43 | - name: replicas
44 | default: 1
45 | # --------------------------------------------------------------
46 | # optional arguments
47 | # Configuration resources that can be added to
48 | # the integration server
49 | # --------------------------------------------------------------
50 | - name: policies-configuration
51 | description: name of the policies project to use
52 | default: ""
53 | - name: setdbparms-configuration
54 | description: name of the setdbparms configuration object to use
55 | default: ""
56 | - name: datasource-configuration
57 | description: name of the data source object to use
58 | default: ""
59 | - name: serverconf-configuration
60 | description: name of the server.conf.yaml configuration object to use
61 | default: ""
62 | - name: truststore-configuration
63 | description: name of the truststore configuration object to use
64 | default: ""
65 | # --------------------------------------------------------------
66 |
67 | workspaces:
68 | - name: output
69 | description: workspace with the resources
70 |
71 | steps:
72 | - name: run
73 | image: image-registry.openshift-image-registry.svc:5000/openshift/tools
74 | script: |
75 | #!/bin/sh
76 |
77 | set -e
78 |
79 | echo "creating base ACE imagestream"
80 | oc apply -f - <<EOF
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
21 | > For background information about the Operator, and the different resources that this pipeline will create (e.g. `IntegrationServer` and `Configuration`), see these blog posts:
22 | > - [What is an Operator and why did we create one for IBM App Connect?](https://community.ibm.com/community/user/integration/blogs/rob-convery1/2022/05/11/ibm-app-connect-operators-part-1-what-is-an-operat)
23 | > - [Exploring the IntegrationServer Resource of the IBM App Connect Operator](https://community.ibm.com/community/user/integration/blogs/rob-convery1/2022/05/11/ibm-app-connect-operators-part-2-exploring)
24 |
25 |
26 | ## Pipeline
27 |
28 | The pipeline builds and deploys your App Connect Enterprise application. You need to run it every time your application has changed and you want to deploy the new version to OpenShift.
29 |
30 | When running App Connect Enterprise in containers, there is a lot of flexibility about how much of your application is built into your container image, and how much is provided when the container starts.
31 |
32 | > For background reading on some of the options, and some of the considerations about them, see the blog post:
33 | > - [Comparing styles of container-based deployment for IBM App Connect Enterprise](https://community.ibm.com/community/user/integration/blogs/aiden-gallagher1/2022/07/12/comparing-styles-of-container-based-deployment-for)
34 |
35 | This pipeline provides almost all parts of your application at runtime when the container starts. The only component that is [baked into](https://community.ibm.com/community/user/integration/blogs/aiden-gallagher1/2022/07/12/comparing-styles-of-container-based-deployment-for) the image is the application BAR file.
36 |
37 | Baking the BAR file into a custom App Connect image avoids the need to run a dedicated content server to host BAR files. If you would prefer to use a content server instead, see the documentation on [Mechanisms for providing BAR files to an integration server](https://www.ibm.com/docs/en/app-connect/containers_cd?topic=servers-mechanisms-providing-bar-files-integration-server) for details on how to do this. (The pipeline in this repository uses the approach described as "Custom image" in that documentation.)
38 |
39 | ### Running the pipeline
40 |
41 | | | **link** |
42 | | - | - |
43 | | **pipeline spec:** | [`pipeline.yaml`](./tekton/pipeline.yaml) |
44 | | **example pipeline runs:** | [`simple-pipelinerun.yaml`](./simple-pipelinerun.yaml) [`complex-pipelinerun.yaml`](./complex-pipelinerun.yaml) |
45 | | **helper scripts:** | [`1-deploy-simple-integration-server.sh`](./1-deploy-simple-integration-server.sh) [`1-deploy-complex-integration-server.sh`](./1-deploy-complex-integration-server.sh)
46 |
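Assuming you are already logged in to your OpenShift cluster with `oc`, a typical end-to-end run of the demo is intended to look something like this (the helper scripts wrap the `oc` commands described in the rest of this README):

```sh
# one-time setup: create the pipeline-ace namespace, service account,
#  permissions, Tekton tasks and the pipeline itself
./0-setup.sh

# build and deploy the simple sample application
./1-deploy-simple-integration-server.sh

# build and deploy the more complex sample application
./1-deploy-complex-integration-server.sh
```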
47 |
48 | **What the pipeline does**
49 |
50 | Builds your IBM App Connect Enterprise application and deploys it to the OpenShift cluster.
51 |
52 | **Outcome from running the pipeline**
53 |
54 | A new version of your application is deployed with zero downtime, replacing any existing version of the app once it is ready.
55 |
56 | **Screenshot**
57 |
58 | 
59 |
60 | **Background**
61 |
62 | As discussed above, most of your application configuration will be provided to your application container at runtime by the Operator using `Configuration` resources.
63 |
64 | As shown in the screenshot above, this example pipeline currently supports many, but not all, of the types of Configuration resource:
65 | - [Loopback data source type](https://www.ibm.com/docs/en/SSTTDS_contcd/com.ibm.ace.icp.doc/config_loopbackdatasource.html)
66 | - [Policy project type](https://www.ibm.com/docs/en/SSTTDS_contcd/com.ibm.ace.icp.doc/config_policyproject.html)
67 | - [setdbparms.txt type](https://www.ibm.com/docs/en/SSTTDS_contcd/com.ibm.ace.icp.doc/config_setdbparmstxt.html)
68 | - [server.conf.yaml type](https://www.ibm.com/docs/en/SSTTDS_contcd/com.ibm.ace.icp.doc/config_serverconfyaml.html)
69 | - [Truststore type](https://www.ibm.com/docs/en/SSTTDS_contcd/com.ibm.ace.icp.doc/config_truststore.html)
70 |
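Each of these is created by the pipeline as an instance of the App Connect Operator's `Configuration` resource. As a rough sketch only (the name, namespace and base64 payload below are placeholder values - the actual spec is generated by the tasks in [`tekton/tasks`](./tekton/tasks/)), a policy project Configuration looks something like this:

```yaml
apiVersion: appconnect.ibm.com/v1beta1
kind: Configuration
metadata:
  # placeholder values - the pipeline fills these in from its parameters
  name: sample-policies
  namespace: ace-demo
spec:
  type: policyproject
  # base64-encoded zip of the policy project folder
  contents: UEsDBBQACAgIAd...
```
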
71 | For more information about the other Configuration types, see the documentation on [Configuration types for integration servers](https://www.ibm.com/docs/en/app-connect/containers_cd?topic=servers-configuration-types-integration). Adding support for any of these additional types would involve adding new tasks alongside the [tasks provided in this repo](./tekton/tasks/) - the existing tasks are commented to help with this.
72 |
73 | Each of these configuration resources is individually optional. Two example App Connect applications are provided to show how the pipeline supports different application types.
74 |
75 | #### Simple stand-alone applications
76 |
77 | The pipeline can be used to deploy a stand-alone application with no configuration dependencies.
78 |
79 | | | **link** |
80 | | - | - |
81 | | **sample application** | [simple-demo](./ace-projects/simple-demo/) |
82 | | **pipeline run config** | [`simple-pipelinerun.yaml`](./simple-pipelinerun.yaml) |
83 | | **demo script:** | [`1-deploy-simple-integration-server.sh`](./1-deploy-simple-integration-server.sh) |
84 |
85 | 
86 |
87 | This is a simple App Connect application with no external configuration.
88 |
89 | 
90 |
91 | When deploying this, the pipeline skips all of the Configuration tasks:
92 |
93 | 
94 |
95 | Watching the pipeline run looks like this (except it takes longer).
96 |
97 | 
98 |
99 | #### Complex applications
100 |
101 | The pipeline can be used to deploy complex applications with multiple configuration dependencies and supporting Java projects.
102 |
103 | | | **link** |
104 | | - | - |
105 | | **sample application** | [sample-ace-application](./ace-projects/sample-ace-application/) |
106 | | **pipeline run config** | [`complex-pipelinerun.yaml`](./complex-pipelinerun.yaml) |
107 | | **demo script:** | [`1-deploy-complex-integration-server.sh`](./1-deploy-complex-integration-server.sh) |
108 |
109 | 
110 |
111 | This is an example of an App Connect application that needs configuration for connecting to:
112 | - a PostgreSQL database
113 | - an external HTTP API
114 | - an Apache Kafka cluster
115 |
116 | 
117 |
118 | When deploying this, the pipeline runs all of the Configuration tasks required for this application:
119 |
120 | 
121 |
122 | Watching the pipeline run (also sped up!) looks like this.
123 |
124 | 
125 |
126 | To avoid needing to store credentials in git with your application code, the pipeline retrieves credentials from Kubernetes secrets. When [configuring the pipeline for your application](#configuring-the-pipeline-for-your-app-connect-enterprise-application) you need to specify the secrets it should use to do this.
127 |
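For example, each entry in the `credentials-to-update` parameter of [`complex-pipelinerun.yaml`](./complex-pipelinerun.yaml) is a space-separated string naming the template file to edit, the placeholder value to replace, and the key, secret name and namespace to read the real value from:

```yaml
- name: credentials-to-update
  value:
    # file-to-update  value-to-update  key  secret  namespace
    - "sample-ace-config/setdbparms.txt TEMPLATE_KAFKA_ES_PASSWORD password appconnect-kafka-user eventstreams"
```
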
128 | ## Tests
129 |
130 | If you have a [test project](https://www.ibm.com/docs/en/app-connect/12.0?topic=tests-running-integration) for your App Connect application, the pipeline can run those tests as well.
131 |
132 | Provide the name of your test project in the pipeline config, and your tests will be run after the BAR file is built.
133 |
134 | If you don't provide a test project, the test step in the pipeline will be skipped.
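
For example, this is how [`complex-pipelinerun.yaml`](./complex-pipelinerun.yaml) names its test project:

```yaml
# name of an App Connect test project, used to verify the bar file
# before the new Integration Server is deployed
- name: test-project-name
  value: "sample-ace-application_Test"
```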
135 |
136 | ## Sample apps
137 |
138 | I've put notes on how I set up the sample apps to demonstrate the pipeline in [demo-pre-reqs/README.md](./demo-pre-reqs/README.md). Neither of the sample apps is particularly useful in its own right - they were purely used to test and demo the pipeline.
139 |
140 | If you want to edit them, you can import them into the [App Connect Toolkit](https://www.ibm.com/docs/en/app-connect/containers_cd?topic=developing-integrations-in-app-connect-toolkit) by:
141 |
142 | 1. **File** -> **Import...** -> **Projects from Folder or Archive**
143 | 2. Put the location of the [ace-projects](./ace-projects/) folder as the **Import source**.
144 | 3. Tick all of the projects
145 |
146 | That will let you open the projects and work on them locally. If you're curious what they do, I'll include some brief notes below:
147 |
148 | ### Simple app
149 |
150 | It provides an HTTP endpoint that returns a Hello World message.
151 |
152 | 
153 |
154 | Running this:
155 | ```sh
156 | curl "http://$(oc get route -nace-demo hello-world-http -o jsonpath='{.spec.host}')/hello"
157 | ```
158 |
159 | returns this:
160 | ```json
161 | {"hello":"world"}
162 | ```
163 |
164 | #### Test
165 |
166 | A test for this app is provided in [simple-demo_Test](./ace-projects/simple-demo_Test/).
167 |
168 | To run it:
169 | 1. Create a local integration server called `TEST_SERVER` (inheriting the configuration in the [TEST_SERVER](./ace-projects/TEST_SERVER/) folder)
170 | 2. Run the test launch configuration [simple-demo_Test.launch](./ace-projects/simple-demo_Test/simple-demo_Test.launch)
171 |
172 | ### Complex app
173 |
174 | It provides an intentionally contrived event-driven flow that:
175 | - "Kafka consumer todo updates"
176 | - receives a JSON message from a Kafka topic
177 | - "get id from update message"
178 | - parses the JSON message and extracts an ID number from it
179 | - uses the id number to create an HTTP URL for an external API
180 | - "retrieve current todo details"
181 | - makes an HTTP GET call to the external API
182 | - "base64 encode the description"
183 | - transforms the response from the external API using a custom Java class
184 | - "insert into database"
185 | - inserts the transformed response payload into a PostgreSQL database
186 |
187 | 
188 |
189 | The aim of this application is to demonstrate an ACE application that needs a variety of Configuration resources.
190 |
191 | But it means that running this:
192 | ```sh
193 | echo '{"id": 1, "message": "quick test"}' | kafka-console-producer.sh \
194 | --bootstrap-server $BOOTSTRAP \
195 | --topic TODO.UPDATES \
196 | --producer-property "security.protocol=SASL_SSL" \
197 | --producer-property "sasl.mechanism=SCRAM-SHA-512" \
198 | --producer-property "sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required username="appconnect-kafka-user" password="$PASSWORD";" \
199 | --producer-property "ssl.truststore.location=ca.p12" \
200 | --producer-property "ssl.truststore.type=PKCS12" \
201 | --producer-property "ssl.truststore.password=$CA_PASSWORD"
202 | ```
203 |
204 | gets you this:
205 | ```
206 | store=# select * from todos;
207 | id | user_id | title | encoded_title | is_completed
208 | ----+---------+--------------------+--------------------------------------+--------------
209 | 1 | 1 | delectus aut autem | RU5DT0RFRDogZGVsZWN0dXMgYXV0IGF1dGVt | f
210 | (1 row)
211 | ```
212 |
213 | #### Test
214 |
215 | A test for this app is provided in [sample-ace-application_Test](./ace-projects/sample-ace-application_Test/).
216 |
217 | To run it:
218 | 1. Create a local integration server called `TEST_SERVER` (inheriting the configuration in the [TEST_SERVER](./ace-projects/TEST_SERVER/) folder)
219 | 2. Run the test launch configuration [sample-ace-application_Test.launch](./ace-projects/sample-ace-application_Test/sample-ace-application_Test.launch)
220 |
221 |
222 | ## Configuring the pipeline for your App Connect Enterprise application
223 |
224 | To run the pipeline for your own application, you need to first create a `PipelineRun`.
225 |
226 | The sample pipeline runs described above provide a good starting point, which you can modify to suit your own needs. You need to specify the location of your App Connect Enterprise application code and configuration resources. All of the available parameters are documented in the [pipeline spec](./tekton/pipeline.yaml#L20-L199) if you need further guidance.
227 |
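Once you have a `PipelineRun` definition, you can submit it in the same way the helper scripts do. A minimal sketch, assuming the pipeline was set up in the `pipeline-ace` namespace by `0-setup.sh` and that `my-pipelinerun.yaml` is your modified copy of one of the sample pipeline runs:

```sh
# oc create (rather than apply) because the sample pipeline runs
#  use generateName to get a unique name for every run
oc create -n pipeline-ace -f my-pipelinerun.yaml

# follow the logs of the most recent run (needs the tkn CLI)
tkn pipelinerun logs --last -f -n pipeline-ace
```
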
228 | ### Accessing Git using credentials
229 |
230 | If your App Connect Enterprise files are in a git repository that requires authentication to access, you will also need to provide credentials for the pipeline to be able to clone your repository.
231 |
232 | Create a file called `github-credentials.yaml` in the same folder as the `0-setup.sh` script.
233 |
234 | It should look like this:
235 |
236 | ```yaml
237 | apiVersion: v1
238 | kind: Secret
239 | metadata:
240 | name: github-credentials
241 | annotations:
242 | tekton.dev/git-0: https://github.com
243 | type: kubernetes.io/basic-auth
244 | stringData:
245 | username: your-user-name
246 | password: your-github-token
247 | ```
248 |
249 | You can alter the GitHub URL if your Git repository is hosted somewhere different (e.g. GitHub Enterprise).
250 |
251 | If your Git repository is publicly readable, you can skip this step.
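
The `0-setup.sh` script is expected to pick this file up; if you need to create or update the secret by hand, something like this should work (assuming the pipelines run in the `pipeline-ace` namespace created by the setup script):

```sh
oc apply -n pipeline-ace -f github-credentials.yaml
```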
252 |
253 | ## Supported versions
254 |
255 | This sample pipeline was tested on OpenShift 4.10.
256 |
257 | You can see the versions of what I was running on OpenShift at [./demo-pre-reqs/operators/](./demo-pre-reqs/operators/). It is possible that this pipeline would need modifying to work with different versions of OpenShift, Tekton, or App Connect.
258 |
259 | ## More info
260 |
261 | For help with using this or if you have any questions, please create an issue or contact me.
262 |
--------------------------------------------------------------------------------
/tekton/pipeline.yaml:
--------------------------------------------------------------------------------
1 | #
2 | # Tekton pipeline for deploying App Connect Enterprise integration servers
3 | #
4 | # This will:
5 | # - create supporting configuration objects, containing connection
6 | # info, credentials, truststores, etc.
7 | # - compile the App Connect application into a bar file, including
8 | # a supporting Java project if needed
9 | # - deploy the application to an integration server in OpenShift
10 | #
11 |
12 | apiVersion: tekton.dev/v1beta1
13 | kind: Pipeline
14 | metadata:
15 | name: pipeline-ace-integration-server
16 | spec:
17 | workspaces:
18 | - name: pipeline-shared-workspace
19 |
20 | params:
21 | # ########################################################
22 | # where to run the App Connect application
23 | # ########################################################
24 | - name: integration-server-name
25 | description: Name of the IntegrationServer to create
26 |
27 | - name: ace-namespace
28 | type: string
29 | description: |+
30 | Namespace to deploy the App Connect Enterprise resources to.
31 | There needs to be an App Connect operator watching this namespace
32 |
33 |
34 | # ########################################################
35 | # where to find the projects to create
36 | # ########################################################
37 |
38 | - name: git-repository
39 | type: string
40 | description: |+
41 | URL for the git repository containing the App Connect project to
42 | deploy. If credentials are required, these should be available in
43 | a secret annotated with the git hostname.
44 |
45 | - name: ace-project-name
46 | description: |+
47 | Name of the App Connect project to deploy.
48 | This should be the name of a folder in ace-projects in the
49 | git-repository being cloned.
50 |
51 | - name: test-project-name
52 | default: ""
53 | description: |+
54 | Name of an App Connect test project to run to verify the bar file
55 | that is built with the ace-project-name project.
56 | The new integration server will only be deployed if all of the tests
57 | in this project pass.
58 | Leave this as an empty string if there are no tests to run.
59 |
60 | # ########################################################
61 | #
62 | # files in the git repository that need to be updated
63 | #
64 | # Each string in the array should contain a space-separated list of
65 | # strings that looks like this:
66 | # file-to-update value-to-update key secret namespace
67 | #
68 | # where:
69 | # file-to-update - relative location of the template file to be updated
70 | # value-to-update - the value in the template file that should be replaced
71 | # key - which value to retrieve from a secret to replace the template value with
72 | # secret - name of the secret to retrieve value from
73 | # namespace - namespace of the secret to retrieve value from
74 | #
75 | # ########################################################
76 |
77 | - name: credentials-to-update
78 | type: array
79 | default:
80 | - ""
81 |
82 | # ########################################################
83 | # optional supporting resources to deploy
84 | # ########################################################
85 |
86 | # Java project
87 | - name: java-project-name
88 | default: ""
89 | description: |+
90 | Name of Java project containing implementations of Java Compute nodes.
91 | This should be the name of a folder in ace-projects in the
92 | git-repository being cloned.
93 | Leave as an empty string if there is no Java project needed in the
94 | application.
95 |
96 | # setdbparms.txt file
97 | - name: setdbparms-file
98 | default: ""
99 | description: |+
100 | Location of a setdbparms.txt file to include in the project.
101 | This should be the location of a file in ace-projects in the
102 | git-repository being cloned.
103 | Leave as an empty string if there are no credentials needed in the
104 | application.
105 | - name: setdbparms-name
106 | default: ""
107 | description: |+
108 | Name to give to the setdbparms.txt file when deployed to OpenShift.
109 | Leave as an empty string if there are no credentials needed in the
110 | application.
111 |
112 | # server.conf.yaml
113 | - name: serverconf-file
114 | default: ""
115 | description: |+
116 | Location of a server.conf.yaml file to include in the project.
117 | This should be the location of a file in ace-projects in the
118 | git-repository being cloned.
119 | Leave as an empty string if no custom configuration is needed in the
120 | application.
121 | - name: serverconf-name
122 | default: ""
123 | description: |+
124 | Name to give to the server.conf.yaml file when deployed to OpenShift.
125 | Leave as an empty string if no custom configuration is needed in the
126 | application.
127 |
128 | # Policy project
129 | - name: policies-project-folder
130 | default: ""
131 | description: |+
132 | Location of a project containing App Connect policies.
133 | This should be the name of a folder in ace-projects in the
134 | git-repository being cloned.
135 | Leave as an empty string if there are no policies needed in the
136 | application.
137 | - name: policies-project-name
138 | default: ""
139 | description: |+
140 | Name to give to the policy project when deployed to OpenShift.
141 | Leave as an empty string if there are no policies needed in the
142 | application.
143 |
144 | # Loopback data sources
145 | - name: datasources-project-folder
146 | default: ""
147 | description: |+
148 | Location of a project containing Loopback data sources.
149 | This should be the name of a folder in ace-projects in the
150 | git-repository being cloned.
151 | Leave as an empty string if there are no data sources needed in the
152 | application.
153 | - name: datasources-project-name
154 | default: ""
155 | description: |+
156 | Name to give to the data sources project when deployed to OpenShift.
157 | Leave as an empty string if there are no data sources needed in the
158 | application.
159 |
160 | # truststore file
161 | - name: truststore-p12-secret
162 | default: ""
163 | description: |+
164 | Name of a secret containing a pkcs12 file to use as
165 | the basis for a truststore for App Connect.
166 | Leave as an empty string if no truststore is needed.
167 | - name: truststore-p12-secret-namespace
168 | default: ""
169 | description: |+
170 | Namespace for a secret containing a pkcs12 file to use as
171 | the basis for a truststore for App Connect.
172 | Leave as an empty string if no truststore is needed.
173 | - name: truststore-p12-secret-key
174 | default: "ca.p12"
175 | description: Key of the secret containing the pkcs12 file.
176 | - name: truststore-p12-secret-password
177 | default: "ca.password"
178 | description: Key of the secret containing the pkcs12 password.
179 | - name: truststore-name
180 | default: ""
181 | description: |+
182 | Name to give to the truststore created when deployed to OpenShift.
183 | Leave as an empty string if no truststore is needed.
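#    As a rough illustration only (the secret and namespace names below are
#    hypothetical), a secret containing the expected keys could be created with:
#      oc create secret generic my-truststore-secret -n my-namespace \
#        --from-file=ca.p12=./ca.p12 --from-literal=ca.password=changeit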
184 |
185 |
186 | # ########################################################
187 | # App Connect options
188 | #
189 | # For details of other values that can be used for these
190 | # parameters, check https://www.ibm.com/docs/en/app-connect/containers_cd?topic=obtaining-app-connect-enterprise-server-image-from-cloud-container-registry
191 | #
192 | # ########################################################
193 |
194 | - name: app-connect-enterprise-version
195 | default: "12.0.5.0-r4"
196 | - name: app-connect-enterprise-license
197 | default: "L-KSBM-CJ2KWU"
198 | - name: app-connect-enterprise-base-image
199 | default: "cp.icr.io/cp/appc/ace-server-prod@sha256:473bbea96e65025c11c91014ac8dd72641212bd2f5f35d661dca7b3242f4155c"
200 |
201 | # ########################################################
202 |
203 | tasks:
204 |
205 | # --------------------------------------------------------
206 |
207 | - name: clone-source
208 | taskRef:
209 | name: git-clone-home
210 | params:
211 | - name: url
212 | value: "$(params.git-repository)"
213 | workspaces:
214 | - name: output
215 | workspace: pipeline-shared-workspace
216 |
217 |
218 | # --------------------------------------------------------
219 |
220 | - name: bar-file
221 | taskRef:
222 | name: ace-create-bar-file
223 | params:
224 | - name: ace-project-name
225 | value: "$(params.ace-project-name)"
226 | - name: java-project-name
227 | value: "$(params.java-project-name)"
228 | runAfter:
229 | # need the git repository to be able to
230 | # compile the project into a bar file
231 | - clone-source
232 | workspaces:
233 | - name: output
234 | workspace: pipeline-shared-workspace
235 |
236 | - name: test
237 | taskRef:
238 | name: ace-run-tests
239 | params:
240 | - name: bar-file-location
241 | value: "$(tasks.bar-file.results.folder)"
242 | - name: test-project-name
243 | value: "$(params.test-project-name)"
244 | when:
245 | - input: "$(params.test-project-name)"
246 | operator: notin
247 | values:
248 | - ""
249 | runAfter:
250 | # need the bar file to be built in order
251 | # to test it
252 | - bar-file
253 | workspaces:
254 | - name: output
255 | workspace: pipeline-shared-workspace
256 |
257 |
258 | # --------------------------------------------------------
259 |
260 | - name: update-templates
261 | taskRef:
262 | name: ace-update-templates-from-secrets
263 | params:
264 | - name: credentials
265 | value: ["$(params.credentials-to-update[*])"]
266 | runAfter:
267 | - clone-source
268 | workspaces:
269 | - name: output
270 | workspace: pipeline-shared-workspace
271 |
272 |
273 | # --------------------------------------------------------
274 |
275 | - name: p12
276 | taskRef:
277 | name: read-secret
278 | params:
279 | - name: name
280 | value: "$(params.truststore-p12-secret)"
281 | - name: namespace
282 | value: "$(params.truststore-p12-secret-namespace)"
283 | - name: key
284 | value: "$(params.truststore-p12-secret-key)"
285 | when:
286 | - input: "$(params.truststore-p12-secret)"
287 | operator: notin
288 | values:
289 | - ""
290 | - input: "$(params.truststore-p12-secret-namespace)"
291 | operator: notin
292 | values:
293 | - ""
294 | runAfter:
295 | # we don't need anything from the cloned
296 | # git repo, but doing this after it is
297 | # cloned prevents the git clone overwriting
298 | # the file we create
299 | - clone-source
300 | workspaces:
301 | - name: output
302 | workspace: pipeline-shared-workspace
303 |
304 | - name: p12-password
305 | taskRef:
306 | name: read-secret
307 | params:
308 | - name: name
309 | value: "$(params.truststore-p12-secret)"
310 | - name: namespace
311 | value: "$(params.truststore-p12-secret-namespace)"
312 | - name: key
313 | value: "$(params.truststore-p12-secret-password)"
314 | when:
315 | - input: "$(params.truststore-p12-secret)"
316 | operator: notin
317 | values:
318 | - ""
319 | - input: "$(params.truststore-p12-secret-namespace)"
320 | operator: notin
321 | values:
322 | - ""
323 | runAfter:
324 | # we don't need anything from the cloned
325 | # git repo, but doing this after it is
326 | # cloned prevents the git clone overwriting
327 | # the file we create
328 | - clone-source
329 | workspaces:
330 | - name: output
331 | workspace: pipeline-shared-workspace
332 |
333 | - name: jks
334 | taskRef:
335 | name: convert-p12-to-jks
336 | params:
337 | - name: p12-file
338 | value: "$(tasks.p12.results.file)"
339 | - name: password
340 | value: "$(tasks.p12-password.results.value)"
341 | runAfter:
342 | - p12
343 | - p12-password
344 | workspaces:
345 | - name: output
346 | workspace: pipeline-shared-workspace
347 |
348 |
349 | # --------------------------------------------------------
350 |
351 | - name: data-source
352 | taskRef:
353 | name: ace-create-datasource
354 | params:
355 | - name: ace-namespace
356 | value: "$(params.ace-namespace)"
357 | - name: datasources-project-template
358 | value: "$(params.datasources-project-folder)"
359 | - name: datasources-name
360 | value: "$(params.datasources-project-name)"
361 | when:
362 | - input: "$(params.datasources-project-folder)"
363 | operator: notin
364 | values:
365 | - ""
366 | - input: "$(params.datasources-project-name)"
367 | operator: notin
368 | values:
369 | - ""
370 | runAfter:
371 | - update-templates
372 | workspaces:
373 | - name: output
374 | workspace: pipeline-shared-workspace
375 |
376 |
377 | - name: policies
378 | taskRef:
379 | name: ace-create-policy-project
380 | params:
381 | - name: ace-namespace
382 | value: "$(params.ace-namespace)"
383 | - name: policies-project-folder
384 | value: "$(params.policies-project-folder)"
385 | - name: policies-name
386 | value: "$(params.policies-project-name)"
387 | when:
388 | - input: "$(params.policies-project-folder)"
389 | operator: notin
390 | values:
391 | - ""
392 | - input: "$(params.policies-project-name)"
393 | operator: notin
394 | values:
395 | - ""
396 | runAfter:
397 | - update-templates
398 | workspaces:
399 | - name: output
400 | workspace: pipeline-shared-workspace
401 |
402 |
403 | - name: setdbparms
404 | taskRef:
405 | name: ace-create-configuration
406 | params:
407 | - name: ace-namespace
408 | value: "$(params.ace-namespace)"
409 | - name: config-file
410 | value: "$(params.setdbparms-file)"
411 | - name: config-type
412 | value: "setdbparms"
413 | - name: config-name
414 | value: "$(params.setdbparms-name)"
415 | when:
416 | - input: "$(params.setdbparms-file)"
417 | operator: notin
418 | values:
419 | - ""
420 | - input: "$(params.setdbparms-name)"
421 | operator: notin
422 | values:
423 | - ""
424 | runAfter:
425 | - update-templates
426 | workspaces:
427 | - name: output
428 | workspace: pipeline-shared-workspace
429 |
430 |
431 | - name: serverconf
432 | taskRef:
433 | name: ace-create-configuration
434 | params:
435 | - name: ace-namespace
436 | value: "$(params.ace-namespace)"
437 | - name: config-file
438 | value: "$(params.serverconf-file)"
439 | - name: config-type
440 | value: "serverconf"
441 | - name: config-name
442 | value: "$(params.serverconf-name)"
443 | when:
444 | - input: "$(params.serverconf-file)"
445 | operator: notin
446 | values:
447 | - ""
448 | - input: "$(params.serverconf-name)"
449 | operator: notin
450 | values:
451 | - ""
452 | runAfter:
453 | - update-templates
454 | workspaces:
455 | - name: output
456 | workspace: pipeline-shared-workspace
457 |
458 |
459 | - name: truststore
460 | taskRef:
461 | name: ace-create-configuration
462 | params:
463 | - name: ace-namespace
464 | value: "$(params.ace-namespace)"
465 | - name: config-file
466 | value: "$(tasks.jks.results.file)"
467 | - name: config-type
468 | value: "truststore"
469 | - name: config-name
470 | value: "$(params.truststore-name)"
471 | runAfter:
472 | - jks
473 | workspaces:
474 | - name: output
475 | workspace: pipeline-shared-workspace
476 |
477 | # --------------------------------------------------------
478 |
479 | finally:
480 |
481 | - name: integration-server
482 | taskRef:
483 | name: ace-create-integration-server
484 | params:
485 | # --------------------------------------------------------------
486 | # details of the IntegrationServer to create
487 | # --------------------------------------------------------------
488 | - name: integration-name
489 | value: "$(params.integration-server-name)"
490 | - name: ace-namespace
491 | value: "$(params.ace-namespace)"
492 | # --------------------------------------------------------------
493 | # what flavour of App Connect to run
494 | # --------------------------------------------------------------
495 | - name: ace-server-version
496 | value: "$(params.app-connect-enterprise-version)"
497 | - name: ace-server-license
498 | value: "$(params.app-connect-enterprise-license)"
499 | # --------------------------------------------------------------
500 | # details of the Docker image that will be created to run the
501 | # integration server
502 | # --------------------------------------------------------------
503 | - name: ace-server-base-image
504 | value: "$(params.app-connect-enterprise-base-image)"
505 | - name: bar-file-location
506 | value: "$(tasks.bar-file.results.folder)"
507 | - name: integration-tag
508 | value: "$(tasks.clone-source.results.commit)"
509 | # --------------------------------------------------------------
510 | # configuration resources to add to the integration server
511 | # --------------------------------------------------------------
512 | # configurations to include
513 | - name: policies-configuration
514 | value: "$(params.policies-project-name)"
515 | - name: datasource-configuration
516 | value: "$(params.datasources-project-name)"
517 | - name: setdbparms-configuration
518 | value: "$(params.setdbparms-name)"
519 | - name: serverconf-configuration
520 | value: "$(params.serverconf-name)"
521 | - name: truststore-configuration
522 | value: "$(params.truststore-name)"
523 | when:
524 | # --------------------------------------------------------------
525 | # Don't run if the preceding tasks have failed
526 | # --------------------------------------------------------------
527 | - input: "$(tasks.status)"
528 | operator: notin
529 | values:
530 | - "Failed"
531 | workspaces:
532 | - name: output
533 | workspace: pipeline-shared-workspace
534 |
--------------------------------------------------------------------------------
/ace-projects/TEST_SERVER/server.conf.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # ACE Integration Server configuration file
3 | #
4 | # Created at 12.0.6.0 - 5f0e97cacec81a59105faafbe0bc9f93-725362bb6c4df32013b83b36a127f242
5 | #
6 | # General notes :
7 | # - Integration Server will load server.conf.yaml from directory set via --work-dir
8 | # - To ensure valid YAML avoid any use of TAB characters
9 | # - File paths may be taken as absolute, or relative to the integration server's work directory
10 | #
11 | # WARNING: Any value specified in the 'overrides/server.conf.yaml' will override values here
12 | #
13 | serverConfVersion: 1
14 |
15 | #lilPath: '' # A list of paths from where User-defined node LIL/JAR files are loaded. (multiple directories are separated by platform path separator)
16 |
17 | #deployMode: 'replace' # Deploy mode: replace | none | manual
18 | #scanForKeywords: '' # A comma-delimited list of file types, selected from
19 | # [esql, xsl, xslt], to scan for keywords.
20 |
21 | # The remoteDefaultQueueManager and defaultQueueManager properties are mutually exclusive. Only uncomment one of these two options.
22 | #defaultQueueManager: '' # Set non-empty string to specify a default queue manager
23 | #mqTrustedQueueManager: 'no' # Set to yes to enable MQ communication as a trusted (fastpath) application (does not apply to remote QM)
24 |
25 | #remoteDefaultQueueManager: '' # Specify an MQEndpoint policy in the format {policy project}:policy
26 | #replacementQueuePrefix: 'MYPREFIX' # When using a remoteDefaultQueueManager, use this prefix to enable the same queue manager
27 | # to be used for multiple separate integration servers. The prefix replaces the "SYSTEM.BROKER."
28 | # section of the queue name.
29 |
30 | #trace: 'none' # choose 1 of : none|debug|debugTree|diagnostic|diagnosticTree
31 | #traceSize: '1G' # Set the service trace size
32 |
33 | #traceNodeLevel: true # Enable or disable message flow 'Trace nodes'
34 |
35 | #userTrace: 'none' # choose 1 of : none|debug|debugTree
36 | #userTraceSize: '1G' # Set the service trace size
37 |
38 | #forceServerHTTP: false # Set true to override and force unsecured HTTP on all HTTP/SOAP input nodes deployed in this server. Defaults to false.
39 | #forceServerHTTPS: false # Set true to override and force secured HTTPS on all HTTP/SOAP input nodes deployed in this server. Defaults to false.
40 | #forceServerHTTPSecurityProfile: '' # Set a security profile, {}:, to override and force all HTTP/SOAP transport
41 | # input nodes deployed in this server to apply the security set in the profile. Default is unset, so flow or node setting applies.
42 | #forceServerHTTPFaultDetail: 'specific' # Set the detail level for the default Error Handling fault messages returned by all HTTP/SOAPInput nodes. Default is 'specific'.
43 | # Set to 'specific' for full error & exception details to be included in the response. Set to 'generic' for error and exception details to be hidden and a generic error response to be sent.
44 | #httpServerName: '' # Set the value to be returned in the 'Server' HTTP response header. Cf. the ResourceManagers / HTTP[S]Connector / ServerName
45 | # pair of settings, which take precedence over this one.
46 | #iwaGSSHostnameOverride: '' # Set the local hostname to be used in GSS-controlled Kerberos authentication from a SOAP or HTTP input node. Can also be set
47 | # via the MQSI_GSS_HOSTNAME_OVERRIDE environment variable, which takes precedence over this setting.
48 |
49 | #additionalJpluginDirectories: '' # Extra directories to search for Java plugins; can reference shared libraries using '{shlibname}' syntax.
50 | #additionalSharedClassesDirectories: '' # Extra directories to add to the shared classloader search path; can reference shared libraries using '{shlibname}' syntax.
51 | #abendDirectory: '' # Set the directory to store abends and related error files
52 | #traceDirectory: '' # Set the directory to store trace files
53 |
54 | Log:
55 | consoleLog: true # Control writing BIP messages to standard out. Set to true or false, default is true.
56 | outputFormat: 'idText' # Control the format of BIP messages written to standard out and file. Set to idText, text or ibmjson, default is text if unset.
57 | #additionalJsonElements: '"name1": "value1", "name2": "value2"' #When the outputFormat is set to ibmjson the specified json elements are added to the root of the logging object
58 | #eventLog: '[iib.system-work-dir]/log/[iib.system-node-label].[iib.system-server-label].events.txt' # Control writing BIP messages to file. Set to '' to disable, default is as shown.
59 | #eventLogFileSize: 10 # The maximum size in MB of an event log file before it is rotated into a new file
60 | #eventLogFileCount: 10 # The maximum number of event log files that should be rotated between.
61 | #elkLog: false # Control the publication of BIP messages to an ELK (Elasticsearch, Logstash, Kibana) stack. Set to true or false, default is false.
62 | #elkConnections: '' # Name of the ELK connection to use, for example 'elkConnection1', must be defined in the ELKConnections section below.
63 |
64 | AdminLog:
65 | #enabled: true # Control logging admin log messages. Set to true or false, default is true.
66 | # When enabled the maximum amount of disk space required for admin log files is
67 | # fileLogRetentionPeriod * fileLogCountDaily * fileLogSize
68 | #fileLog: false # Control writing admin log messages to file. Set to true or false, default is false.
69 | #fileLogRetentionPeriod: 30 # Sets the number of days to record admin log.
70 | # After this, old files are deleted as new ones are created. Default is 30 days.
71 | #fileLogCountDaily: 10 # Maximum number of admin log files to write per day, default is 10 per day.
72 | #fileLogSize: 100 # Maximum size in MB for each admin log file. Maximum size is 2000MB, default size is 100MB.
73 | consoleLog: false # Control writing admin log messages to standard out. Set to true or false, default is false.
74 | consoleLogFormat: 'idText' # Control the format of admin log messages written to standard out. Set to idText, text or ibmjson, default is text if unset.
75 | #additionalJsonElements: '"name1": "value1", "name2": "value2"' #When the outputFormat is set to ibmjson the specified json elements are added to the root of the logging object
76 |
77 | RestAdminListener:
78 | #port: 7600 # Set the Admin REST API Port for ACE Web UI and Toolkit or -1 to disable. Defaults to 7600.
79 |
80 | # Note the Admin REST API will be insecure without the following being set
81 | #host: 'localhost' # Set the hostname otherwise we bind to the unspecified address
82 |
83 | # SSL Server auth
84 | #sslCertificate: '/path/to/serverPKCS.p12' # See comment below
85 | #sslPassword: 'adminRestApi::sslpwd' # See comment below
86 |
87 | # Server TLS configuration
88 | minimumTlsVersion: 'TLSv1.2' # Set the minimum TLS version that will be accepted from clients. Default is 'TLSv1.0', must be one of 'TLSv1.0', 'TLSv1.1', or 'TLSv1.2'
89 | # maximumTlsVersion: 'TLSv1.2' # Set the maximum TLS version that will be accepted from clients. Default is 'TLSv1.2', must be one of 'TLSv1.2' or 'TLSv1.3'. This must be at least the value set for minimumTlsVersion otherwise all connections will be rejected.
90 | #tlsCipherList: '' # Set the list of acceptable ciphers that should be used in TLS connections. Specify the empty string to use the default cipher list. This should be a colon separated list of upper case cipher names following the OpenSSL cipher list naming convention.
91 | #tlsTrace: false # Enables tracing of TLS handshake to the console.
92 |
93 | #antiClickJackingOption: 'SAMEORIGIN' # Set the value of the X-Frame-Origin header sent by the web user interface. The default is 'SAMEORIGIN' for security. Set to '' to disable the X-Frame-Origin header.
94 |
95 | # If using a pem certificate:
96 | # sslCertificate is the full path to the server certificate key
97 | # sslPassword is the full path to the server private key, which must be a standard private key and not an encrypted one
98 | # The file names must end with '.pem'
99 | # If using p12/pfx certificate:
100 | # sslCertificate is the full path to the server certificate store file
101 | # sslPassword is the passphrase or alias to the passphrase of the certificate store
102 |
103 | # SSL Client auth
104 | #requireClientCert: true # Request a certificate from the client
105 | #caPath: '/path/to/CA/certificates' # CA certs, all files at this path will be read
106 |
107 | # Admin Security
108 | # Authentication
109 | # If basicAuth enabled, a maximum of authMaxAttempts authentication attempts are allowed for a client within period authAttemptsDuration
110 | # If authMaxAttempts is reached without success, the client is locked out for period authBlockedDuration
111 | #basicAuth: false # Clients web user name and password will be authenticated when set true
112 | webUserPasswordHashAlgorithm: PBKDF2-SHA-512 # Algorithm used to hash the password for webuser accounts.
113 | #authMaxAttempts: 5 # Max allowed authentication attempts
114 | #authAttemptsDuration: 300 # Authentication attempts period in seconds
115 | #authBlockedDuration: 300 # Authentication blocked period in seconds
116 | #sessionTimeout: -1 # Client-side expiration time in seconds for REST API/Web UI sessions.
117 | # Negative values or zero signify that the session remains active
118 | # for the lifetime of the browser session. Defaults to -1.
119 | #serverSessionTimeout: 86400 # Server-side expiration time in seconds for REST API/Web UI sessions. Can
120 | # be specified independently of the client-side timeout, allowing sessions
121 | # to be invalidated on the server before they are expired by the client. This
122 | # is useful in particular when the client-side session lifetime is set to that
123 | # of the browser session ('BROWSER_EXIT' special
124 | #serverSessionTimeoutCheckInterval: 3600
125 | #ldapUrl: ldap[s]://server[:port]/baseDN[?[uid_attr][?[base|sub]]] # ldap authentication url
126 | #ldapBindDn: ldap::adminAuthentication # Resource alias or full bind dn
127 | #ldapBindPassword: ldap::adminAuthentication # Resource alias or bind password
128 | # Authorization
129 | #authorizationEnabled: false # Clients web user role will be authorized when set true
130 | #authorizationMode: 'file' # Set authorization mode. Choose 1 of : ldap, file
131 | #ldapAuthorizeUrl: ldap[s]://server[:port]/baseDN[?[attr_name][?[base|sub]][?filter_expr]] # ldap authorization search url
132 | #ldapCheckServerIdentity : true # Disables hostname verification of ldaps server when set to false
133 |
134 | Security:
135 | LdapAuthorizeAttributeToRoleMap:
136 | # When 'authMode' is ldap, set the mapping from a matched LDAP authorization attribute, as
137 | # configured in 'ldapAuthorizeUrl' to the ACE web user role name
138 | # e.g. map the following LDAP group DNs to web user roles 'adminRole', 'viewRole'
139 | #'cn=admins,cn=group,ou=ace': 'adminRole'
140 | #'cn=monitors,cn=group,ou=ace': 'viewRole'
141 | Permissions:
142 | # Set Admin Security Authorization file permissions by web user role using 'read+:write+:execute+' , or 'all+'
143 | # '+' grants permission, '-' denies permission
144 | # e.g. define the following web user roles 'viewRole' and 'adminRole'
145 | #viewRole: 'read+:write-:execute-'
146 | #adminRole: 'all+'
147 | DataPermissions:
148 | # Set Admin Security Authorization file permissions for Record and Replay web user role using 'read+:write+:execute+' , or 'all+'
149 | # '+' grants permission, '-' denies permission. Record and Replay roles also require 'read+' permission to be defined
150 | # in the Permissions section above.
151 | # e.g. define the following web user roles 'dataViewer', 'dataReplayer' and 'adminRole'
152 | #dataViewer: 'read+:write-:execute-'
153 | #dataReplayer: 'read+:write-:execute+'
154 | #adminRole: 'all+'
155 |
156 | Defaults:
157 | #defaultApplication: '' # Name a default application under which independent resources will be placed
158 | #policyProject: 'DefaultPolicies' # Name of the Policy project that will be used for unqualified Policy references, default is 'DefaultPolicies'
159 | Policies:
160 | # Set default policy names, optionally qualified with a policy project as {policy project}:name
161 | #monitoringProfile: '' # Default Monitoring profile
162 | Credentials:
163 | # Names a default credential name to be used when a more specific credential is not available for the credential type.
164 | #httpproxy: ''
165 | #jdbc: ''
166 | #kafka: ''
167 | #kerberos: ''
168 | #ldap: ''
169 | #odbc: ''
170 | #mq: ''
171 | #wsrr: ''
172 |
173 | Events:
174 | OperationalEvents: # Message flow and Resource statistics plus Workload management
175 | MQ:
176 | #policy: '' # Specify a {policy project}:policy if not using 'defaultQueueManager'
177 | #enabled: false # Set true or false, default false
178 | #format: '' # Set string or none
179 | #publishRetryInterval: 0 # Set the retry interval (in milliseconds), to pause all publications and retry, when publication failures are causing serious delay to the transaction.
180 | MQTT:
181 | #policy: '' # Specify a {policy project}:policy
182 | #enabled: false # Set true or false, default false
183 | #format: '' # Set string or none
184 | #publishRetryInterval: 0 # Set the retry interval (in milliseconds), to pause all publications and retry, when publication failures are causing serious delay to the transaction.
185 | BusinessEvents: # Monitoring events
186 | MQ:
187 | #policy: '' # Specify a {policy project}:policy if not using 'defaultQueueManager'
188 | #enabled: false # Set true or false, default false
189 | #format: '' # Set string or none
190 | #outputFormat: 'xml' # Set comma separated list of one or more of : json,xml. Defaults to 'xml'
191 | #publishRetryInterval: 0 # Set the retry interval (in milliseconds), to pause all publications and retry, when publication failures are causing serious delay to the transaction.
192 | MQTT:
193 | #policy: '' # Specify a {policy project}:policy
194 | #enabled: false # Set true or false, default false
195 | #format: '' # Set string or none
196 | #outputFormat: 'xml' # Set comma separated list of one or more of : json,xml. Defaults to 'xml'
197 | #publishRetryInterval: 0 # Set the retry interval (in milliseconds), to pause all publications and retry, when publication failures are causing serious delay to the transaction.
198 | ELK:
199 | #enabled: false # Set true or false, default false
200 | #outputFormat: 'json' # Set json, default json
201 | #elkConnections: '' # Name of the ELK connection to use, for example 'elkConnection1', must be defined in the ELKConnections section below.
202 | File:
203 | #enabled: false # Set true or false, default false
204 | #outputFormat: 'json' # Set comma separated list of one or more of : json,xml. Defaults to 'json'
205 |
206 | Monitoring:
207 | MessageFlow:
208 | #publicationOn: 'inactive' # choose 1 of : active|inactive, default inactive
209 | # Ensure Events.BusinessEvents.MQ|MQTT is set
210 | #eventFormat: 'MonitoringEventV2' # When BusinessEvents.-.outputFormat is xml set MonitoringEventV2 or WMB, default MonitoringEventV2
211 |
212 | Statistics:
213 | # All applications and message flows will inherit the Snapshot and Archive values set here, unless they have been set
214 | # to a specific value other than inherit via the WebUI, mqsichangeflowstats command, Toolkit or apiv2 REST
215 | # Notes
216 | # - values here can be overridden by 'overrides/server.conf.yaml'
217 | # - to publish on MQ or MQTT, also configure Events.OperationalEvents, and set outputFormat to include json and/or xml
218 | # - to display in the WebUI Snapshot.outputFormat must include json; nodeDataLevel needs to be set to basic or advanced
219 | Snapshot:
220 | publicationOn: 'active' # choose 1 of : active|inactive, explicitly set 'active'. If unset will default to 'inactive'
221 | #accountingOrigin: 'none' # choose 1 of : none|basic, default none
222 | nodeDataLevel: 'basic' # choose 1 of : none|basic|advanced, explicitly set 'basic'. If unset will default to 'none'
223 | outputFormat: 'json' # choose comma separated list of one or more of : csv,json,xml,usertrace. Explicitly set to 'json' for WebUI. If unset will default to ''
224 | #threadDataLevel: 'none' # choose 1 of : none|basic. If unset will default to 'none'
225 | Archive:
226 | #archivalOn: 'inactive' # choose 1 of : active|inactive, default inactive
227 | # Also set Events.OperationalEvents.MQ|MQTT for outputFormat xml to be published to MQ/MQTT
228 | #accountingOrigin: 'none' # choose 1 of : none|basic
229 | #majorInterval: 60 # Sets the interval in minutes at which archive statistics are published
230 | #nodeDataLevel: 'none' # choose 1 of : none|basic|advanced
231 | #outputFormat: 'usertrace' # comma separated list of : csv,xml,usertrace
232 | #threadDataLevel: 'none' # choose 1 of : none|basic
233 | #csv: # The csv section is used when either Snapshot or Archive Statistics have an output format of csv.
234 | #filePath: '' # The file path where the statistics records will be written to when there is an outputFormat of csv.
235 | # If this is set to '' then the default path is /config/common/stats.
236 | #numberOfFiles: 4 # The maximum number of files that message flow statistics file writing can rotate through.
237 | #sizeOfFile: 25 # The maximum size in MB of a single file that a statistics file can use before rotating to the next file.
238 | #averages: true # When set to true the average values for each statistic are included in the written output.
239 | Resource:
240 | reportingOn: true # choose 1 of : true|false, Set to 'true'. If unset will default to 'false'
241 | #outputFormat: 'csvFile' # choose 'csvFile', or 'file' for IIB v10 compatibility. if unset will default to ''
242 | #filePath: '' # The file path where the statistics records will be written to when csvFile or file outputFormat is used.
243 | # If this is set to '' then the default path is /config/common/resourceStats.
244 | #numberOfFiles: 4 # The maximum number of files that resource statistics file writing can rotate through.
245 | #sizeOfFile: 25 # The maximum size in MB of a single file that a statistics file can use before rotating to the next file.
246 | #averages: true # When set to true the average values for each statistic are included in the written output.
247 |
248 | UserExits:
249 | #activeUserExitList: '' # Specify the name of an installed user exit to activate.
250 | #userExitPath: '' # Specify the path or paths containing the user exits to be loaded. Multiple paths should be separated by colons on Unix and semi-colons on Windows.
251 |
252 | BrokerRegistry:
253 | #brokerKeystoreType: 'JKS' # Key store type
254 | #brokerKeystoreFile: '' # Location of the broker key store
255 | #brokerKeystorePass: 'brokerKeystore::password' # Resource alias containing the key store password
256 | #brokerTruststoreType: 'JKS' # Trust store type
257 | #brokerTruststoreFile: '' # Location of the broker trust store
258 | #brokerTruststorePass: 'brokerTruststore::password' # Resource alias containing the trust store password
259 | #brokerCRLFileList: ''
260 | #brokerEnableCRLDP: ''
261 | #allowSSLv3: ''
262 | #allowSNI: ''
263 | #reenableTransportAlgorithms: ''
264 | #reenableCertificateAlgorithms: ''
265 | #mqCCDT: ''
266 | #httpConnectorPortRange: ''
267 | #httpsConnectorPortRange: ''
268 | #brokerKerberosConfigFile: ''
269 | #brokerKerberosKeytabFile: ''
270 | #mqKeyRepository: ''
271 |
272 | ResourceManagers:
273 | JVM:
274 | #jvmVerboseOption: 'none'
275 | #jvmDisableClassGC: ''
276 | #jvmEnableIncGC: ''
277 | #jvmShareClasses: ''
278 | #jvmNativeStackSize: -1
279 | #jvmJavaOSStackSize: -1
280 | #jvmMinHeapSize: 33554432 # minimum JVM heap size in bytes (32MB)
281 | #jvmMaxHeapSize: 268435456 # maximum JVM heap size in bytes (256MB)
282 | #jvmDebugPort: 0 # Set non-zero to activate JVM debug port for Toolkit debugging
283 | #jvmSystemProperty: ''
284 | #keystoreType: '' # JVM key store type
285 | #keystoreFile: '' # JVM location of the key store
286 | #keystorePass: '' # JVM resource alias containing the key store password
287 | #truststoreType: '' # JVM trust store type
288 | #truststoreFile: '' # JVM location of the trust store
289 | #truststorePass: '' # JVM resource alias containing the trust store password
290 | #crlFileList: ''
291 | #enableCRLDP: ''
292 | #kerberosConfigFile: ''
293 | #kerberosKeytabFile: ''
294 | #networkaddressCacheTtl: 60 # Default DNS cache timeout, applied to both networkaddress.cache.ttl and
295 | # sun.net.inetaddr.ttl unless either are already set.
296 |
297 | HTTPConnector:
298 | #ListenerPort: 0 # Set non-zero to set a specific port, defaults to 7800
299 | #ListenerAddress: '0.0.0.0' # Set the IP address for the listener to listen on. Default is to listen on all addresses
300 | #AutoRespondToHTTPHEADRequests: false # Automatically respond to HTTP HEAD requests without invoking the message flow. Set to true or false, default is false.
301 | #ServerName: '' # Set the value to be returned in the 'Server' HTTP response header.
302 | #CORSEnabled: false # Set the value to true to make the listener respond to valid HTTP CORS requests
303 | #CORSAllowOrigins: '*'
304 | #CORSAllowCredentials: false
305 | #CORSExposeHeaders: 'Content-Type'
306 | #CORSMaxAge: -1
307 | #CORSAllowMethods: 'GET,HEAD,POST,PUT,PATCH,DELETE,OPTIONS'
308 | #CORSAllowHeaders: 'Accept,Accept-Language,Content-Language,Content-Type,Authorization'
309 | #DuplicateHeaderAction: 'combineHeaders' # Set to 'combineHeaders' (the default) for standard RFC 2616 behaviour; for compatibility with
310 | # IIB v10 and earlier, set to 'useLastHeader' so that the HTTPInput header value is taken
311 | # from the last header when duplicate headers are detected. Other valid settings are 'useFirstHeader'
312 | # and 'errorOnDuplicate'. Note that this value must match any value specified for HTTPSConnector below.
313 | HTTPSConnector:
314 | #ListenerPort: 0 # Set non-zero to set a specific port, defaults to 7843
315 | #ListenerAddress: '0.0.0.0' # Set the IP address for the listener to listen on. Default is to listen on all addresses
316 | #ReqClientAuth: true # Controls whether to request a client certificate during the TLS handshake.
317 | #RejectUnauthorizedClient: true # Controls whether to allow connections without a client certificate when ReqClientAuth is set.
318 | #KeyAlias: ''
319 | #KeyPassword: 'P4s5w0rd' # Set the password or alias to the password of the key
320 | #KeystoreFile: '/path/to/keystore.jks'
321 | #KeystorePassword: 'P4s5w0rd' # Set the password or alias to the password of the keystore
322 | #KeystoreType: 'JKS' # Set the keystore type, can be 'JKS' or 'P12'. Default is JKS.
323 | #TruststoreFile: /path/to/truststore.jks
324 | #TruststorePassword: 'P4s5w0rd' # Set the password or alias to the password of the keystore
325 | #TruststoreType: 'JKS' # Set the truststore type, can be 'JKS' or 'PEM'. Default is JKS.
326 | #CipherSpec: '' # Comma-separated list of allowable ciphers. IANA cipher names are accepted.
327 | # Example: 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384'.
328 | #AutoRespondToHTTPHEADRequests: false # Automatically respond to HTTP HEAD requests without invoking the message flow. Set to true or false, default is false.
329 | #ServerName: '' # Set the value to be returned in the 'Server' HTTP response header.
330 | #CORSEnabled: false # Set the value to true to make the listener respond to valid HTTP CORS requests
331 | #CORSAllowOrigins: '*'
332 | #CORSAllowCredentials: false
333 | #CORSExposeHeaders: 'Content-Type'
334 | #CORSMaxAge: -1
335 | #CORSAllowMethods: 'GET,HEAD,POST,PUT,PATCH,DELETE,OPTIONS'
336 | #CORSAllowHeaders: 'Accept,Accept-Language,Content-Language,Content-Type,Authorization'
337 | #EnableTLSTrace: false # Enables tracing of TLS handshake messages to the console
338 | #AllowRenegotiation: true # Controls whether renegotiation of connections is allowed when using TLSv1.2 and earlier.
339 | #DuplicateHeaderAction: 'combineHeaders' # Set to 'combineHeaders' (the default) for standard RFC 2616 behaviour; for compatibility with
340 | # IIB v10 and earlier, set to 'useLastHeader' so that the HTTPInput header value is taken
341 | # from the last header when duplicate headers are detected. Other valid settings are 'useFirstHeader'
342 | # and 'errorOnDuplicate'. Note that this value must match any value specified for HTTPConnector above.
343 | ActivityLogManager:
344 | #activityLogEnabled: true
345 | #defaultLogSizePerThread: 1000
346 | #minSeverityLevel: 'INFO' # choose one of INFO|WARN|ERROR
347 |
348 | RecordedMessageManager:
349 | #recordedMessagePath: '' # Set the directory to store recorded messages
350 | #recordAllMessages: false # Set to 'true' to enable recording of messages from all message flows
351 |
352 | DatabaseConnectionManager:
353 | #maxConnectionAge: 60 # Set to -1 to never release connections
354 | #useDefaultSchemaForStoredProcedures: true
355 |
356 | SocketConnectionManager:
357 | #allowedAuthTypes: 'platform default' # Sets the allowable authentication mechanisms when responding to a HTTP 401 response during a HTTP based request.
358 | # Multiple values can be specified and should be semi-colon separated.
359 | # Valid values are 'basic', 'ntlm', 'negotiate', 'nego2', 'all', 'iwa', 'platform default'.
360 | # Unix only supports 'basic'. Windows supports all values.
361 | # 'platform default' is expanded to "basic" on unix and "basic; ntlm; negotiate; nego2" on windows.
362 | # 'iwa' is expanded to mean "ntlm; negotiate; nego2"
363 | #allowNtlmNegotiation: true # Set to 'false' to prevent NTLM from being negotiated with the SPNEGO and SPNEGO-2 protocols.
364 | #negotiateMutualAuth: false # Set to 'true' if you require mutual authentication when the Kerberos protocol is negotiated.
365 | #preemptiveAuthType: '' # Configure the authentication mechanism to use preemptively when making a HTTP based request.
366 | # Valid values are '', 'basic', 'ntlm', 'negotiate', 'nego2'.
367 | # Set to the default value of '' to disable preemptive authentication.
368 | #tcpNoDelay: true #Set the tcpnodelay setting for non-SSL sockets created for HTTP based requests.
369 | # Valid values are 'default' 'platform default' 'true' 'false' 'active' 'inactive'
370 | # To disable tcpNoDelay use 'false' or 'inactive'
371 | # To enable tcpNoDelay use 'true' or 'active'
372 | # To use the default setting for the current platform use 'default' or 'platform default'
373 | #tcpNoDelaySSL: true #Set the tcpnodelay setting for SSL sockets created for HTTP based requests.
374 | # Valid values are 'default' 'platform default' 'true' 'false' 'active' 'inactive'
375 | # To disable tcpNoDelay use 'false' or 'inactive'
376 | # To enable tcpNoDelay use 'true' or 'active'
377 | # To use the default setting for the current platform use 'default' or 'platform default'
378 | #defaultLocalAddressForBind: '' # Set the default local address that will be used when binding newly created sockets.
379 | #tcpTLSBufferSize: 32768 # Set the receive buffer size to use for TLS operations
380 | #allowInsecureConnections: false # Disables certificate checking for TLS connections; use with caution!
381 |
382 | ContentBasedFiltering:
383 | #cbfEnabled: false # Set to true to enable content-based filtering
384 | #evaluationThreads: 1 # Configures the number of evaluation threads available for content-based filtering
385 | #validationThreads: 1 # Configures the number of validation threads available for content-based filtering
386 |
387 | FTEAgent:
388 | #coordinationQMgr: '' # Set to the name of the coordination Queue Manager to be used by the embedded FTE Agent if not using the default Queue Manager as the coordination Queue Manager.
389 |
390 | ParserManager:
391 | #parserWarningThreshold: 1000 # Sets the frequency, based on parsers per thread, with which a parser creation warning message is written to activity log
392 | #fieldWarningThreshold: 100000 # Sets the frequency, based on fields per parser, with which a field creation warning message is written to activity log
393 | #freeMasterParsers: false # Sets whether parsers will be freed after each input message has been processed.
394 |
395 | ESQL:
396 | #castWithAccurateZoneOffsets: false # Set ESQL to use the time zones that are stored with Time, Timestamp, and Date data types when you cast to and from GMTTime and GMTTimestamp data types.
397 | #alwaysCastToLocal: true # Set ESQL to convert the specified time into the local time zone of the integration node/server when a String is cast to a Time or TimeStamp with a Format.
398 | #useICUStringManipulation: false # Set ESQL to provide full support for UTF-16 surrogate pairs.
399 | #allowPropagateWithSharedLockHeld: false # Set ESQL to propagate the message even if a lock is held on a shared variable.
400 | #performanceStatsReportingOn: false # Set ESQL to capture performance statistics
401 | #performanceStatsOutputPath: '' # Sets the path that ESQL performance statistics are captured to
402 |
403 | XMLNSC:
404 | #invalidCharacterEscapeType: none # Sets the way XMLNSC serialization renders characters that do not exist in the target code page. Choose one of none|hex|decimal|unicode
405 | # The value of none will throw an exception for an invalid character.
406 | # Choosing hex|decimal|unicode will write the character either in hex, decimal or unicode character entity format.
407 | #storeTimeZoneInValue: true # Sets XMLNSC parsers to store timezone information as part of the Datetime value that is parsed.
408 | #writeTimeZone: whenSet # Sets XMLNSC parsers to write timezone information when an XML document is serialized. Choose one of never|whenSet|nonUTC|nonLocal|always
409 | # When the value of never is set timezones will not be written in the XML document.
410 | # When the value of always is set timezones will always be written where the local timezone will be used if required.
411 | # whenSet can be used to only write timezones if they were parsed from an XMLNSC message
412 | # The values of nonUTC and nonLocal can be used to avoid writing timezones for local or UTC timezones.
413 | #cdataFieldMerging: false # Sets XMLNSC parsers to represent CDATA sections in one message tree field.
414 |
415 | JSON:
416 | #escapeMode: strict # Specifies whether the JSON parser escapes forward slash characters when it is serializing a message tree. Choose one of preserveForwardSlashes|strict. Default is strict if unset.
417 | # To specify that the JSON parser must leave forward slashes unchanged in the output, set the value to preserveForwardSlashes.
418 | #allowScientificNotation: true # Specifies whether the JSON parser serializes numerical values using scientific notation (for example 1.10E+1). Default is true if unset.
419 | # To specify that the JSON parser should use scientific notation set the value to true
420 | # To specify that the JSON parser should use decimal notation set the value to false
421 | #disableSchemaLookupExceptionWhen: '' # Comma-separated list of events that will not throw exceptions when validation is requested of the JSON parser and a JSON schema cannot be loaded.
422 | # When an event in the list occurs, no exception will be thrown and instead the JSON parser will continue to parse but without validation.
423 | # Valid events are 'notSpecified', 'notFound'. If unset will default to '' which means that no exceptions are disabled.
424 | # 'notSpecified' disables the BIP5736 exception that is thrown if a JSON schema name is not supplied.
425 | # 'notFound' disables the range of exceptions (BIP1318, BIP1312, BIP5737, BIP5738, BIP5739) that are thrown if a JSON schema name is supplied, but cannot be resolved.
426 | # This option is available to allow behaviour from previous releases to be maintained.
427 |
428 | MQConnectionManager:
429 | #enableBrowseForAllInputQueues: false # Sets the MQOO_BROWSE open option when all input queues are opened.
430 | #expiryCompensation: false # Set to true to reduce the MQOutput MQPUT MQMD.Expiry by the amount of message flow processing time.
431 | #useIsolatedBindings: false # Sets the MQCNO_ISOLATED_BINDING connection option when the MQ connection is created.
432 | #queueCacheMaxSize: 10 # Sets the maximum number of queue handles that a message flow thread can maintain.
433 | #queueCacheTimeout: 300 # Sets the time (in seconds) that a freed queue handle will stay cached until it is removed from the cache.
434 | #queueMaxTimeout: 60 # Sets the maximum time (in seconds) that an MQ handle is idle before it is considered for closing.
435 |
436 | XPathCache:
437 | #minimumSize: 6000 # controls the minimum number of compiled XPath expressions that can be stored in the XPath cache.
438 | #maximumSize: 10000 # controls the maximum number of compiled XPath expressions that can be stored in the XPath cache
439 | #entryWarningThreshold: 1000 # controls how frequently activity log messages are emitted by the XPath cache as it grows in size.
440 | #mode: true # Set to false to disable XPath caching
441 |
442 | AsyncHandleManager:
443 | #asyncHandleTimeoutSeconds: -1 # Sets the expiry time (in seconds) when the reply identifiers expire for asynchronous handles used for HTTP, SOAP, SAP and Callable flow nodes.
444 |
445 | GlobalCache:
446 | #cacheOn: false # Set to true to enable Global Cache functionality
447 | # When using Global Cache it is advisable to change your jvmMinHeapSize and jvmMaxHeapSize depending on
448 | # the number of live objects in the heap, complexity of live objects in the heap and number of available cores.
449 | # see https://www.ibm.com/support/knowledgecenter/SSTVLU_8.6.0/com.ibm.websphere.extremescale.doc/cxsjvmtune.html
450 | #cacheServerName: '' # The name of this cache server component (a cache server component can be a catalog and/or a container); it must be unique in your global cache system
451 | #catalogServiceEndPoints: '' # Comma-separated list of hostnames and ports for the catalog servers to use, e.g. 'localhost:2800'
452 | #catalogDomainName: '' # Name of the shared global cache domain; this value should be shared by all catalog servers in the same domain, e.g. 'WMB_MyCacheDomain'
453 | #catalogClusterEndPoints: '' # Comma-separated list of catalog server connection details in the format 'cacheServerName:catalogCacheServerHost:HAPort:clientPort'
454 | # If this is a catalog server, cacheServerName should match the value above, and if not, it will be the value used on the integration server hosting it
455 | # The list should be in the same order for all catalog and container servers which are interacting together in the same domain
456 | #clientsDefaultToSSL: false # Set to true to enable SSL for any client connections to the cache servers
457 | #deploymentPolicyCustomFile: '' # Override the deployment policy file (default is '/server/cachesupport/config/deployment.xml')
458 | #enableCatalogService: false # Set to true to launch a catalog service cache server component in this integration server
459 | #enableContainerService: false # Set to true to launch a container service cache server component in this integration server
460 | #enableJMX: true # Allow admin access to this container service via JMX
461 | #listenerHost: '' # Comma-separated list of hostnames for this cacheServer component, e.g. 'localhost,myserver.mycompany.com'
462 | #listenerPort: 0 # Port number this cache server listens on; it must be unique on this machine
463 | # Four consecutive ports are assigned, e.g. 2800 for catalogCacheServerListenerPort, 2801 for clientPort, 2802 for JMXServicePort, 2803 for HAPort
464 | #objectGridCustomFile: '' # Override the ObjectGrid file (default is '/server/cachesupport/config/objectgrid_xio.xml')
465 | #overrideTraceSpec: '' # Set a trace level for the cache server components, e.g. ObjectGrid*=event=enabled
466 | #sslAlias: '' # SSL Alias to use for the cache server components
467 | #sslProtocol: '' # SSL Protocol to use for SSL connections eg. default is "TLSv1.2"
468 | #defaultCacheType: 'local' # Controls whether maps created by default use the Global Cache functionality or a Local Cache, defaults to global
469 |
470 | ExceptionLog:
471 | #enabled: false # Enables logging of exceptions
472 | #exceptionLog: '[iib.system-common-log-dir]/[iib.system-node-label].[iib.system-server-label].exceptionLog.txt' # The location in which the rotating exception log file should be written
473 | # This path must already exist and be writeable by the IntegrationServer process.
474 | #exceptionLogFileSize: 25 # The maximum size in MB of a single file that the exception log can use.
475 | #exceptionLogFileCount: 4 # The maximum number of files that the exception log can rotate through.
476 | #includeFlowThreadReporter: false # Toggles whether exceptions in the exception log include a flow stack and history from the flow thread reporter
477 | #showNestedExceptionDetails: false # Toggles whether nested exceptions are shown by default in the exception log
478 |
479 | GroupDirector:
480 | #managerType: default # Controls which implementation of in memory aggregation should be used. Valid values are: default
481 | #timeoutThreads: 1 # Sets the number of threads which will process timed-out groups per set of group nodes
482 | #timeoutInterval: 100 # Sets the duration in milliseconds between the end of one timeout processing run and the start of the next
483 | #unknownThreads: 1 # Sets the number of threads which will process unknown messages per set of group nodes
484 | #unknownInterval: 100 # Sets the duration in milliseconds between the end of one unknown message processing run and the start of the next
485 | #maximumNumberOfGroups: -1 # Limits the total number of groups that can be active at any one time. -1 means no limit.
486 | replyFormat: mixed-content # Controls which method should be used to store messages that are received by a GroupGather node before being output
487 | # in a GroupComplete node. Valid values are:
488 | # * basic - Serialize the entire message tree based on the values in the Properties parser. This was the default
489 | # behaviour before 11.0.0.6
490 | # * mixed-content - Serialize each first level child of the message root individually, using the subtree's own parser
491 | # to decide what CCSID and encoding to use. This requires that each individual subtree be valid
492 | # according to the rules of that parser. Subtrees that do not serialize will not be stored.
493 | # * toolkit - Serialize the message using the same method used by the toolkit for messages in the flow exerciser.
494 | # This is capable of representing arbitrary message trees accurately but is slower.
495 | MRM:
496 | #messageSetLoadStrategy: onServerStart # Controls when MRM message sets are loaded into the MRM cache. Valid values are:
497 | # * onServerStart - The default where all MRM message sets are loaded into the MRM cache when the server is started.
498 | # * onFirstMessage - A message set is loaded into the MRM cache when it is first referenced by message processing.
499 |
500 | FileNodes:
501 | #allowReadOnlyInputFiles: false # When set to true this will allow files to be processed by the FileInput node if the file has filesystem permissions set to
502 | # read-only for the ACE user. The parent directory must still be writeable.
503 |
504 | #disableLocking: false # When set to true prevents the File Nodes from obtaining operating system level locks on files before processing them.
505 | # This can be helpful to allow files to be processed on filesystems that do not support POSIX file locking semantics.
506 | # Note that file locking is used to prevent multiple Integration Servers or Integration Nodes from reading the same input file
507 | # when processing from the same directory. Therefore if disableLocking is set to true the user must ensure that only a
508 | # single Integration Server reads from the specified input directory.
509 |
510 |
511 | #avoidWriteLockCheck: false # When set to true bypasses the write lock check on an input file before it is processed by the FileInput node. For network
512 | # filesystems that cache file attributes between calls, this can prevent the accumulation of zero length input files in the input
513 | # directory.
514 |
515 | #maxDirectoryDepth: 50 # The maximum depth of directory structure a FileInput or FileExists Node will traverse below the input directory.
516 |
517 | #rootDirectory: '' # Allows a base directory to be set such that relative paths set in the FileInput or FileExists Node's directory property will be constructed
518 | # relative to this base path.
519 |
520 | #allowFileSystemRoot: false # When set to true will allow the FileInput or FileExists node to read and delete processed files from the root directory of the operating system.
521 |
522 | #maxRecordLength: 104857600 # The maximum size of record that will be read by the FileInput or FileRead nodes. In the case where the Record Detection property is
523 | # set to "Whole File" this is the maximum length of file that may be processed.
524 |
525 | ConnectorProviders:
526 | #sharedConnectorIdleTimeout: 60 # length of time (in seconds) after which a ConnectorProvider connection for an idle message flow is released. The value can be greater than zero or -1 to mean a connection never times out.
527 | SAPConnectorProvider:
528 | #jarsURL: 'default_Path' # Set to the absolute path containing the SAP JCo JARs.
529 | #nativeLibs: 'default_Path' # Set to the absolute path containing the SAP JCo libraries.
530 | #enableStandaloneIDocParsing: false # Set to true to enable using the DataObject parser in SAP ALE IDoc mode without having adapter components deployed
531 | SiebelConnectorProvider:
532 | #jarsURL: 'default_Path' # Set to the absolute path containing the Siebel JARs.
533 | #nativeLibs: 'default_Path' # Set to the absolute path containing the Siebel libraries.
534 | #siebelPropertiesURL: '' # Set to the location of the siebel.properties file.
535 | PeopleSoftConnectorProvider:
536 | #jarsURL: 'default_Path' # Set to the absolute path containing the PeopleSoft JARs.
537 | JDEdwardsConnectorProvider:
538 | #jarsURL: 'default_Path' # Set to the absolute path containing the JDEdwards JARs.
539 | #nativeLibs: 'default_Path' # Set to the absolute path containing the JDEdwards libraries.
540 | #WSRRConnectorProvider: # Requires the optional WSRR component install
541 | #endpointAddress: 'https://host:9443/WSRR8_0/services/WSRRCoreSDOPort' # WSRR server endpoint url
542 | #needCache: true # enable WSRR cache
543 | #predefinedCacheQueries: '' # semicolon-separated XPath queries to initialize WSRR cache at start-up
544 | #enableCacheNotification: false # enable WSRR cache notification
545 | #locationJNDIBinding: 'iiop://host:2809' # WSRR cache WAS JMS provider JNDI bindings url
546 | #MyCustomProvider:
547 | #connectorClassName: 'com.company.connector.hyper.LoopConnectorFactory'
548 | #jarsURL: 'installation_directory/runtime/LoopConnector' # absolute path
549 | #nativeLibs: 'default_Path' # absolute path or special value "default_Path"
550 |
551 | BusinessTransactionDefinitions:
552 | #BusinessTransactionDefinitionTemplate:
553 | #policy: '{PolicyProject}:BTDPolicy' # The fully qualified name of the BusinessTransactionDefinition Policy
554 | #store: 'BTMDataStore' # The name of the DataStore this BusinessTransactionDefinition will use to propagate events.
555 | #enabled: true # Set to true to enable this BusinessTransactionDefinition.
556 |
557 | # Record and Replay requires a default queue manager to be associated with the integration server.
558 | RecordReplay:
559 | #recordReplayEnabled: true # Set to true to enable all Record and Replay functionality. Default is true.
560 | Stores:
561 | # Copy and customize the StoreTemplate section for each Record and Replay store that you want to create.
562 | # Rename the section title 'StoreTemplate' to be the name of your Record and Replay store. This name must be unique.
563 | #StoreTemplate:
564 | #dataSource: '' # The ODBC data source name (DSN) that is used to connect to the database that stores the recorded data. This property is mandatory and has no default value.
565 | #schema: '' # The schema name that owns the database tables that are used for storing recorded data. This property has no default value. If no value is set, either the default database schema is used (if there is one), or no schema is used, depending on the database.
566 | #storeMode: 'all' # The mode for the store to operate in. Valid values are record, view, and all. Default is all.
567 | #queue: 'SYSTEM.BROKER.DC.RECORD' # The name of the queue to which event messages will be published before being recorded to the database. The queue must exist.
568 | # Default is SYSTEM.BROKER.DC.RECORD. The queue SYSTEM.BROKER.DC.RECORD must be created manually if you use Record and Replay. The same queue can be specified for multiple Record and Replay stores.
569 | # Change the value of this property to distribute the data from multiple sources across multiple queues.
570 | #backoutQueue: 'SYSTEM.BROKER.DC.BACKOUT' # The name of the backout queue used by the recorder. Messages that cannot be processed (for example, because the specified database does not exist) are sent to this queue.
571 | # Default is SYSTEM.BROKER.DC.BACKOUT. The queue SYSTEM.BROKER.DC.BACKOUT must be created manually if you use Record and Replay. If a data capture source refers to this data capture store, and no backoutQueue has been specified, an error occurs. The same backoutQueue can be specified for multiple Record and Replay stores.
572 | #useCoordinatedTransaction: false # Specifies whether recorder transactions are globally coordinated across IBM MQ and database resources. Default is false.
573 | #commitCount: 10 # The number of input messages that are processed on a recorder thread before a sync point is taken. Default is 10.
574 | #threadPoolSize: 10 # The number of threads that are used by the recorder to process the monitoring topic subscriptions. Default is 10.
575 | #commitIntervalSecs: 5 # The time interval (in seconds) at which a commit is taken, if the commitCount property is greater than 1 but the number of messages processed has not reached the value of the commitCount property. Default is 5.
576 | # Like the StoreTemplate, copy, configure and rename the BTMDataStore section for each Record and Replay store you want to use to process Business Transaction Monitoring (BTM) Events.
577 | #BTMDataStore:
578 | #dataSource: ''
579 | #schema: ''
580 | #storeMode: 'all'
581 | #queue: 'SYSTEM.BROKER.DC.RECORD'
582 | #backoutQueue: 'SYSTEM.BROKER.DC.BACKOUT'
583 | #useCoordinatedTransaction: false
584 | #commitCount: 10
585 | #threadPoolSize: 10
586 | #commitIntervalSecs: 5
587 | Sources:
588 | # Copy and customize the SourceTemplate section for each Record and Replay source that you want to create.
589 | # Rename the section title 'SourceTemplate' to be the name of your Record and Replay source. This name must be unique.
590 | # If you have set 'eventFormat: MonitoringEventV2' for monitoring events, the value of the topic property takes the form: $SYS/Broker/integration_server/Monitoring/integrationServer/application/library/msgFlow, where integrationServer, application, library, and msgFlow represent the names of your integration server, application, library, and message flow. The library name is required only if the message flow is inside a library.
591 | # If you have set 'eventFormat: WMB' for monitoring events, the value of the topic property takes the form: $SYS/Broker/integration_server/Monitoring/integrationServer/msgFlow, where integrationServer and msgFlow represent the names of your integration server and message flow.
592 | # You can use wildcards in the topic property; for more information about using wildcards in topic strings, see the "Special characters in topics" topic in the Knowledge Center.
593 | # A durable subscription is created for each source and is created with a subid of IntegrationServer:[servername]:[sourcename]. If multiple independent integration servers share the same queue manager, you must ensure that there is no clash in server and source name.
594 | # If you delete a source, you must manually delete the durable subscription for that source to avoid messages being published to the Record and Replay store's queue.
595 | #SourceTemplate:
596 | #topic: '$SYS/Broker/integration_server/Monitoring/[servername]/[applicationname]/[flowname]' # Sets the monitoring event topic string that is used when record and replay subscribes to monitoring events, see above.
597 | #store: 'StoreTemplate' # The Record and Replay store that is used to configure record and replay for the message flows specified in the topic property. Multiple instances of Record and Replay source can refer to one instance of a Record and Replay store.
598 |
599 | Destinations:
600 | # Copy and customize the MQDestinationTemplate section for each Record and Replay destination that you want to create.
601 | # Rename the section title 'MQDestinationTemplate' to be the name of your Record and Replay destination. This name must be unique.
602 | # For destinations of type WMQDestination, the value of the property takes the form: wmq:/msg/queue/queueName@queueManagerName, where queueName identifies the destination queue and queueManagerName identifies the queue manager that owns the queue. The queue manager name is optional, with the default queue manager (local or remote) being used if it is not specified. Only local queue managers can be specified by name.
603 | #MQDestinationTemplate:
604 | #endpointType: 'WMQDestination' # The type of the target destination to which messages will be replayed. The default is WMQDestination, which is the only valid value.
605 | #endpoint: 'wmq:/msg/queue/[QUEUE]@[QMGR]' # The destination to which you want to replay data. This property is mandatory. The default value is wmq:/msg/queue/[QUEUE]@[QMGR]. You must replace [QUEUE] and [QMGR] with the names of your queue and queue manager, or leave the queue manager unspecified (wmq:/msg/queue/[QUEUE]) to use the default queue manager (local or remote).
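# Purely as an illustrative sketch (the store, source, queue manager, and flow names below are hypothetical), a minimal
# end-to-end Record and Replay configuration combining the Stores, Sources, and Destinations templates above might be:
#
#   Stores:
#     OrdersStore:
#       dataSource: 'RECORDDB'                    # hypothetical ODBC DSN
#       queue: 'SYSTEM.BROKER.DC.RECORD'
#       backoutQueue: 'SYSTEM.BROKER.DC.BACKOUT'
#   Sources:
#     OrdersSource:
#       topic: '$SYS/Broker/integration_server/Monitoring/ACESERVER/OrdersApp/OrdersFlow'
#       store: 'OrdersStore'
#   Destinations:
#     OrdersReplay:
#       endpointType: 'WMQDestination'
#       endpoint: 'wmq:/msg/queue/ORDERS.REPLAY@QM1'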
606 |
607 | Credentials:
608 | ServerCredentials:
609 | # Optionally define credentials for use by the Integration Server.
610 | # Customize the CredentialType section for each type of credential that you want to create credentials for.
611 | # You must define each CredentialType at most once.
612 | # Each CredentialName must be unique within the CredentialType.
613 | # Each CredentialType has a set of allowable properties which are a subset of username, password, passphrase, apiKey, clientId, clientSecret, sshIdentityFile.
614 | # For full details of allowed CredentialTypes and their properties, refer to the Knowledge Center.
615 | # For example:
616 | # jdbc:
617 | # USERDB:
618 | # username: 'user1'
619 | # password: 'myPassw0rd'
620 | # OTHERDB:
621 | # username: 'user2'
622 | # password: 'myOtherPassw0rd'
623 | # rest:
624 | # endpoint1:
625 | # username: 'user1'
626 | # password: 'myPassw0rd'
627 | # apiKey: 'anApiKeyWhichMightBeALongOne'
628 | #
629 | #CredentialType:
630 | #CredentialName:
631 | #property: 'value'
632 | #property2: 'value2'
633 |
634 | ExternalCredentialsProviders:
635 | # Optionally define external credentials providers that are invoked on startup to load credentials for use by the Integration Server.
636 | # Copy and customize the ExternalProviderTemplate section for each external credential provider that you want to invoke.
637 | # Rename the section title 'ExternalProviderTemplate' to be the name of your external credential provider. This name must be unique.
638 | # Configure the loadAllCredentialsCommand to be the command, including arguments, that should be run to retrieve and return any credentials to be loaded.
639 | # The command can include environment variables and keywords that are replaced before execution to provide context-specific configuration, such as [iib.system-work-dir] and [iib.system-server-label]. Environment variables are replaced before keywords.
640 | # The command that is executed must output a stream of bytes to stdout that represent the credentials to be loaded in the configured format, either xml, json, or yaml, and in the configured codepage.
641 | # The command must return 0 when it executes successfully; any other return value results in an error being reported and the returned output not being processed.
642 | # The properties loadAllCredentialsDirectErrorToCommandOutput and loadAllCredentialsIncludeCommandOutputInLogs are intended for debugging; loadAllCredentialsIncludeCommandOutputInLogs should not be left set permanently in production, as it could result in credentials being leaked into logs.
643 | # When set to true the property loadAllCredentialsDirectErrorToCommandOutput results in " 2>&1" being appended to the command before execution so that stderr is redirected to the stdout stream. When set to false stderr ends up in the integration server stderr stream.
644 | #ExternalProviderTemplate:
645 | #loadAllCredentialsCommand: ''
646 | #loadAllCredentialsFormat: 'json' # Format of the output returned by the command. Valid values are xml, json, or yaml. Default is json.
647 | #loadAllCredentialsCodepage: 1208 # Codepage of the output returned by the command. 0 means use the local codepage. Default is 1208.
648 | #loadAllCredentialsDirectErrorToCommandOutput: true # Specify whether the stderr output of the command should be redirected to the stdout stream and read by the integration server. Default is true.
649 | #loadAllCredentialsIncludeCommandOutputInLogs: false # Specify whether the output from the command should appear in log and user trace messages. Default is false.
650 | #loadAllCredentialsStopServerOnError: true # Specify whether the server should stop if the command returns a non-zero exit code. Default is true.
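# Purely as an illustrative sketch (the provider name and script path are hypothetical), an external provider that
# shells out to a local script and returns JSON on stdout could be configured as:
#
#   VaultProvider:
#     loadAllCredentialsCommand: '/home/aceuser/bin/fetch-credentials.sh [iib.system-server-label]'
#     loadAllCredentialsFormat: 'json'
#     loadAllCredentialsStopServerOnError: true
#
# The script must write the credentials to stdout as a JSON document in the format described in the Knowledge Center
# and exit with return code 0.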
651 |
652 | StartupScripts:
653 | # Optionally run scripts during server startup. These scripts can optionally return YAML to set environment and user variables in the server itself.
654 | # The optional return YAML format should start with the standard "---" as the first line (which is detected by the server with the default "auto"
655 | # setting below); the content should be the "UserVariables" or "EnvironmentVariables" stanzas from this file (see below for examples), or else the
656 | # Kubernetes standard "env" format:
657 | #
658 | # ---
659 | # env:
660 | # - name: 'test1'
661 | # value: 'test1value'
662 | # UserVariables:
663 | # user-var-one: 'uservalueone'
664 | # EnvironmentVariables:
665 | # env-var-one: 'envvalueone'
666 | #
667 | #FirstScript:
668 | #command: '/path/to/startup-script.sh'
669 | #readVariablesFromOutput: 'auto' # Controls whether the server reads environment and user variables from the script output. Possible values are 'true', 'false', and 'auto' (default).
670 | #directErrorToCommandOutput: false # Specify whether the stderr output of the command should be redirected to the stdout stream and read by the integration server. Default is false.
671 | #includeCommandOutputInLogs: true # Specify whether the output from the command should appear in log and user trace messages. Default is true.
672 | #stopServerOnError: true # Specify whether the server should stop if the script returns a non-zero exit code. Default is true.
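# Purely as an illustrative sketch (the script path and variable name are hypothetical), a startup script stanza
# paired with a script that emits the return-YAML format shown above might look like:
#
#   SetRegionScript:
#     command: '/home/aceuser/initial-config/startup.sh'
#     readVariablesFromOutput: 'auto'
#
# where startup.sh prints, for example:
#   ---
#   UserVariables:
#     deployment-region: 'eu-west'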
673 |
674 | ELKConnections:
675 | # Description for ELK Connections.
676 | # elkConnection1:
677 | # elkProtocol: 'beats' # Logstash input protocol. Valid values are: 'beats', 'beatsTls', 'http', or 'https'.
678 | # hostname: 'myhost.domain.com' # Hostname for the elkProtocol endpoint.
679 | # port: 0 # Port for the elkProtocol endpoint.
680 | # uploadIntervalMilliSecs: 60000 # Interval between uploading cached data, set in milliseconds.
681 | # elkCredential: '' # Set an 'elk' credential alias name to enable basic authentication, if it is required by the Logstash input protocol.
682 | # keystoreFile: '/path/to/keystore.jks' # Set the path to the keystore to be used, if it is required by the Logstash input protocol.
683 | # keystorePass: 'P4s5w0rd' # Set the password, or 'keystore' credential alias to the password, of the keystore.
684 | # keyAlias: '' # Set the alias name of the private key, if mutual authentication is required by the Logstash input protocol.
685 | # keyPass: '' # Set the password, or 'keystorekey' credential alias to the password, for accessing the private mutual authentication key.
686 | # truststoreFile: '/path/to/truststore.jks' # Set the path to the truststore to be used, if it is required by the Logstash input protocol.
687 | # truststorePass: 'P4s5w0rd' # Set the password, or 'truststore' credential alias to the password, for accessing the truststore.
688 |
689 | EnvironmentVariables:
690 | #ENV_VAR_ONE: 'env_var_one_value'
691 |
692 | UserVariables:
693 | #Thing1: 'value'
694 |
--------------------------------------------------------------------------------