├── labs
├── test
├── ex288
│ └── container-build
│ │ └── Dockerfile
├── multicontainer-design
│ ├── deploy
│ │ └── nodejs
│ │ │ ├── networked
│ │ │ ├── work
│ │ │ │ ├── data
│ │ │ │ │ ├── mysql_upgrade_info
│ │ │ │ │ ├── public_key.pem
│ │ │ │ │ ├── ca.pem
│ │ │ │ │ ├── client-cert.pem
│ │ │ │ │ └── server-cert.pem
│ │ │ │ └── init
│ │ │ │ │ └── db.sql
│ │ │ ├── README.md
│ │ │ ├── db.sql
│ │ │ ├── stop.sh
│ │ │ └── run.sh
│ │ │ ├── run.sh
│ │ │ ├── nodejs-source
│ │ │ ├── compile.sh
│ │ │ ├── run.sh
│ │ │ ├── package.json
│ │ │ ├── todo
│ │ │ │ ├── lib
│ │ │ │ │ └── dependencies
│ │ │ │ │ │ ├── fonts
│ │ │ │ │ │ ├── glyphicons-halflings-regular.eot
│ │ │ │ │ │ ├── glyphicons-halflings-regular.ttf
│ │ │ │ │ │ └── glyphicons-halflings-regular.woff
│ │ │ │ │ │ ├── angular-resource.min.js
│ │ │ │ │ │ └── css
│ │ │ │ │ │ └── ng-grid.min.css
│ │ │ │ ├── hello.html
│ │ │ │ ├── css
│ │ │ │ │ └── style.css
│ │ │ │ ├── index.html
│ │ │ │ └── script
│ │ │ │ │ └── item.js
│ │ │ ├── models
│ │ │ │ ├── db.js
│ │ │ │ └── items.js
│ │ │ ├── README.md
│ │ │ ├── app.js
│ │ │ └── controllers
│ │ │ │ └── items.js
│ │ │ ├── build
│ │ │ ├── package.json
│ │ │ ├── todo
│ │ │ │ ├── lib
│ │ │ │ │ └── dependencies
│ │ │ │ │ │ ├── fonts
│ │ │ │ │ │ ├── glyphicons-halflings-regular.eot
│ │ │ │ │ │ ├── glyphicons-halflings-regular.ttf
│ │ │ │ │ │ └── glyphicons-halflings-regular.woff
│ │ │ │ │ │ ├── angular-resource.min.js
│ │ │ │ │ │ └── css
│ │ │ │ │ │ └── ng-grid.min.css
│ │ │ │ ├── hello.html
│ │ │ │ ├── css
│ │ │ │ │ └── style.css
│ │ │ │ ├── index.html
│ │ │ │ └── script
│ │ │ │ │ └── item.js
│ │ │ ├── models
│ │ │ │ ├── db.js
│ │ │ │ └── items.js
│ │ │ ├── README.md
│ │ │ ├── app.js
│ │ │ └── controllers
│ │ │ │ └── items.js
│ │ │ ├── Dockerfile
│ │ │ └── build.sh
│ └── images
│ │ ├── nodejs
│ │ ├── README.md
│ │ ├── enable-rh-nodejs8.sh
│ │ └── Dockerfile
│ │ ├── cleanup.sh
│ │ ├── mysql
│ │ ├── README.md
│ │ ├── Dockerfile
│ │ ├── root
│ │ │ └── usr
│ │ │ │ └── share
│ │ │ │ └── container-scripts
│ │ │ │ └── mysql
│ │ │ │ ├── post-init.sh
│ │ │ │ └── init
│ │ │ │ └── post-init.sh
│ │ └── training.repo
│ │ └── README.md
├── multicontainer-openshift
│ ├── images
│ │ ├── nodejs
│ │ │ ├── test
│ │ │ │ ├── run.sh
│ │ │ │ ├── Dockerfile
│ │ │ │ └── build
│ │ │ │ │ ├── app.js
│ │ │ │ │ └── package.json
│ │ │ ├── README.md
│ │ │ ├── build.sh
│ │ │ ├── enable-rh-nodejs8.sh
│ │ │ ├── test.sh
│ │ │ └── Dockerfile
│ │ ├── mysql
│ │ │ ├── test
│ │ │ │ ├── query.sql
│ │ │ │ └── testdata.sql
│ │ │ ├── build.sh
│ │ │ ├── delete.sh
│ │ │ ├── README.md
│ │ │ ├── root
│ │ │ │ └── usr
│ │ │ │ │ └── share
│ │ │ │ │ └── container-scripts
│ │ │ │ │ └── mysql
│ │ │ │ │ ├── post-init
│ │ │ │ │ └── db.sql
│ │ │ │ │ └── init
│ │ │ │ │ └── post-init.sh
│ │ │ └── Dockerfile
│ │ └── README.md
│ └── db.sql
├── comprehensive-review
│ ├── image
│ │ ├── install_java.snippet
│ │ ├── create_nexus_account.snippet
│ │ ├── get-nexus-bundle.sh
│ │ ├── nexus-start.sh
│ │ ├── training.repo
│ │ └── Dockerfile
│ └── deploy
│ │ ├── local
│ │ └── run-persistent.sh
│ │ └── openshift
│ │ └── resources
│ │ └── nexus-template.json
├── dockerfile-review
│ ├── src
│ │ └── index.html
│ ├── Dockerfile
│ ├── Dockerfile.old
│ └── training.repo
└── dockerfile-create
│ ├── training.repo
│ └── Dockerfile
├── images
├── test.txt
├── S2i.jpg
├── routes.png
├── routes2.jpeg
├── port-forward.png
├── route-service.png
├── secured-route.png
├── passthrough-route.png
├── podman-subcommand.png
├── container-life-cycle.png
├── openshift-features.png
├── openshift-operator.jpeg
├── openshift-architecture.png
├── openshift-control-plane.png
└── identity-provider-htpasswd.jpg
├── 02.minishift.md
├── memo.txt
├── README.md
├── tools
├── sidecar-busybox-LogAgent
│ ├── Dockerfile
│ ├── log-counter-busybox.yaml
│ ├── sidecar-busybox-logagent.yaml
│ └── readme.md
└── sidecar-fluentd-LogAgent
│ ├── fluentd-confmap.yaml
│ ├── sample2-fluentd-confmap.yaml
│ ├── sample2-pod-with-sidecar-fluentd.yaml
│ ├── sample2-preparation.md
│ ├── sidecar-fluentd-deployment.yaml
│ └── app-with-sidecarfluentd-deployment-configmap.yaml
├── git-template.md
├── 03.docker-registry.md
├── 04.elasticsearch.md
├── 05.fluentd.md
├── 02.openshif-application.md
├── 01.openshift-cheatsheet.md
└── 01.openshift-administration.md
/labs/test:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/images/test.txt:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/labs/ex288/container-build/Dockerfile:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/02.minishift.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fahmifahim/openshift/HEAD/02.minishift.md
--------------------------------------------------------------------------------
/images/S2i.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fahmifahim/openshift/HEAD/images/S2i.jpg
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/networked/work/data/mysql_upgrade_info:
--------------------------------------------------------------------------------
1 | 5.7.24
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | node app.js
4 |
5 |
--------------------------------------------------------------------------------
/images/routes.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fahmifahim/openshift/HEAD/images/routes.png
--------------------------------------------------------------------------------
/labs/multicontainer-openshift/images/nodejs/test/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | npm start
4 |
--------------------------------------------------------------------------------
/images/routes2.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fahmifahim/openshift/HEAD/images/routes2.jpeg
--------------------------------------------------------------------------------
/labs/multicontainer-design/images/nodejs/README.md:
--------------------------------------------------------------------------------
1 | # DO180 JavaScript/Node.js Docker Image
2 |
--------------------------------------------------------------------------------
/labs/multicontainer-openshift/images/mysql/test/query.sql:
--------------------------------------------------------------------------------
1 | select name, email from contacts;
2 |
--------------------------------------------------------------------------------
/labs/multicontainer-openshift/images/nodejs/README.md:
--------------------------------------------------------------------------------
1 | # DO180 JavaScript/Node.js Docker Image
2 |
--------------------------------------------------------------------------------
/images/port-forward.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fahmifahim/openshift/HEAD/images/port-forward.png
--------------------------------------------------------------------------------
/images/route-service.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fahmifahim/openshift/HEAD/images/route-service.png
--------------------------------------------------------------------------------
/images/secured-route.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fahmifahim/openshift/HEAD/images/secured-route.png
--------------------------------------------------------------------------------
/memo.txt:
--------------------------------------------------------------------------------
1 | OCP v3.x vs OCP v4.x
2 | Some additional commands/features:
3 | - oc debug node/my-node-name
4 |
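5 | A minimal usage sketch for the oc debug command above (assumes cluster-admin rights; the node name is only an example):
6 |
7 |   $ oc debug node/my-node-name
8 |   # inside the debug pod, switch to the node's root filesystem:
9 |   sh-4.4# chroot /host
10 |   sh-4.4# systemctl status kubelet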
--------------------------------------------------------------------------------
/images/passthrough-route.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fahmifahim/openshift/HEAD/images/passthrough-route.png
--------------------------------------------------------------------------------
/images/podman-subcommand.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fahmifahim/openshift/HEAD/images/podman-subcommand.png
--------------------------------------------------------------------------------
/labs/multicontainer-openshift/images/nodejs/build.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | sudo podman build -t do180/nodejs .
4 |
--------------------------------------------------------------------------------
/images/container-life-cycle.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fahmifahim/openshift/HEAD/images/container-life-cycle.png
--------------------------------------------------------------------------------
/images/openshift-features.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fahmifahim/openshift/HEAD/images/openshift-features.png
--------------------------------------------------------------------------------
/images/openshift-operator.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fahmifahim/openshift/HEAD/images/openshift-operator.jpeg
--------------------------------------------------------------------------------
/images/openshift-architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fahmifahim/openshift/HEAD/images/openshift-architecture.png
--------------------------------------------------------------------------------
/labs/multicontainer-openshift/images/mysql/build.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sudo podman build -t do180/mysql-57-rhel7 .
4 |
--------------------------------------------------------------------------------
/images/openshift-control-plane.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fahmifahim/openshift/HEAD/images/openshift-control-plane.png
--------------------------------------------------------------------------------
/images/identity-provider-htpasswd.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fahmifahim/openshift/HEAD/images/identity-provider-htpasswd.jpg
--------------------------------------------------------------------------------
/labs/comprehensive-review/image/install_java.snippet:
--------------------------------------------------------------------------------
1 | yum install -y --setopt=tsflags=nodocs java-1.8.0-openjdk-devel
2 | yum clean all -y
3 |
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/nodejs-source/compile.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | source /opt/rh/nodejs010/enable
4 |
5 | npm install
6 |
7 |
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/nodejs-source/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | source /opt/rh/nodejs010/enable
4 |
5 | node app.js
6 |
7 |
--------------------------------------------------------------------------------
/labs/multicontainer-openshift/images/mysql/delete.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | docker stop test-mysql
4 | docker rm test-mysql
5 | sleep 9
6 | sudo rm -rf work
7 |
--------------------------------------------------------------------------------
/labs/multicontainer-design/images/nodejs/enable-rh-nodejs8.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | source /opt/rh/rh-nodejs8/enable
3 | export X_SCLS="`scl enable rh-nodejs8 'echo $X_SCLS'`"
4 |
--------------------------------------------------------------------------------
/labs/multicontainer-design/images/cleanup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Stops and deletes all containers
4 |
5 | docker stop $(docker ps -qa) ; docker rm $(docker ps -qa)
6 |
7 |
--------------------------------------------------------------------------------
/labs/multicontainer-openshift/images/nodejs/enable-rh-nodejs8.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | source /opt/rh/rh-nodejs8/enable
3 | export X_SCLS="`scl enable rh-nodejs8 'echo $X_SCLS'`"
4 |
--------------------------------------------------------------------------------
/labs/dockerfile-review/src/index.html:
--------------------------------------------------------------------------------
1 | <html>
2 | <body>
3 |
4 | Hello World! The dockerfile-review lab works!
5 |
6 | </body>
7 | </html>
--------------------------------------------------------------------------------
/labs/dockerfile-create/training.repo:
--------------------------------------------------------------------------------
1 | [rhel_dvd]
2 | baseurl = http://content.example.com/rhel7.6/x86_64/dvd
3 | enabled = true
4 | gpgcheck = false
5 | name = Remote classroom copy of dvd
6 |
--------------------------------------------------------------------------------
/labs/comprehensive-review/image/create_nexus_account.snippet:
--------------------------------------------------------------------------------
1 | groupadd -r nexus -f -g 1001
2 | useradd -u 1001 -r -g nexus -m -d ${NEXUS_HOME} \
3 | -s /sbin/nologin \
4 | -c "Nexus User" nexus
5 |
--------------------------------------------------------------------------------
/labs/multicontainer-openshift/images/nodejs/test/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM do180/nodejs
2 | MAINTAINER Red Hat Training
3 |
4 | EXPOSE 3000
5 |
6 | CMD ["scl","enable","rh-nodejs4","./run.sh"]
7 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## Repository for Red Hat OpenShift Container Platform (OCP 3.x and 4.x) cheatsheet
2 | Let's go to the [cheatsheet.md](https://github.com/fahmifahim/openshift/blob/master/01.openshift-cheatsheet.md) file
3 |
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/networked/README.md:
--------------------------------------------------------------------------------
1 | # Node.js To Do List
2 |
3 | To test the application, access http://localhost:30080/todo from your developer workstation (the box host, NOT the workstation VM)
4 |
5 |
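6 | A quick check from the command line (a sketch; run.sh must have started both containers first):
7 |
8 |     curl -s http://localhost:30080/todo/ | head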
--------------------------------------------------------------------------------
/labs/multicontainer-openshift/images/mysql/test/testdata.sql:
--------------------------------------------------------------------------------
1 | create table contacts (
2 | name varchar(255),
3 | email varchar(100)
4 | );
5 |
6 | insert into contacts values (
7 | 'John Doe', 'jdoe@nowhere.net'
8 | );
9 |
10 |
--------------------------------------------------------------------------------
/labs/comprehensive-review/image/get-nexus-bundle.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | if curl -L --progress-bar -O https://download.sonatype.com/nexus/oss/nexus-2.14.3-02-bundle.tar.gz
3 | then
4 | echo "Nexus bundle download successful"
5 | else
6 | echo "Download failed"
7 | fi
8 |
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/build/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "todo",
3 | "version": "0.0.1",
4 | "private": true,
5 | "dependencies": {
6 | "restify": "8.4.0",
7 | "sequelize": "5.21.1",
8 | "mysql2": "2.0.0"
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/build/todo/lib/dependencies/fonts/glyphicons-halflings-regular.eot:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fahmifahim/openshift/HEAD/labs/multicontainer-design/deploy/nodejs/build/todo/lib/dependencies/fonts/glyphicons-halflings-regular.eot
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/build/todo/lib/dependencies/fonts/glyphicons-halflings-regular.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fahmifahim/openshift/HEAD/labs/multicontainer-design/deploy/nodejs/build/todo/lib/dependencies/fonts/glyphicons-halflings-regular.ttf
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/build/todo/lib/dependencies/fonts/glyphicons-halflings-regular.woff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fahmifahim/openshift/HEAD/labs/multicontainer-design/deploy/nodejs/build/todo/lib/dependencies/fonts/glyphicons-halflings-regular.woff
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/nodejs-source/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "todo",
3 | "version": "0.0.1",
4 | "private": true,
5 | "dependencies": {
6 | "restify": "8.4.0",
7 | "sequelize": "5.21.1",
8 | "mysql2": "2.0.0"
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/nodejs-source/todo/lib/dependencies/fonts/glyphicons-halflings-regular.eot:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fahmifahim/openshift/HEAD/labs/multicontainer-design/deploy/nodejs/nodejs-source/todo/lib/dependencies/fonts/glyphicons-halflings-regular.eot
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/nodejs-source/todo/lib/dependencies/fonts/glyphicons-halflings-regular.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fahmifahim/openshift/HEAD/labs/multicontainer-design/deploy/nodejs/nodejs-source/todo/lib/dependencies/fonts/glyphicons-halflings-regular.ttf
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/nodejs-source/todo/lib/dependencies/fonts/glyphicons-halflings-regular.woff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fahmifahim/openshift/HEAD/labs/multicontainer-design/deploy/nodejs/nodejs-source/todo/lib/dependencies/fonts/glyphicons-halflings-regular.woff
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/networked/db.sql:
--------------------------------------------------------------------------------
1 | CREATE TABLE `Item` (`id` BIGINT not null auto_increment primary key, `description` VARCHAR(100), `done` BIT);
2 | INSERT INTO `Item` (`id`,`description`,`done`) VALUES (1,'Pick up newspaper', 0);
3 | INSERT INTO `Item` (`id`,`description`,`done`) VALUES (2,'Buy groceries', 1);
4 |
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/build/models/db.js:
--------------------------------------------------------------------------------
1 |
2 | module.exports.params = {
3 | dbname: process.env.MYSQL_DATABASE,
4 | username: process.env.MYSQL_USER,
5 | password: process.env.MYSQL_PASSWORD,
6 | params: {
7 | host: '10.88.100.101',
8 | port: '3306',
9 | dialect: 'mysql'
10 | }
11 | };
12 |
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/networked/work/init/db.sql:
--------------------------------------------------------------------------------
1 | CREATE TABLE `Item` (`id` BIGINT not null auto_increment primary key, `description` VARCHAR(100), `done` BIT);
2 | INSERT INTO `Item` (`id`,`description`,`done`) VALUES (1,'Pick up newspaper', 0);
3 | INSERT INTO `Item` (`id`,`description`,`done`) VALUES (2,'Buy groceries', 1);
4 |
--------------------------------------------------------------------------------
/labs/multicontainer-openshift/db.sql:
--------------------------------------------------------------------------------
1 | DROP TABLE IF EXISTS `Item`;
2 | CREATE TABLE `Item` (`id` BIGINT not null auto_increment primary key, `description` VARCHAR(100), `done` BIT);
3 | INSERT INTO `Item` (`id`,`description`,`done`) VALUES (1,'Pick up newspaper', 0);
4 | INSERT INTO `Item` (`id`,`description`,`done`) VALUES (2,'Buy groceries', 1);
5 |
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM test/nodejs
2 | ARG NEXUS_BASE_URL
3 | MAINTAINER username
4 |
5 | COPY run.sh build ${HOME}/
6 | RUN scl enable rh-nodejs8 'npm install --registry=http://$NEXUS_BASE_URL/repository/nodejs/'
7 | EXPOSE 30080
8 |
9 | CMD ["scl","enable","rh-nodejs8","./run.sh"]
10 |
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/build/todo/hello.html:
--------------------------------------------------------------------------------
1 | <html>
2 | <head>
3 | <title>HTML5 Todo App</title>
4 | </head>
5 | <body>
6 | <h1>Test HTML page</h1>
7 | <p>
8 | This is just a test page to check Apache Httpd is working before involving Angular and the REST services.
9 | </p>
10 | </body>
11 | </html>
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/nodejs-source/models/db.js:
--------------------------------------------------------------------------------
1 |
2 | module.exports.params = {
3 | dbname: process.env.MYSQL_DATABASE,
4 | username: process.env.MYSQL_USER,
5 | password: process.env.MYSQL_PASSWORD,
6 | params: {
7 | host: '10.88.100.101',
8 | port: '3306',
9 | dialect: 'mysql'
10 | }
11 | };
12 |
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/nodejs-source/todo/hello.html:
--------------------------------------------------------------------------------
1 | <html>
2 | <head>
3 | <title>HTML5 Todo App</title>
4 | </head>
5 | <body>
6 | <h1>Test HTML page</h1>
7 | <p>
8 | This is just a test page to check Apache Httpd is working before involving Angular and the REST services.
9 | </p>
10 | </body>
11 | </html>
--------------------------------------------------------------------------------
/labs/dockerfile-create/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubi7/ubi:7.7
2 | MAINTAINER Tim Robert
3 | LABEL description="This is a custom image for Ubi"
4 | RUN yum install -y httpd && \
5 | yum clean all
6 | RUN echo "Hello! This is created from customized Dockerfile" > /usr/share/httpd/noindex/index.html
7 | EXPOSE 80
8 | ENTRYPOINT ["httpd", "-D", "FOREGROUND"]
9 |
--------------------------------------------------------------------------------
/labs/multicontainer-openshift/images/nodejs/test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -x
2 |
3 | cd test
4 | docker build -t do180/test-nodejs .
5 | docker run -d --name test-nodejs -p 30080:3000 do180/test-nodejs
6 | sleep 3
7 | # Expected result is "Hello there" no HTML formatting
8 | curl http://127.0.0.1:30080/hi
9 | echo
10 | docker stop test-nodejs
11 | docker rm test-nodejs
12 |
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/networked/stop.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Stopping and removing containers"
4 | sudo podman stop todoapi
5 | sudo podman stop mysql
6 | sudo podman rm todoapi
7 | sudo podman rm mysql
8 |
9 | # If there was a problem with run.sh, delete the data dir so the database can be re-initialized:
10 | echo "Removing work directory"
11 | sudo rm -rf work
12 |
--------------------------------------------------------------------------------
/labs/comprehensive-review/deploy/local/run-persistent.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | if [ ! -d /tmp/docker/work ]; then
3 | mkdir -p /tmp/docker/work
4 | sudo semanage fcontext -a -t container_file_t '/tmp/docker/work(/.*)?'
5 | sudo restorecon -R /tmp/docker/work
6 | sudo chown 1001:1001 /tmp/docker/work
7 | fi
8 |
9 | sudo podman run -d -v /tmp/docker/work:/opt/nexus/sonatype-work nexus
10 |
--------------------------------------------------------------------------------
/labs/multicontainer-design/images/mysql/README.md:
--------------------------------------------------------------------------------
1 | # DO180 MySQL 5.6 Docker Image
2 |
3 | This image customizes the supported Red Hat SCL MySQL image.
4 |
5 | The customization is a mechanism that loads every script file in the /var/lib/mysql/init folder into the database.
6 |
7 | To build this image, use the following command:
8 |
9 | sudo podman build -t do180/mysql-56-rhel7 .
10 |
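11 | To try the init-script mechanism locally, a minimal run sketch (the database name and credentials are only examples; the deploy scripts under deploy/nodejs/networked use the same volume layout):
12 |
13 |     mkdir -p work/init work/data
14 |     cp db.sql work/init
15 |     sudo podman run -d --name mysql \
16 |       -e MYSQL_DATABASE=items -e MYSQL_USER=user1 \
17 |       -e MYSQL_PASSWORD=mypa55 -e MYSQL_ROOT_PASSWORD=r00tpa55 \
18 |       -v $PWD/work/init:/var/lib/mysql/init \
19 |       -v $PWD/work/data:/var/lib/mysql/data \
20 |       do180/mysql-56-rhel7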
--------------------------------------------------------------------------------
/labs/multicontainer-openshift/images/mysql/README.md:
--------------------------------------------------------------------------------
1 | # DO180 MySQL 5.7 Docker Image
2 |
3 | This image customizes the supported Red Hat SCL MySQL image.
4 |
5 | The customization is a mechanism that loads every script file in the /var/lib/mysql/init folder into the database.
6 |
7 | To build this image, use the following command:
8 |
9 |     sudo podman build -t do180/mysql-57-rhel7 .
10 |
--------------------------------------------------------------------------------
/labs/multicontainer-openshift/images/mysql/root/usr/share/container-scripts/mysql/post-init/db.sql:
--------------------------------------------------------------------------------
1 | DROP TABLE IF EXISTS `Item`;
2 | CREATE TABLE `Item` (`id` BIGINT not null auto_increment primary key, `description` VARCHAR(100), `done` BIT);
3 | INSERT INTO `Item` (`id`,`description`,`done`) VALUES (1,'Pick up newspaper', 0);
4 | INSERT INTO `Item` (`id`,`description`,`done`) VALUES (2,'Buy groceries', 1);
5 |
--------------------------------------------------------------------------------
/labs/multicontainer-openshift/images/nodejs/test/build/app.js:
--------------------------------------------------------------------------------
1 | var express = require('express');
2 | var app = express();
3 |
4 | app.get('/hi', function (req, res) {
5 | res.send('Hello there');
6 | });
7 |
8 | var server = app.listen(3000, function () {
9 | var host = server.address().address;
10 | var port = server.address().port;
11 |
12 | console.log('Example app listening at http://%s:%s', host, port);
13 | });
14 |
15 |
--------------------------------------------------------------------------------
/labs/multicontainer-openshift/images/nodejs/test/build/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "test",
3 | "version": "1.0.0",
4 | "description": "Hello World application",
5 | "main": "app.js",
6 | "scripts": {
7 | "test": "echo \"Error: no test specified\" && exit 1",
8 | "start": "node app.js"
9 | },
10 | "author": "Red Hat Training",
11 | "license": "ISC",
12 | "dependencies": {
13 | "express": "^4.13.3"
14 | }
15 | }
16 |
--------------------------------------------------------------------------------
/tools/sidecar-busybox-LogAgent/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM busybox:latest
2 | #I am using busybox v.1.32.1
3 |
4 | COPY ./rsync /bin/rsync
5 | #Don't forget to prepare your rsync binary file
6 | CMD ["/bin/rsync"]
7 |
8 | #Notes:
9 | #You need to prepare all the related libraries to fully execute the rsync command
10 | #Check the required libraries with: ldd $(which rsync) on your Linux machine
11 | #Then, put the related libraries into your busybox image
12 |
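13 | #A sketch of the preparation on the build host (paths are examples; the list printed by ldd differs per distro):
14 | #  ldd $(which rsync)                          # list the shared libraries rsync needs
15 | #  cp $(which rsync) ./rsync                   # place the binary next to this Dockerfile
16 | #  mkdir lib64 && cp /lib64/libc.so.6 lib64/   # repeat for every library ldd reported
17 | #Then add a matching COPY line, e.g.: COPY ./lib64/ /lib64/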
--------------------------------------------------------------------------------
/tools/sidecar-fluentd-LogAgent/fluentd-confmap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: fluentd-confmap
5 | namespace: dev-project
6 | data:
7 | fluent.conf: |
8 |   <source>
9 |     @type tail
10 |     path /logs/defaultServer/verbosegc*.log
11 |     pos_file /var/log/1.log.pos
12 |     tag wlp.verbose1
13 |   </source>
14 |
15 |   <match **>
16 |     @type stdout
17 |   </match>
18 |
--------------------------------------------------------------------------------
/labs/dockerfile-review/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubi7/ubi:7.7
2 | MAINTAINER TestUser
3 | ENV PORT 8080
4 | RUN yum install -y httpd && \
5 | yum clean all
6 | RUN sed -ri -e "/^Listen 80/c\Listen ${PORT}" /etc/httpd/conf/httpd.conf && \
7 | chown -R apache:apache /etc/httpd/logs/ && \
8 |     chown -R apache:apache /run/httpd/
9 | USER apache
10 | EXPOSE ${PORT}
11 | COPY ./src/ /var/www/html/
12 | ENTRYPOINT ["httpd", "-D", "FOREGROUND"]
13 |
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/build.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | echo "Preparing build folder"
3 | rm -fr build
4 | mkdir -p build
5 | cp -ap nodejs-source/* build
6 | rm build/*.sh
7 |
8 | # The image build complains if it cannot read the database folder, even though it is not needed for building the image
9 | sudo rm -rf {linked,kubernetes}/work
10 |
11 | source /usr/local/etc/ocp4.config
12 | sudo podman build --layers=false -t test/todonodejs --build-arg NEXUS_BASE_URL=${RHT_OCP4_NEXUS_SERVER} .
13 |
--------------------------------------------------------------------------------
/labs/multicontainer-openshift/images/README.md:
--------------------------------------------------------------------------------
1 | # DO180 Base images for the frameworks
2 |
3 | The folders contain the Dockerfiles to create the base images for all
4 | the language/frameworks used in the solution.
5 |
6 | Each folder contains a `build.sh` script to build the container image and a `test.sh` script to test the image, sometimes by creating a derived image.
7 |
8 | Child images (application images) should copy their sources to a build folder at the same level as the child Dockerfile.
9 |
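10 | For example, the typical flow for the Node.js base image is (a sketch; both scripts live in the nodejs folder of this directory):
11 |
12 |     cd nodejs
13 |     ./build.sh     # builds do180/nodejs
14 |     ./test.sh      # builds a derived test image and curls it on port 30080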
--------------------------------------------------------------------------------
/labs/multicontainer-design/images/README.md:
--------------------------------------------------------------------------------
1 | # DO180 Base images for the frameworks
2 |
3 | The folders contain the Dockerfiles to create the base images for all
4 | the language/frameworks used in the solution.
5 |
6 | Each folder may contain a `build.sh` script to build the container image. Otherwise, instructions for building the image are present in the student guide.
7 |
8 | Child images (application images) should copy their sources to a build folder at the same level as the child Dockerfile.
9 |
--------------------------------------------------------------------------------
/labs/multicontainer-design/images/mysql/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM rhscl/mysql-57-rhel7
2 |
3 | # MySQL image for DO180
4 | #
5 | # Volumes:
6 | # * /var/lib/mysql/data - Datastore for MySQL
7 | # * /var/lib/mysql/init - Folder to load *.sql scripts
8 | # Environment:
9 | # * $MYSQL_USER - Database user name
10 | # * $MYSQL_PASSWORD - User's password
11 | # * $MYSQL_DATABASE - Name of the database to create
12 | # * $MYSQL_ROOT_PASSWORD (Optional) - Password for the 'root' MySQL account
13 |
14 | ADD root /
15 |
--------------------------------------------------------------------------------
/labs/multicontainer-openshift/images/mysql/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM rhscl/mysql-57-rhel7
2 |
3 | # MySQL image for DO180
4 | #
5 | # Volumes:
6 | # * /var/lib/mysql/data - Datastore for MySQL
7 | # * /var/lib/mysql/init - Folder to load *.sql scripts
8 | # Environment:
9 | # * $MYSQL_USER - Database user name
10 | # * $MYSQL_PASSWORD - User's password
11 | # * $MYSQL_DATABASE - Name of the database to create
12 | # * $MYSQL_ROOT_PASSWORD (Optional) - Password for the 'root' MySQL account
13 |
14 | ADD root /
15 |
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/networked/work/data/public_key.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN PUBLIC KEY-----
2 | MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAtT0AYANx8RxEhhZbZoeM
3 | 0PRZlE1cZji5fGkKeeWmWUwCAFyiS2+N4CBZFFhEPX5pT9wn1KeoORFv4LwCxqfK
4 | anRDTneYz0xSn7+p/YYW++2iLdVbYKpwoRdwUGfst4CJCzlHta1PJo3zp1DIhqje
5 | m8BKjJD4ys1927D0sAJ5DFDc0T3h4FfYa8oe1BrdxbzJDXt09ILQcJiYsn33sYXO
6 | /Nu7bpPlcA15CmhunoJjfgXV7IVmlQVvKh9io7ZcMXjwAtGFTamKGePOwofPy3J+
7 | GzuMIUJ+21F+zpQIU8sc0RxNFqHgj/ilHYj0JDKS6U9JTEP3EOH/x5ln2F3h41kO
8 | EQIDAQAB
9 | -----END PUBLIC KEY-----
10 |
--------------------------------------------------------------------------------
/git-template.md:
--------------------------------------------------------------------------------
1 | -  `#f03c15`
2 | -  `#c5f015`
3 | -  `#1589F0`
4 |
5 |
6 | ```diff
7 | - text in red
8 | + text in green
9 | ! text in orange
10 | # text in gray
11 | ```
12 |
13 | ```json
14 | // code for coloring
15 | ```
16 | ```html
17 | // code for coloring
18 | ```
19 | ```js
20 | // code for coloring
21 | ```
22 | ```css
23 | // code for coloring
24 | ```
25 | // etc.
26 |
--------------------------------------------------------------------------------
/labs/comprehensive-review/image/nexus-start.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | CONTEXT_PATH="/nexus"
3 | MAX_HEAP="768m"
4 | MIN_HEAP="256m"
5 | JAVA_OPTS="-server -Djava.net.preferIPv4Stack=true"
6 | LAUNCHER_CONF="./conf/jetty.xml ./conf/jetty-requestlog.xml"
7 | SONATYPE_WORK="${NEXUS_HOME}/sonatype-work"
8 | cd ${NEXUS_HOME}/nexus2
9 | exec java \
10 | -Dnexus-work=${SONATYPE_WORK} \
11 | -Dnexus-webapp-context-path=${CONTEXT_PATH} \
12 | -Xms${MIN_HEAP} -Xmx${MAX_HEAP} \
13 | -cp 'conf/:lib/*' \
14 | ${JAVA_OPTS} \
15 | org.sonatype.nexus.bootstrap.Launcher ${LAUNCHER_CONF}
16 |
--------------------------------------------------------------------------------
/labs/multicontainer-design/images/mysql/root/usr/share/container-scripts/mysql/post-init.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | mysql_flags="-u root --socket=/tmp/mysql.sock"
4 | admin_flags="--defaults-file=$MYSQL_DEFAULTS_FILE $mysql_flags"
5 | DIRECTORY=/var/lib/mysql/init
6 |
7 | if [ -d $DIRECTORY ]; then
8 | for F in `ls $DIRECTORY`; do
9 | if [ -n "${MYSQL_DATABASE-}" ]; then
10 | echo "Running init script: $DIRECTORY/$F"
11 | mysql $admin_flags $MYSQL_DATABASE < $DIRECTORY/$F
12 | fi
13 | done
14 | fi
15 |
16 |
--------------------------------------------------------------------------------
/labs/multicontainer-design/images/mysql/root/usr/share/container-scripts/mysql/init/post-init.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | mysql_flags="-u root --socket=/tmp/mysql.sock"
4 | admin_flags="--defaults-file=$MYSQL_DEFAULTS_FILE $mysql_flags"
5 | DIRECTORY=/var/lib/mysql/init
6 |
7 | if [ -d $DIRECTORY ]; then
8 | for F in `ls $DIRECTORY`; do
9 | if [ -n "${MYSQL_DATABASE-}" ]; then
10 | echo "Running init script: $DIRECTORY/$F"
11 | mysql $admin_flags $MYSQL_DATABASE < $DIRECTORY/$F
12 | fi
13 | done
14 | fi
15 |
16 |
--------------------------------------------------------------------------------
/labs/dockerfile-review/Dockerfile.old:
--------------------------------------------------------------------------------
1 | FROM
2 |
3 | MAINTAINER username
4 |
5 | ENV PORT
6 |
7 | RUN
8 |
9 | RUN sed -ri -e "/^Listen 80/c\Listen ${PORT}" /etc/httpd/conf/httpd.conf && \
10 | chown -R apache:apache /etc/httpd/logs/ && \
11 | chown -R apache:apache /run/httpd/
12 |
13 | USER apache
14 |
15 | # Expose the custom port that you provided in the ENV var
16 |
17 | # Copy all files under src/ folder to Apache DocumentRoot (/var/www/html)
18 |
19 | # Start Apache in the foreground
20 |
--------------------------------------------------------------------------------
/tools/sidecar-fluentd-LogAgent/sample2-fluentd-confmap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: fluentd-config
5 | data:
6 | fluentd.conf: |
7 |     <source>
8 |       type tail
9 |       format none
10 |       path /var/log/1.log
11 |       pos_file /var/log/1.log.pos
12 |       tag count.format1
13 |     </source>
14 |
15 |     <source>
16 |       type tail
17 |       format none
18 |       path /var/log/2.log
19 |       pos_file /var/log/2.log.pos
20 |       tag count.format2
21 |     </source>
22 |
23 |     <match **>
24 |       type stdout
25 |     </match>
26 |
--------------------------------------------------------------------------------
/03.docker-registry.md:
--------------------------------------------------------------------------------
1 | # Docker Registry
2 | ### CURL private docker registry
3 | ```bash
4 | $ curl https://oauth:`oc whoami -t`@<registry-route>/v2/_catalog --cacert <ca-certificate-file>
5 | $ curl https://oauth:`oc whoami -t`@<registry-route>/v2/_catalog --insecure
6 | ```
7 |
8 | ### CURL public-online docker registry (redhat.com, docker.com, etc) via proxy
9 | ```bash
10 | $ curl -x http://<proxy-host>:8080 -L https://www.openshift.com
11 | $ curl -x http://<proxy-host>:8080 -L https://sso.redhat.com
12 | $ curl -x http://<proxy-host>:8080 -L https://www.docker.com
13 | ```
14 |
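15 | ### CURL the tag list of a single image (a sketch; the repository name is only an example)
16 | ```bash
17 | $ curl https://oauth:`oc whoami -t`@<registry-route>/v2/<namespace>/<image-name>/tags/list --insecure
18 | ```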
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/build/README.md:
--------------------------------------------------------------------------------
1 | # DO276 JavaScript/Node.js To Do List App
2 |
3 | Based on Restify 4.0 and Sequelize 3.14. Tested on Node.js 0.10 from SCL with Mariadb 5.5.
4 |
5 | Run `npm install` to download dependencies.
6 |
7 | Run as `node app.js`
8 |
9 | * Does not do pagination yet.
10 |
11 | * Database connection parameters hardcoded (as a novice developer would usually do).
12 |
13 | * There is a lot of boilerplate code in the controller and the model. There should be a way to have more centralized error handling.
14 |
15 | * Access the app at http://localhost:30080/todo
16 |
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/nodejs-source/README.md:
--------------------------------------------------------------------------------
1 | # DO276 JavaScript/Node.js To Do List App
2 |
3 | Based on Restify 4.0 and Sequelize 3.14. Tested on Node.js 0.10 from SCL with Mariadb 5.5.
4 |
5 | Run `npm install` to download dependencies.
6 |
7 | Run as `node app.js`
8 |
9 | * Does not do pagination yet.
10 |
11 | * Database connection parameters hardcoded (as a novice developer would usually do).
12 |
13 | * There is a lot of boilerplate code in the controller and the model. There should be a way to have more centralized error handling.
14 |
15 | * Access the app at http://localhost:30080/todo
16 |
--------------------------------------------------------------------------------
/04.elasticsearch.md:
--------------------------------------------------------------------------------
1 | #### Elasticsearch cheatsheet
2 | - [Elasticsearch cheatsheet page](https://gist.github.com/ruanbekker/e8a09604b14f37e8d2f743a87b930f93)
3 |
4 | - [Elasticsearch references](https://github.com/ruanbekker/cheatsheets/tree/master/elasticsearch)
5 |
6 | #### Get Elasticsearch Shard information
7 | ```bash
8 | $ oc exec -it [Elasticsearch-pod-name] -n openshift-logging -c elasticsearch -- \
9 |     curl 'https://localhost:9200/_cat/shards?v&s=index' \
10 |     --key /etc/elasticsearch/secret/admin-key \
11 |     --cert /etc/elasticsearch/secret/admin-cert \
12 |     --cacert /etc/elasticsearch/secret/admin-ca
12 | --cacert /etc/elasticsearch/secret/admin-ca
13 | ```
14 |
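15 | #### Get Elasticsearch cluster health (same certificate options; a sketch)
16 | ```bash
17 | $ oc exec -it [Elasticsearch-pod-name] -n openshift-logging -c elasticsearch -- \
18 |     curl 'https://localhost:9200/_cluster/health?pretty' \
19 |     --key /etc/elasticsearch/secret/admin-key \
20 |     --cert /etc/elasticsearch/secret/admin-cert \
21 |     --cacert /etc/elasticsearch/secret/admin-ca
22 | ```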
--------------------------------------------------------------------------------
/labs/multicontainer-openshift/images/mysql/root/usr/share/container-scripts/mysql/init/post-init.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | mysql_flags="-u root --socket=/tmp/mysql.sock"
4 | admin_flags="--defaults-file=$MYSQL_DEFAULTS_FILE $mysql_flags"
5 | #DIRECTORY=/var/lib/mysql/init
6 | DIRECTORY=/usr/share/container-scripts/mysql/post-init
7 | if [ -d $DIRECTORY ]; then
8 | for F in `ls $DIRECTORY`; do
9 | if [ -n "${MYSQL_DATABASE-}" ]; then
10 | if [ -f "$DIRECTORY/$F" ]; then
11 | echo "Running init script: $DIRECTORY/$F"
12 | mysql $admin_flags $MYSQL_DATABASE < $DIRECTORY/$F
13 | fi
14 | fi
15 | done
16 | fi
17 |
18 |
--------------------------------------------------------------------------------
/tools/sidecar-busybox-LogAgent/log-counter-busybox.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: counter-customized
5 | spec:
6 | containers:
7 | - name: count
8 |     image: custom-busybox:v1.32.1 #Use the newly customized busybox image
9 | args:
10 | - /bin/sh
11 | - -c
12 | - >
13 | i=0;
14 | while true;
15 | do
16 | echo "$i: $(date)" >> /var/log/1.log;
17 | echo "$(date) INFO $i" >> /var/log/2.log;
18 | i=$((i+1));
19 | /bin/rsync -avu /var/log/ /tmp >> /tmp/rsync.log;
20 | sleep 3;
21 | done
22 | volumeMounts:
23 | - name: varlog
24 | mountPath: /var/log
25 | volumes:
26 | - name: varlog
27 | emptyDir: {}
28 |
--------------------------------------------------------------------------------
/05.fluentd.md:
--------------------------------------------------------------------------------
1 | - Fluentd pod setting for output-remote-syslog :
2 | - /etc/fluent/configs.d/dynamic/output-remote-syslog.conf
3 | ```bash
4 | <store>
5 |   @type syslog_buffered
6 |   remote_syslog syslogserver.openshift-logging.svc.cluster.local
7 |   port 514
8 |   hostname fluentd-4nzfz
9 |   tag_key ident,systemd.u.SYSLOG_IDENTIFIER
10 |   facility local0
11 |   severity info
12 |   use_record false
13 | </store>
14 | ```
15 |
16 | - `/usr/share/gems/fluent-plugin-remote-rsyslog-1.1/lib/fluentd/plugin/out_syslog_bufferd.rb`
17 | - Fluentd log buffer: `/var/lib/fluentd/buffer-output-es-config-xxx`
18 | - This buffer log will be forwarded to remote elasticsearch or remote syslog server
19 |
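20 | - A quick way to inspect the buffer files (a sketch; the pod name is only an example):
21 | ```bash
22 | $ oc exec fluentd-4nzfz -n openshift-logging -- ls -lh /var/lib/fluentd/
23 | ```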
--------------------------------------------------------------------------------
/labs/multicontainer-openshift/images/nodejs/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubi7/ubi:7.7
2 |
3 | MAINTAINER username
4 |
5 | ENV NODEJS_VERSION=8.0 \
6 | HOME=/opt/app-root/src
7 |
8 | RUN yum install -y --setopt=tsflags=nodocs rh-nodejs8 make && \
9 | yum clean all -y && \
10 | mkdir -p /opt/app-root && \
11 | groupadd -r appuser -f -g 1001 && \
12 | useradd -u 1001 -r -g appuser -m -d ${HOME} -s /sbin/nologin \
13 | -c "Application User" appuser && \
14 | chown -R appuser:appuser /opt/app-root && \
15 | chmod -R 755 /opt/app-root
16 |
17 | ADD ./enable-rh-nodejs8.sh /etc/profile.d/
18 |
19 | USER appuser
20 | WORKDIR ${HOME}
21 |
22 | CMD ["echo", "You must create your own container from this one."]
23 |
--------------------------------------------------------------------------------
/labs/dockerfile-review/training.repo:
--------------------------------------------------------------------------------
1 | [rhel_dvd]
2 | baseurl = http://content.example.com/rhel7.6/x86_64/dvd
3 | enabled = true
4 | gpgcheck = false
5 | name = Remote classroom copy of dvd
6 |
7 | [rhel-7-server-optional-rpms]
8 | baseurl = http://content.example.com/ocp4.2/x86_64/rhelopt
9 | enabled = true
10 | gpgcheck = false
11 | name = Remote classroom copy of RHEL 7.6 Optional Packages
12 |
13 | [rhel-7-server-extras-rpms]
14 | baseurl = http://content.example.com/ocp4.2/x86_64/rheladditional/rhel-7-server-extras-rpms
15 | enabled = true
16 | gpgcheck = false
17 | name = Remote classroom copy of RHEL 7.6 Extra Packages
18 |
19 | [rhel-server-rhscl-7-rpms]
20 | baseurl = http://content.example.com/ocp4.2/x86_64/rhelrhscl
21 | enabled = true
22 | gpgcheck = false
23 | name = Remote classroom copy of RHSCL
24 |
--------------------------------------------------------------------------------
/labs/comprehensive-review/image/training.repo:
--------------------------------------------------------------------------------
1 | [rhel_dvd]
2 | baseurl = http://content.example.com/rhel7.6/x86_64/dvd
3 | enabled = true
4 | gpgcheck = false
5 | name = Remote classroom copy of dvd
6 |
7 | [rhel-7-server-optional-rpms]
8 | baseurl = http://content.example.com/ocp4.2/x86_64/rhelopt
9 | enabled = true
10 | gpgcheck = false
11 | name = Remote classroom copy of RHEL 7.6 Optional Packages
12 |
13 | [rhel-7-server-extras-rpms]
14 | baseurl = http://content.example.com/ocp4.2/x86_64/rheladditional/rhel-7-server-extras-rpms
15 | enabled = true
16 | gpgcheck = false
17 | name = Remote classroom copy of RHEL 7.6 Extra Packages
18 |
19 | [rhel-server-rhscl-7-rpms]
20 | baseurl = http://content.example.com/ocp4.2/x86_64/rhelrhscl
21 | enabled = true
22 | gpgcheck = false
23 | name = Remote classroom copy of RHSCL
24 |
--------------------------------------------------------------------------------
/labs/comprehensive-review/image/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubi7/ubi:7.7
2 |
3 | MAINTAINER username
4 |
5 | ENV NEXUS_VERSION=2.14.3-02 \
6 | NEXUS_HOME=/opt/nexus
7 |
8 | RUN yum install -y --setopt=tsflags=nodocs java-1.8.0-openjdk-devel && \
9 | yum clean all -y
10 |
11 | RUN groupadd -r nexus -f -g 1001 && \
12 | useradd -u 1001 -r -g nexus -m -d ${NEXUS_HOME} -s /sbin/nologin \
13 | -c "Nexus User" nexus && \
14 | chown -R nexus:nexus ${NEXUS_HOME} && \
15 | chmod -R 755 ${NEXUS_HOME}
16 |
17 | USER nexus
18 |
19 | ADD nexus-${NEXUS_VERSION}-bundle.tar.gz ${NEXUS_HOME}
20 | ADD nexus-start.sh ${NEXUS_HOME}/
21 |
22 | RUN ln -s ${NEXUS_HOME}/nexus-${NEXUS_VERSION} \
23 | ${NEXUS_HOME}/nexus2
24 |
25 | WORKDIR ${NEXUS_HOME}
26 |
27 | VOLUME ["/opt/nexus/sonatype-work"]
28 |
29 | CMD ["sh", "nexus-start.sh"]
30 |
--------------------------------------------------------------------------------
/labs/multicontainer-design/images/nodejs/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubi7/ubi:7.7
2 |
3 | MAINTAINER username
4 |
5 | ENV NODEJS_VERSION=8.0 \
6 | HOME=/opt/app-root/src
7 |
8 | # Setting tsflags=nodocs helps create a leaner container
9 | # image, as documentation is not needed in the container.
10 | RUN yum install -y --setopt=tsflags=nodocs rh-nodejs8 make && \
11 | yum clean all --noplugins -y && \
12 | mkdir -p /opt/app-root && \
13 | groupadd -r appuser -f -g 1001 && \
14 | useradd -u 1001 -r -g appuser -m -d ${HOME} -s /sbin/nologin \
15 | -c "Application User" appuser && \
16 | chown -R appuser:appuser /opt/app-root && \
17 | chmod -R 755 /opt/app-root
18 |
19 | ADD ./enable-rh-nodejs8.sh /etc/profile.d/
20 |
21 | USER appuser
22 | WORKDIR ${HOME}
23 |
24 | CMD ["echo", "You must create your own container from this one."]
25 |
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/networked/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | if [ -d "work" ]; then
4 | sudo rm -fr work
5 | fi
6 |
7 | echo "Create database volume..."
8 | mkdir -p work/init work/data
9 | cp db.sql work/init
10 | sudo chcon -Rt container_file_t work
11 | sudo chown -R 27:27 work
12 |
13 | # TODO Add podman run for mysql image here
14 | # Assign the container an IP from the range defined in RFC 1918: 10.88.0.0/16
15 | sudo podman run -d --name mysql -e MYSQL_DATABASE=items -e MYSQL_USER=user1 \
16 | -e MYSQL_PASSWORD=mypa55 -e MYSQL_ROOT_PASSWORD=r00tpa55 \
17 | -v $PWD/work/data:/var/lib/mysql/data \
18 | -v $PWD/work/init:/var/lib/mysql/init -p 30306:3306 \
19 | --ip 10.88.100.101 test/mysql-57-rhel7
20 |
21 | sleep 9
22 |
23 | # TODO Add podman run for todonodejs image here
24 | sudo podman run -d --name todoapi -e MYSQL_DATABASE=items -e MYSQL_USER=user1 \
25 | -e MYSQL_PASSWORD=mypa55 -p 30080:30080 \
26 | test/todonodejs
27 |
--------------------------------------------------------------------------------
/tools/sidecar-fluentd-LogAgent/sample2-pod-with-sidecar-fluentd.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: counter
5 | spec:
6 | containers:
7 | - name: count
8 | image: busybox:latest
9 | args:
10 | - /bin/sh
11 | - -c
12 | - >
13 | i=0;
14 | while true;
15 | do
16 | echo "$i: $(date)" >> /var/log/1.log;
17 | echo "$(date) INFO $i" >> /var/log/2.log;
18 | i=$((i+1));
19 | sleep 1;
20 | done
21 | volumeMounts:
22 | - name: varlog
23 | mountPath: /var/log
24 | - name: count-agent
25 | image: fluent/fluentd:latest
26 | env:
27 | - name: FLUENTD_ARGS
28 | value: -c /etc/fluentd-config/fluentd.conf
29 | volumeMounts:
30 | - name: varlog
31 | mountPath: /var/log
32 | - name: config-volume
33 | mountPath: /etc/fluentd-config
34 | volumes:
35 | - name: varlog
36 | emptyDir: {}
37 | - name: config-volume
38 | configMap:
39 | name: fluentd-config
40 |
--------------------------------------------------------------------------------
/labs/multicontainer-design/images/mysql/training.repo:
--------------------------------------------------------------------------------
1 | [rhel_dvd]
2 | baseurl = http://content.example.com/rhel7.6/x86_64/dvd
3 | enabled = true
4 | gpgcheck = false
5 | name = Remote classroom copy of dvd
6 |
7 | [rhel-7-server-optional-rpms]
8 | baseurl = http://content.example.com/ocp4.2/x86_64/rhelopt
9 | enabled = true
10 | gpgcheck = false
11 | name = Remote classroom copy of RHEL 7.6 Optional Packages
12 |
13 | [rhel-7-server-extras-rpms]
14 | baseurl = http://content.example.com/ocp4.2/x86_64/rheladditional/rhel-7-server-extras-rpms
15 | enabled = true
16 | gpgcheck = false
17 | name = Remote classroom copy of RHEL 7.6 Extra Packages
18 |
19 | #[rhel-7-server-ose-3.9-rpms]
20 | #baseurl = http://content.example.com/ocp4.2/x86_64/ocp
21 | #enabled = true
22 | #gpgcheck = false
23 | #name = Remote classroom copy of OpenShift 3.9 RPMS
24 |
25 | [rhel-server-rhscl-7-rpms]
26 | baseurl = http://content.example.com/ocp4.2/x86_64/rhelrhscl
27 | enabled = true
28 | gpgcheck = false
29 | name = Remote classroom copy of RHSCL
30 |
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/networked/work/data/ca.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADA8MTowOAYDVQQDDDFNeVNR
3 | TF9TZXJ2ZXJfNS43LjI0X0F1dG9fR2VuZXJhdGVkX0NBX0NlcnRpZmljYXRlMB4X
4 | DTIwMTAxMTAxMDUxOFoXDTMwMTAwOTAxMDUxOFowPDE6MDgGA1UEAwwxTXlTUUxf
5 | U2VydmVyXzUuNy4yNF9BdXRvX0dlbmVyYXRlZF9DQV9DZXJ0aWZpY2F0ZTCCASIw
6 | DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALHJZKA7P/SpowExFxJi/SM9DvLV
7 | Qy0VKYIC7quTHkYAZISll6AD42TbvqzTkLb1kq1Gwjqh9BFcu7E3XUjoGQSGpcfw
8 | e+pAOQJFD9VwJamdv/fNGpe0Pquy5JG8Cazpc+UkFkwrmNjmmYLscHTz/Qv1lacK
9 | 1Zklk3Nk3wLd9DQnYGkcGCmBE27PVVpPQBfRpJzM1p8FcOqcGGLZkuLdia5jpGvs
10 | kf9CXB3dVEfrSWRm1hiUAmNmKjjCyoUUjMDw2LQPBMZsgfeEd+ivZfn575PJ9qX0
11 | z+gERihbCzNzDxiS/4Uz5r7lyPwDTJalUdsvulUxsMUilqfb7Xv4qiXtVv0CAwEA
12 | AaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEABTzPl8AV
13 | vB76knGOPgRdBt5qpkX/8CrF/aPN9KRd10y/r0gSeAZop/B4Nc6Rj1xsh7RDfWuS
14 | E7oI2jvYyix0MbrT6XlkA4UAoihYlCJl3pBh9Fznnd4r2kGOZBuDsjmAIziN/OCO
15 | bjU7d0O3vb+NyPQqzngx2RgZ6SWn+2trh6CRgJhhh/qUE+Js5Xt0iWcY9NemoAlX
16 | YxkF9zQl6L+sqONMIW6mUjCFJK3EH0lAobIoOjolLnxiLzrPJQjpfphNc+XL6hqW
17 | +I2B38CW6n392i37JvXNXdDQYIia3SpuK96h/25JytKbSoD0tIJZ5p6NpYR6RSFR
18 | BKtOdiUTMNkxCA==
19 | -----END CERTIFICATE-----
20 |
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/networked/work/data/client-cert.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIDBzCCAe+gAwIBAgIBAzANBgkqhkiG9w0BAQsFADA8MTowOAYDVQQDDDFNeVNR
3 | TF9TZXJ2ZXJfNS43LjI0X0F1dG9fR2VuZXJhdGVkX0NBX0NlcnRpZmljYXRlMB4X
4 | DTIwMTAxMTAxMDUxOFoXDTMwMTAwOTAxMDUxOFowQDE+MDwGA1UEAww1TXlTUUxf
5 | U2VydmVyXzUuNy4yNF9BdXRvX0dlbmVyYXRlZF9DbGllbnRfQ2VydGlmaWNhdGUw
6 | ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDCHZNhs6XvNsl3X6pMPwlj
7 | imoY1r/eDj8dynt1d2vaztSNgu4VMOT17MzsGU2HbSyVCdkGZFQ0NMWzfDN0VDBp
8 | IoIzTD379BMLT02e/ezVlrVyecf0QVOa6C7zVjjaUE/c+JkFwrATNgBjGu/4dLFz
9 | hP1MVZJQSPLZmVFAdkgq4G3XGKuY8voRMoRY6acOCXZafD19X+baosZnTuV+wraK
10 | dr0PoEmsEk17GXaBiTRZWDBJEjP+bVzyfe9jL5qJmF0+nXUR7ua6ZcMygU3tj1HV
11 | /xQAlyIoCOZwY8rZyR1xXMyDP+x3hxW/jOGQyoxRkVCEvbSN0APjFy3TdCXmBHbJ
12 | AgMBAAGjEDAOMAwGA1UdEwEB/wQCMAAwDQYJKoZIhvcNAQELBQADggEBAHH5zW3T
13 | yfsDblziaJT+7Sq9rCCCbeb3TQY91zucaISNUd5seByhzQf/rGiOOE5rkMkOmUtB
14 | WxBhIN5HtvYRdRcZL426MPbyvURRicXm8YHW78UVf0hYvX20SZr4Y2C6ZSDSrSQa
15 | lOjLkNuIEFTMwbHS9keEbJXgKAZy7TL6Pzsi9S+Nu9tVLACBPZsKRDkx4iBc1fKh
16 | FqTzDhI2J8wtZCODW15vMsEQb5yn3Lxt4B1GAez9hONDGjU/uY6YxCyFHXzbLQ4+
17 | vxTvAElWLXg4vIWmFcnEB7+EkRztIFOXLPsfCwVjZCpbo5D0DdaZuJIvMAP4Khk2
18 | 6kvjL/LFI1KTuT4=
19 | -----END CERTIFICATE-----
20 |
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/networked/work/data/server-cert.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIDBzCCAe+gAwIBAgIBAjANBgkqhkiG9w0BAQsFADA8MTowOAYDVQQDDDFNeVNR
3 | TF9TZXJ2ZXJfNS43LjI0X0F1dG9fR2VuZXJhdGVkX0NBX0NlcnRpZmljYXRlMB4X
4 | DTIwMTAxMTAxMDUxOFoXDTMwMTAwOTAxMDUxOFowQDE+MDwGA1UEAww1TXlTUUxf
5 | U2VydmVyXzUuNy4yNF9BdXRvX0dlbmVyYXRlZF9TZXJ2ZXJfQ2VydGlmaWNhdGUw
6 | ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDFHorU2abpxRrrNcWRMmfE
7 | S7hgHLi3VX7W26yqY1X8b9Eu5BKZCeL82nnBWxk3mFXojTTt/lNBV+RekfHCNJkQ
8 | 8L7n/SzXZkA0A8mwORUShfg17OdIySgU4iTCS0q/tbsUeA/U0tCA4JMibXEy1T0u
9 | YWcOZbEVxOsRDbubDi2FSktOADmAe0XifZ9XygVMQJLC6/M2+tH4D7VoUbT7uWzR
10 | cPKJbv1WCizYHrahhkigTjXt5yEAR6iEkoKK1RrAF16l4uwnRiZ6FFnLUMI/u9xJ
11 | Hr+EVmTwq7cIan55E4o5BHoOt0PQfOEMNU1ryxneJReZ4fcO5aJQRiVfPFFxpV6f
12 | AgMBAAGjEDAOMAwGA1UdEwEB/wQCMAAwDQYJKoZIhvcNAQELBQADggEBAHwwkFgk
13 | pUHPvBzjBY1pC8tLqp8RShn4emlc6UKPdhkQyYE/xHSg0YLbsKOPwma7mRwStcPf
14 | pMRvvMDwuVrBrXSZRed8SZjdOz9oXBFM5UFk/IfmWj07dfSdfxMwu2FQs0hdzfEd
15 | bODfaqPtU8DwaR7q5HUcwWJCyThunqYlc4aDSloUfYn9ct1RBhnrtpQXZdEU44ZD
16 | eWMTxCJ44CAm99n1N03rsSV4tt0cyp0wnQimW+n9ew/+9yTQLZ9B6MQXQSpJMAd3
17 | vhHNX53Zdexk519b/d83PxuEnvTjOSbFeFuTM3r7YrEcrblEPLk1nrC8LVso9FZe
18 | yai/acv4bER5tyg=
19 | -----END CERTIFICATE-----
20 |
--------------------------------------------------------------------------------
/tools/sidecar-fluentd-LogAgent/sample2-preparation.md:
--------------------------------------------------------------------------------
1 | ```bash
2 | docker pull docker.io/fluent/fluentd:latest
3 |
4 | docker images
5 | REPOSITORY TAG IMAGE ID CREATED SIZE
6 | fluent/fluentd latest 9406ff63f205 2 years ago 38.3MB
7 |
   8 | #Save the image to a local tar archive
9 | docker save -o /home/user1/fluentd.tar fluent/fluentd:latest
10 |
11 | ls -l /home/user1/fluentd.tar
12 |
13 | #Copy the image to your remote server
14 | scp /home/user1/fluentd.tar root@remote-server-ip:/home/user1/
15 |
  16 | #Load the image from the tar file on your remote server
17 | docker load -i /home/user1/fluentd.tar
18 | df64d3292fd6: Loading layer [==================================================>] 4.672MB/4.672MB
19 | 91d1787a48d9: Loading layer [==================================================>] 38.35MB/38.35MB
20 | 4c1e27ed455f: Loading layer [==================================================>] 2.048kB/2.048kB
21 | 2bf9e4461d5c: Loading layer [==================================================>] 2.56kB/2.56kB
22 | 96049201c093: Loading layer [==================================================>] 3.584kB/3.584kB
23 | cc1df1d557f2: Loading layer [==================================================>] 3.072kB/3.072kB
24 | e129090bea45: Loading layer [==================================================>] 3.072kB/3.072kB
25 | Loaded image: fluent/fluentd:latest
26 |
27 | ```
28 |
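  29 | The save → scp → load sequence above can also be collapsed into a single step. A minimal sketch, assuming SSH access as root to the same remote-server-ip used above:
  30 | 
  31 | ```bash
  32 | #Stream the image straight into the remote Docker daemon (no intermediate tar file)
  33 | docker save fluent/fluentd:latest | ssh root@remote-server-ip docker load
  34 | ```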
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/build/todo/css/style.css:
--------------------------------------------------------------------------------
1 | body {
2 | width: 90%;
3 | margin-left: auto;
4 | margin-right: auto;
5 | text-align: center;
6 | }
7 |
8 | h3 {
9 | margin: 0 0 20px;
10 | text-align: left;
11 | }
12 |
13 | .message {
14 | margin-left: 5%;
15 | margin-right: 5%;
16 | }
17 |
18 | .grid {
19 | width: 40%;
20 | float: left;
21 | margin-left: 5%;
22 | margin-right: 5%;
23 | }
24 |
25 | .grid .remove {
26 | width: 20px;
27 | height: 20px;
28 | margin-top: 8px;
29 | color: #a94442;
30 | }
31 |
32 | .gridStyle {
33 | border: 1px solid rgb(212, 212, 212);
34 | height: 335px;
35 | }
36 |
37 | .form {
38 | width: 40%;
39 | float: right;
40 | margin-left: 5%;
41 | margin-right: 5%;
42 | text-align: left;
43 | }
44 |
45 | .form label {
46 | width: 100px;
47 | }
48 |
49 | .form input {
50 | width: 300px;
51 | float: right;
52 | }
53 |
54 | .form .form-group span {
55 | float: right;
56 | width: 20px;
57 | height: 20px;
58 | margin-top: 5px;
59 | margin-left: 5px;
60 | color: #5cb85c;
61 | }
62 |
63 | .form .form-group p {
64 | margin-top: 15px;
65 | margin-left: 200px;
66 | color: #a94442;
67 | }
68 |
69 | .form .avatar {
70 | height: 250px;
71 | float: right;
72 | margin-right: 25px;
73 | }
74 |
75 | .form .buttons {
76 | clear: both;
77 | float: right;
78 | margin-top: 10px;
79 | margin-right: 25px;
80 | }
81 |
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/nodejs-source/todo/css/style.css:
--------------------------------------------------------------------------------
1 | body {
2 | width: 90%;
3 | margin-left: auto;
4 | margin-right: auto;
5 | text-align: center;
6 | }
7 |
8 | h3 {
9 | margin: 0 0 20px;
10 | text-align: left;
11 | }
12 |
13 | .message {
14 | margin-left: 5%;
15 | margin-right: 5%;
16 | }
17 |
18 | .grid {
19 | width: 40%;
20 | float: left;
21 | margin-left: 5%;
22 | margin-right: 5%;
23 | }
24 |
25 | .grid .remove {
26 | width: 20px;
27 | height: 20px;
28 | margin-top: 8px;
29 | color: #a94442;
30 | }
31 |
32 | .gridStyle {
33 | border: 1px solid rgb(212, 212, 212);
34 | height: 335px;
35 | }
36 |
37 | .form {
38 | width: 40%;
39 | float: right;
40 | margin-left: 5%;
41 | margin-right: 5%;
42 | text-align: left;
43 | }
44 |
45 | .form label {
46 | width: 100px;
47 | }
48 |
49 | .form input {
50 | width: 300px;
51 | float: right;
52 | }
53 |
54 | .form .form-group span {
55 | float: right;
56 | width: 20px;
57 | height: 20px;
58 | margin-top: 5px;
59 | margin-left: 5px;
60 | color: #5cb85c;
61 | }
62 |
63 | .form .form-group p {
64 | margin-top: 15px;
65 | margin-left: 200px;
66 | color: #a94442;
67 | }
68 |
69 | .form .avatar {
70 | height: 250px;
71 | float: right;
72 | margin-right: 25px;
73 | }
74 |
75 | .form .buttons {
76 | clear: both;
77 | float: right;
78 | margin-top: 10px;
79 | margin-right: 25px;
80 | }
81 |
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/build/app.js:
--------------------------------------------------------------------------------
1 | var restify = require('restify');
2 |
3 | var controller = require('./controllers/items');
4 |
5 | var db = require('./models/db');
6 | var model = require('./models/items');
7 |
8 | model.connect(db.params, function(err) {
9 | if (err) throw err;
10 | });
11 |
12 | var server = restify.createServer()
13 | .use(restify.plugins.fullResponse())
14 | .use(restify.plugins.queryParser())
15 | .use(restify.plugins.bodyParser());
16 |
17 | controller.context(server, '/todo/api', model);
18 |
19 | server.get({
20 | path: '/todo/api/items/:id'
21 | }, controller.read);
22 |
23 |
24 | server.get({
25 | path: '/todo/api/items'
26 | }, controller.list);
27 |
28 | server.post({
29 | path: '/todo/api/items'
30 | }, controller.save);
31 |
32 | server.del({
33 | path: '/todo/api/items/:id'
34 | }, controller.destroy);
35 |
36 |
37 | server.get({
38 | path: '/todo/*',
39 | name: 'todoapi'
40 | }, restify.plugins.serveStatic({
41 | 'directory': __dirname,
42 | 'default': 'index.html'
43 | }));
44 |
45 | var port = process.env.PORT || 30080;
46 | server.listen(port, function (err) {
47 | if (err)
48 | console.error(err);
49 | else
50 | console.log('App is ready at : ' + port);
51 | });
52 |
53 | if (process.env.environment == 'production')
54 | process.on('uncaughtException', function (err) {
55 | console.error(JSON.parse(JSON.stringify(err, ['stack', 'message', 'inner'], 2)))
56 | });
57 |
58 |
59 |
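  60 | // Note: the listen port defaults to 30080 and can be overridden through the PORT environment
  61 | // variable; the uncaughtException handler above is only registered when environment=production.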
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/nodejs-source/app.js:
--------------------------------------------------------------------------------
1 | var restify = require('restify');
2 |
3 | var controller = require('./controllers/items');
4 |
5 | var db = require('./models/db');
6 | var model = require('./models/items');
7 |
8 | model.connect(db.params, function(err) {
9 | if (err) throw err;
10 | });
11 |
12 | var server = restify.createServer()
13 | .use(restify.plugins.fullResponse())
14 | .use(restify.plugins.queryParser())
15 | .use(restify.plugins.bodyParser());
16 |
17 | controller.context(server, '/todo/api', model);
18 |
19 | server.get({
20 | path: '/todo/api/items/:id'
21 | }, controller.read);
22 |
23 |
24 | server.get({
25 | path: '/todo/api/items'
26 | }, controller.list);
27 |
28 | server.post({
29 | path: '/todo/api/items'
30 | }, controller.save);
31 |
32 | server.del({
33 | path: '/todo/api/items/:id'
34 | }, controller.destroy);
35 |
36 |
37 | server.get({
38 | path: '/todo/*',
39 | name: 'todoapi'
40 | }, restify.plugins.serveStatic({
41 | 'directory': __dirname,
42 | 'default': 'index.html'
43 | }));
44 |
45 | var port = process.env.PORT || 30080;
46 | server.listen(port, function (err) {
47 | if (err)
48 | console.error(err);
49 | else
50 | console.log('App is ready at : ' + port);
51 | });
52 |
53 | if (process.env.environment == 'production')
54 | process.on('uncaughtException', function (err) {
55 | console.error(JSON.parse(JSON.stringify(err, ['stack', 'message', 'inner'], 2)))
56 | });
57 |
58 |
59 |
--------------------------------------------------------------------------------
/tools/sidecar-busybox-LogAgent/sidecar-busybox-logagent.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: counter
5 | spec:
6 | containers:
7 | - name: count
8 | image: busybox:latest
9 | args:
10 | - /bin/sh
11 | - -c
12 | - >
13 | i=0;
14 | while true;
15 | do
16 | echo "$i: $(date)" >> /var/log/1.log;
17 | echo "$(date) INFO $i" >> /var/log/2.log;
18 | i=$((i+1));
19 | sleep 1;
20 | done
21 | volumeMounts:
22 | - name: varlog
23 | mountPath: /var/log
24 | - name: log-agent
25 | image: custom-busybox:latest
26 | args:
27 | - /bin/sh
28 | - -c
29 | - >
30 | while true;
31 | do
32 | cp /var/log/1.log /dump-nfs/;
33 | cp /var/log/2.log /dump-nfs/;
34 | sleep 10;
35 | done
36 | volumeMounts:
37 | - name: varlog
38 | mountPath: /var/log
39 | - name: dump-volume
40 | mountPath: /dump-nfs
41 | volumes:
42 | - name: varlog
43 | emptyDir: {}
44 | - name: dump-volume
45 | persistentVolumeClaim:
46 | claimName: dump-volume-claim
47 | ---
48 | apiVersion: v1
49 | kind: PersistentVolumeClaim
50 | metadata:
51 | name: dump-volume-claim
52 | spec:
53 | accessModes:
54 | - ReadWriteMany
55 | resources:
56 | requests:
57 | storage: 1Gi
58 | ---
59 | apiVersion: v1
60 | kind: PersistentVolume
61 | metadata:
62 | name: dump-volume-pv
63 | spec:
64 | accessModes:
65 | - ReadWriteMany
66 | capacity:
67 | storage: 1Gi
68 | nfs:
69 | path: /shared/dump
70 | server: nfs-ip-address-here
71 | persistentVolumeReclaimPolicy: Retain
72 |
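  73 | # Usage sketch (assumes kubectl access and a reachable NFS export): replace
  74 | # "nfs-ip-address-here" and /shared/dump with a real NFS server and path, make sure the
  75 | # custom-busybox image from the accompanying readme is tagged custom-busybox:latest, then:
  76 | #   kubectl apply -f sidecar-busybox-logagent.yaml
  77 | #   kubectl exec counter -c log-agent -- ls -l /dump-nfs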
--------------------------------------------------------------------------------
/tools/sidecar-fluentd-LogAgent/sidecar-fluentd-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: app-with-sidecar-td
5 | namespace: dev-project
6 | labels:
7 | app: app
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | app: app
13 | template:
14 | metadata:
15 | labels:
16 | app: app
17 | spec:
18 | containers:
19 |
20 | # Primary Container
21 | - name: tomcat
22 | image: saravak/tomcat8
23 | volumeMounts:
24 | - name: applog
25 | mountPath: /opt/tomcat/webapps/ROOT/WEB-INF/log/
26 | - name: tomcatlog
27 | mountPath: /opt/tomcat/logs
28 | ports:
29 | - containerPort: 8080
30 | resources:
31 | limits:
32 | memory: 1028Mi
33 |
34 | # This is for FluentD Logging Container
35 | - name: fluentd
36 | image: openshift/ose-logging-fluentd:v3.11.248
37 | env:
38 | - name: FLUENT_UID
39 | value: root
40 | - name: FLUENT_CONF
41 | value: fluent.conf
42 | - name: FLUENTD_ARGS
43 | value: -c /fluentd/etc/fluent.conf
44 | volumeMounts:
45 | #The logs to be shared with tomcat app
46 | - name: applog
47 | mountPath: /opt/tomcat/webapps/ROOT/WEB-INF/log/
48 | #Fluentd configmap
49 | - name: fluentd-confmap
50 | mountPath: /fluentd/etc/
51 |
52 | volumes:
53 | - name: applog
54 | emptyDir: {}
55 | - name: tomcatlog
56 | emptyDir: {}
57 | - name: fluentd-confmap
58 | configMap:
59 | name: fluentd-confmap
60 |
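  61 | # Note: this Deployment mounts a ConfigMap named "fluentd-confmap", which must already exist
  62 | # in the dev-project namespace; it is defined in app-with-sidecarfluentd-deployment-configmap.yaml.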
--------------------------------------------------------------------------------
/tools/sidecar-busybox-LogAgent/readme.md:
--------------------------------------------------------------------------------
1 |
2 | #### Preparation
3 | ```bash
4 | #Create project directory.
5 | mkdir custom-busybox
6 | cd custom-busybox
7 |
8 | #Prepare your Dockerfile
9 |
10 | #Use docker build command to build the image.
  11 | #This example names the image as custom-busybox:v1.32.1
12 | docker build -t custom-busybox:v1.32.1 ./
13 | Sending build context to Docker daemon 498.7kB
14 | Step 1/2 : FROM busybox:latest
15 | ---> 491198851f0c
16 | Step 2/2 : COPY ./rsync /bin/rsync
17 | ---> 036d1070db5c
18 | Successfully built 036d1070db5c
19 | Successfully tagged custom-busybox:v1.32.1
20 |
21 | #Check the newly created custom-busybox image
22 | docker images
23 | REPOSITORY TAG IMAGE ID CREATED SIZE
24 | custom-busybox v1.32.1 036d1070db5c 34 seconds ago 1.73MB
25 |
  26 | #(OPTIONAL) Save the image to a local tar archive
27 | docker save -o /home/user1/custom-busybox.tar custom-busybox:v1.32.1
28 |
29 | ls -l /home/user1/custom-busybox.tar
30 |
31 | #(OPTIONAL)Copy the image to your remote server
32 | scp /home/user1/custom-busybox.tar root@remote-server-ip:/home/user1/
33 |
  34 | #Load the image from the tar file on your remote server
35 | docker load -i /home/user1/custom-busybox.tar
36 | 6fac254bbae9: Loading layer [==================================================>] 498.2kB/498.2kB
37 | Loaded image: custom-busybox:v1.32.1
38 | ```
39 |
40 | #### Test your image
41 | ```bash
42 | #Create pod using log-counter-busybox.yaml
43 | kubectl create -f log-counter-busybox.yaml -n dev-namespace
44 | pod/counter-customized created
45 |
46 | kubectl get pod -n dev-namespace
47 | NAME READY STATUS RESTARTS AGE
48 | counter-customized 1/1 Running 0 91s
49 |
50 |
51 | ```
52 |
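  53 | #### Check the sidecar output (optional)
  54 | A quick way to confirm that the log-agent sidecar is copying files, assuming the pod and
  55 | container names from sidecar-busybox-logagent.yaml (pod `counter`, containers `count` and `log-agent`):
  56 | ```bash
  57 | #Tail the log generated by the main container
  58 | kubectl exec counter -c count -n dev-namespace -- tail -n 3 /var/log/1.log
  59 | 
  60 | #List the copies the sidecar writes into the shared NFS mount
  61 | kubectl exec counter -c log-agent -n dev-namespace -- ls -l /dump-nfs/
  62 | ```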
--------------------------------------------------------------------------------
/tools/sidecar-fluentd-LogAgent/app-with-sidecarfluentd-deployment-configmap.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: ConfigMap
4 | metadata:
5 | name: fluentd-confmap
6 | namespace: dev-project
7 | data:
8 | fluent.conf: |
   9 |     <source>
  10 |       @type tail
  11 |       path /logs/defaultServer/verbosegc*.log
  12 |       pos_file /var/log/1.log.pos
  13 |       tag wlp.verbose1
  14 |     </source>
  15 | 
  16 |     <match **>
  17 |       @type stdout
  18 |     </match>
19 | ---
20 | apiVersion: apps/v1
21 | kind: Deployment
22 | metadata:
23 | name: app-with-sidecar-td
24 | namespace: dev-project
25 | labels:
26 | app: app
27 | spec:
28 | replicas: 1
29 | selector:
30 | matchLabels:
31 | app: app
32 | template:
33 | metadata:
34 | labels:
35 | app: app
36 | spec:
37 | containers:
38 |
39 | # Primary Container
40 | - name: tomcat
41 | image: saravak/tomcat8
42 | volumeMounts:
43 | - name: applog
44 | mountPath: /opt/tomcat/webapps/ROOT/WEB-INF/log/
45 | - name: tomcatlog
46 | mountPath: /opt/tomcat/logs
47 | ports:
48 | - containerPort: 8080
49 | resources:
50 | limits:
51 | memory: 1028Mi
52 |
53 | # This is for FluentD Logging Container
54 | - name: fluentd
55 | image: openshift/ose-logging-fluentd:v3.11.248
56 | env:
57 | - name: FLUENT_UID
58 | value: root
59 | - name: FLUENT_CONF
60 | value: fluent.conf
61 | - name: FLUENTD_ARGS
62 | value: -c /fluentd/etc/fluent.conf
63 | volumeMounts:
64 | #The logs to be shared with tomcat app
65 | - name: applog
66 | mountPath: /opt/tomcat/webapps/ROOT/WEB-INF/log/
67 | #Fluentd configmap
68 | - name: fluentd-confmap
69 | mountPath: /fluentd/etc/
70 |
71 | volumes:
72 | - name: applog
73 | emptyDir: {}
74 | - name: tomcatlog
75 | emptyDir: {}
76 | - name: fluentd-confmap
77 | configMap:
78 | name: fluentd-confmap
79 |
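  80 | # Rough usage (assumes oc access to the dev-project namespace):
  81 | #   oc apply -f app-with-sidecarfluentd-deployment-configmap.yaml
  82 | #   oc get pods -n dev-project -l app=app
  83 | #   oc logs <app-with-sidecar-td pod name> -c fluentd -n dev-project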
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/build/todo/lib/dependencies/angular-resource.min.js:
--------------------------------------------------------------------------------
1 | /*
2 | AngularJS v1.2.21
3 | (c) 2010-2014 Google, Inc. http://angularjs.org
4 | License: MIT
5 | */
6 | (function(H,a,A){'use strict';function D(p,g){g=g||{};a.forEach(g,function(a,c){delete g[c]});for(var c in p)!p.hasOwnProperty(c)||"$"===c.charAt(0)&&"$"===c.charAt(1)||(g[c]=p[c]);return g}var v=a.$$minErr("$resource"),C=/^(\.[a-zA-Z_$][0-9a-zA-Z_$]*)+$/;a.module("ngResource",["ng"]).factory("$resource",["$http","$q",function(p,g){function c(a,c){this.template=a;this.defaults=c||{};this.urlParams={}}function t(n,w,l){function r(h,d){var e={};d=x({},w,d);s(d,function(b,d){u(b)&&(b=b());var k;if(b&&
7 | b.charAt&&"@"==b.charAt(0)){k=h;var a=b.substr(1);if(null==a||""===a||"hasOwnProperty"===a||!C.test("."+a))throw v("badmember",a);for(var a=a.split("."),f=0,c=a.length;f DEPLOY FAILED
24 |
25 | git clone xxxxxgit
26 |
27 | #Configure git config
28 | git config --global user.email="developer@email.com"
29 | git config --global user.name="developer"
30 | cat ~/.gitconfig
31 | [user]
32 | name = developer
33 | email = developer@email.com
34 |
35 | git checkout master
36 | git checkout -b source-build
37 | git push -u origin source-build
38 |
39 | oc logs bc/greet
40 | --->error on xxxx.json
41 |
42 | python -m json.tool xxxx.json
43 | --->find the error syntax
44 | vi xxxx.json
45 | --->fix the syntax
46 |
47 | git commit -a -m "xxxx"
48 | git push
49 |
50 | oc start-build greet
51 | oc logs -f bc/greet
52 | oc get pod
53 |
54 | oc expose service greet
55 |
56 | oc get route
57 | curl
58 | --->Check the app from browser too
59 | ```
60 |
61 |
62 | ##### Dockerfile
63 | ###### Httpd-parent Dockerfile
64 | ```bash
65 | FROM registry.access.redhat.com/ubi8/ubi:8.0
66 |
67 | MAINTAINER Red Hat Training
68 |
69 | # DocumentRoot for Apache
70 | ENV DOCROOT=/var/www/html
71 |
  72 | RUN yum install -y --setopt=tsflags=nodocs --disableplugin=subscription-manager httpd && \
73 | yum clean all --disableplugin=subscription-manager -y && \
74 | echo "Hello from the httpd-parent container!" > ${DOCROOT}/index.html
75 |
76 | # Allows child images to inject their own content into DocumentRoot
77 | ONBUILD COPY src/ ${DOCROOT}/
78 |
79 | EXPOSE 80
80 |
81 | # This stuff is needed to ensure a clean start
82 | RUN rm -rf /run/httpd && mkdir /run/httpd
83 |
84 | # Run as the root user
85 | USER root
86 |
87 | # Launch httpd
88 | CMD /usr/sbin/httpd -DFOREGROUND
89 | ```
90 |
91 | ###### Httpd-child Dockerfile
92 | ```bash
93 | FROM
94 |
95 | EXPOSE 8080
96 |
97 | # Labels consumed by OpenShift
  98 | LABEL io.k8s.description="A basic Apache" \
  99 |       io.k8s.display-name="Apache HTTP Server" \
 100 |       io.openshift.expose-services="8080:http" \
101 | io.openshift.tags="apache, httpd"
102 |
103 | # Optional
 104 | #RUN yum install -y --setopt=tsflags=nodocs --disableplugin=subscription-manager httpd && \
105 | # yum clean all -y --disableplugin=subscription-manager
106 |
107 | # Change webserver port to 8080
108 | RUN sed -i "s/Listen 80/Listen 8080/g" /etc/httpd/conf/httpd.conf
109 |
110 | # Permissions to allow container to run on OpenShift
 111 | RUN chgrp -R 0 /var/log/httpd /var/run/httpd && \
112 | chmod -R g=u /var/log/httpd /var/run/httpd
113 |
114 | # Ensure clean run (omit if already configured in parent-Dockerfile)
115 | RUN rm -rf /run/httpd && mkdir /run/httpd
116 |
117 | # Run as non-privileged user
118 | USER 1001
119 |
120 | # Launch httpd (omit if already configured in parent-Dockerfile)
121 | CMD /usr/sbin/httpd -DFOREGROUND
122 |
123 | ```
124 |
125 | ###### Another variation for httpd24
126 | ```bash
127 | FROM registry.access.redhat.com/rhscl/httpd-24-rhel7
128 |
129 | COPY src/ /var/www/html/
130 |
 131 | RUN echo "THIS IS INFORMATION" > /var/www/html/info.html
132 |
133 | EXPOSE 8080
134 |
 135 | LABEL io.k8s.description="xxx" io.k8s.display-name="xx" \
 136 |       io.openshift.expose-services="8080:http" io.openshift.tags="xxx"
137 |
 138 | RUN sed -i "s/Listen 80/Listen 8080/g" /etc/httpd/conf/httpd.conf
139 |
140 | USER 1001
141 |
142 | CMD run-httpd
143 | ```
144 |
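 145 | ###### Local build and smoke test
 146 | A minimal sketch for checking the child image locally; the image name httpd-child is a placeholder, and the FROM line must first point at the httpd-parent image built from the parent Dockerfile:
 147 | ```bash
 148 | docker build -t httpd-child .
 149 | docker run -d --name httpd-child -p 8080:8080 httpd-child
 150 | curl http://localhost:8080/
 151 | docker rm -f httpd-child
 152 | ```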
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/build/todo/index.html:
--------------------------------------------------------------------------------
   5 | To Do List
  27 | To Do List Application
  41 | To Do List
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/nodejs-source/todo/index.html:
--------------------------------------------------------------------------------
   5 | To Do List
  27 | To Do List Application
  41 | To Do List
--------------------------------------------------------------------------------
/labs/comprehensive-review/deploy/openshift/resources/nexus-template.json:
--------------------------------------------------------------------------------
1 | {
2 | "kind": "Template",
3 | "apiVersion": "v1",
4 | "metadata": {
5 | "name": "nexus-persistent",
6 | "annotations": {
7 | "openshift.io/display-name": "Nexus (Persistent)",
8 | "description": "This template creates a Nexus instance backed by a persistent data store.",
9 | "tags": "servers,nexus",
10 | "template.openshift.io/long-description": "This template creates a Nexus instance backed by a persistent data store. The data store contains the Nexus repositories which contain library modules.",
11 | "template.openshift.io/provider-display-name": "Red Hat Training"
12 | }
13 | },
14 | "labels": {
15 | "template": "nexus-persistent-template",
16 | "app": "nexus"
17 | },
18 | "objects": [
19 | {
20 | "kind": "Service",
21 | "apiVersion": "v1",
22 | "metadata": {
23 | "name": "nexus"
24 | },
25 | "spec": {
26 | "ports": [
27 | {
28 | "name": "nexus",
29 | "port": 8081
30 | }
31 | ],
32 | "selector": {
33 | "name": "nexus"
34 | }
35 | }
36 | },
37 | {
38 | "kind": "PersistentVolumeClaim",
39 | "apiVersion": "v1",
40 | "metadata": {
41 | "name": "nexus"
42 | },
43 | "spec": {
44 | "accessModes": [
45 | "ReadWriteOnce"
46 | ],
47 | "resources": {
48 | "requests": {
49 | "storage": "${VOLUME_CAPACITY}"
50 | }
51 | }
52 | }
53 | },
54 | {
55 | "kind": "DeploymentConfig",
56 | "apiVersion": "v1",
57 | "metadata": {
58 | "name": "nexus"
59 | },
60 | "spec": {
61 | "strategy": {
62 | "type": "Recreate"
63 | },
64 | "triggers": [
65 | {
66 | "type": "ConfigChange"
67 | }
68 | ],
69 | "replicas": 1,
70 | "selector": {
71 | "name": "nexus"
72 | },
73 | "template": {
74 | "metadata": {
75 | "labels": {
76 | "name": "nexus"
77 | }
78 | },
79 | "spec": {
80 | "containers": [
81 | {
82 | "name": "nexus",
83 | "image": "quay.io/${RHT_OCP4_QUAY_USER}/nexus:${NEXUS_VERSION}",
84 | "ports": [
85 | {
86 | "containerPort": 8081
87 | }
88 | ],
89 | "readinessProbe": {
90 | "timeoutSeconds": 1,
91 | "initialDelaySeconds": 5,
92 | "exec": {
93 | "command": [ "/bin/sh", "-i", "-c",
94 | "curl 127.0.0.1:8081"]
95 | }
96 | },
97 | "livenessProbe": {
98 | "timeoutSeconds": 1,
99 | "initialDelaySeconds": 30,
100 | "failureThreshold": 10,
101 | "tcpSocket": {
102 | "port": 8081
103 | }
104 | },
105 | "env": [
106 | ],
107 | "resources": {
108 | "limits": {
109 | "memory": "${MEMORY_LIMIT}"
110 | }
111 | },
112 | "volumeMounts": [
113 | {
114 | "name": "nexus-data",
115 | "mountPath": "/opt/nexus/sonatype-work"
116 | }
117 | ],
118 | "imagePullPolicy": "Always"
119 | }
120 | ],
121 | "volumes": [
122 | {
123 | "name": "nexus-data",
124 | "persistentVolumeClaim": {
125 | "claimName": "nexus"
126 | }
127 | }
128 | ]
129 | }
130 | }
131 | }
132 | }
133 | ],
134 | "parameters": [
135 | {
136 | "name": "MEMORY_LIMIT",
137 | "displayName": "Memory Limit",
138 | "description": "Maximum amount of memory the container can use.",
139 | "value": "512Mi",
140 | "required": true
141 | },
142 | {
143 | "name": "NAMESPACE",
144 | "displayName": "Namespace",
145 | "description": "The OpenShift Namespace where the ImageStream resides.",
146 | "value": "openshift"
147 | },
148 | {
149 | "name": "VOLUME_CAPACITY",
150 | "displayName": "Volume Capacity",
151 | "description": "Volume space available for data, e.g. 512Mi, 2Gi.",
152 | "value": "1Gi",
153 | "required": true
154 | },
155 | {
156 | "name": "NEXUS_VERSION",
157 | "displayName": "Version of Nexus Image",
158 | "description": "Version of Nexus image to be used (must be 2.x).",
159 | "value": "latest",
160 | "required": true
161 | },
162 | {
163 | "name": "RHT_OCP4_QUAY_USER",
164 | "displayName": "Quay.io namespace",
165 | "description": "Namespace where the Nexus image is stored in Quay.io",
166 | "required": true
167 | }
168 | ]
169 | }
170 |
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/build/todo/script/item.js:
--------------------------------------------------------------------------------
1 | var app = angular.module('items', ['ngResource', 'ngGrid', 'ui.bootstrap']);
2 |
3 | // Create a controller with name itemListController to bind to the grid section.
4 | app.controller('itemsListController', function ($scope, $rootScope, itemService) {
5 | // Initialize required information: sorting, the first page to show and the grid options.
6 | $scope.sortInfo = {fields: ['id'], directions: ['asc']};
7 | $scope.items = {currentPage: 1};
8 |
9 | $scope.gridOptions = {
10 | data: 'items.list',
11 | useExternalSorting: true,
12 | sortInfo: $scope.sortInfo,
13 |
14 |
15 | columnDefs: [
16 | { field: 'id', displayName: 'Id' },
17 | { field: 'description', displayName: 'Description' },
18 | { field: 'done', displayName: 'Done' },
19 | { field: '', width: 30, cellTemplate: '' }
20 | ],
21 |
22 | multiSelect: false,
23 | selectedItems: [],
24 | // Broadcasts an event when a row is selected, to signal the form that it needs to load the row data.
25 | afterSelectionChange: function (rowItem) {
26 | if (rowItem.selected) {
27 | $rootScope.$broadcast('itemselected', $scope.gridOptions.selectedItems[0].id);
28 | }
29 | }
30 | };
31 |
32 | // Refresh the grid, calling the appropriate rest method.
33 | $scope.refreshGrid = function () {
34 | var listitemsArgs = {
35 | page: $scope.items.currentPage,
36 | sortFields: $scope.sortInfo.fields[0],
37 | sortDirections: $scope.sortInfo.directions[0]
38 | };
39 |
40 | itemService.get(listitemsArgs, function (data) {
41 | $scope.items = data;
42 | })
43 | };
44 |
  45 | // Broadcast an event when an element in the grid is deleted. No real deletion is performed at this point.
46 | $scope.deleteRow = function (row) {
47 | $rootScope.$broadcast('deleteitem', row.entity.id);
48 | };
49 |
  50 | // Watch the sortInfo variable. If changes are detected then we need to refresh the grid.
51 | // This also works for the first page access, since we assign the initial sorting in the initialize section.
52 | $scope.$watch('sortInfo.fields[0]', function () {
53 | $scope.refreshGrid();
54 | }, true);
55 | $scope.$watch('sortInfo.directions[0]', function () {
56 | $scope.refreshGrid();
57 | }, true);
58 |
59 | // Do something when the grid is sorted.
60 | // The grid throws the ngGridEventSorted that gets picked up here and assigns the sortInfo to the scope.
  61 | // This will allow us to watch the sortInfo in the scope for changes and refresh the grid.
62 | $scope.$on('ngGridEventSorted', function (event, sortInfo) {
63 | $scope.sortInfo = sortInfo;
64 | });
65 |
  66 | // Picks up the event broadcasted when an item is saved or deleted to refresh the grid elements with the most
67 | // updated information.
68 | $scope.$on('refreshGrid', function () {
69 | $scope.refreshGrid();
70 | });
71 |
72 | // Picks the event broadcasted when the form is cleared to also clear the grid selection.
73 | $scope.$on('clear', function () {
74 | $scope.gridOptions.selectAll(false);
75 | });
76 | });
77 |
78 | // Create a controller with name itemsFormController to bind to the form section.
79 | app.controller('itemsFormController', function ($scope, $rootScope, itemService) {
80 | // Clears the form. Either by clicking the 'Clear' button in the form, or when a successful save is performed.
81 | $scope.clearForm = function () {
82 | $scope.item = null;
83 | // Resets the form validation state.
84 | $scope.itemForm.$setPristine();
85 | // Broadcast the event to also clear the grid selection.
86 | $rootScope.$broadcast('clear');
87 | };
88 |
  89 | // Calls the rest method to save an item.
90 | $scope.updateItem = function () {
91 | itemService.save($scope.item).$promise.then(
92 | function () {
93 | // Broadcast the event to refresh the grid.
94 | $rootScope.$broadcast('refreshGrid');
95 | // Broadcast the event to display a save message.
  96 | $rootScope.$broadcast('itemSaved');
97 | // XXX Generates null error in browser ?!?
98 | $scope.clearForm();
99 | },
100 | function () {
101 | // Broadcast the event for a server error.
102 | $rootScope.$broadcast('error');
103 | });
104 | };
105 |
 106 | // Picks up the event broadcasted when the item is selected from the grid and performs the item load by calling
 107 | // the appropriate rest service.
108 | $scope.$on('itemselected', function (event, id) {
109 | $scope.item = itemService.get({id: id});
110 | });
111 |
 112 | // Picks up the event broadcasted when the item is deleted from the grid and performs the actual item delete by
 113 | // calling the appropriate rest service.
114 | $scope.$on('deleteitem', function (event, id) {
115 | itemService.delete({id: id}).$promise.then(
116 | function () {
117 | // Broadcast the event to refresh the grid.
118 | $rootScope.$broadcast('refreshGrid');
119 | // Broadcast the event to display a delete message.
120 | $rootScope.$broadcast('itemDeleted');
121 | $scope.clearForm();
122 | },
123 | function () {
124 | // Broadcast the event for a server error.
125 | $rootScope.$broadcast('error');
126 | });
127 | });
128 | });
129 |
130 | // Create a controller with name alertMessagesController to bind to the feedback messages section.
131 | app.controller('alertMessagesController', function ($scope) {
132 | // Picks up the event to display a saved message.
133 | $scope.$on('itemSaved', function () {
134 | $scope.alerts = [
135 | { type: 'success', msg: 'Record saved successfully!' }
136 | ];
137 | });
138 |
139 | // Picks up the event to display a deleted message.
140 | $scope.$on('itemDeleted', function () {
141 | $scope.alerts = [
142 | { type: 'success', msg: 'Record deleted successfully!' }
143 | ];
144 | });
145 |
146 | // Picks up the event to display a server error message.
147 | $scope.$on('error', function () {
148 | $scope.alerts = [
149 | { type: 'danger', msg: 'There was a problem in the server!' }
150 | ];
151 | });
152 |
153 | $scope.closeAlert = function (index) {
154 | $scope.alerts.splice(index, 1);
155 | };
156 | });
157 |
158 | // Service that provides items operations
159 | app.factory('itemService', function ($resource) {
160 | return $resource('api/items/:id');
161 | });
162 |
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/nodejs-source/todo/script/item.js:
--------------------------------------------------------------------------------
1 | var app = angular.module('items', ['ngResource', 'ngGrid', 'ui.bootstrap']);
2 |
3 | // Create a controller with name itemListController to bind to the grid section.
4 | app.controller('itemsListController', function ($scope, $rootScope, itemService) {
5 | // Initialize required information: sorting, the first page to show and the grid options.
6 | $scope.sortInfo = {fields: ['id'], directions: ['asc']};
7 | $scope.items = {currentPage: 1};
8 |
9 | $scope.gridOptions = {
10 | data: 'items.list',
11 | useExternalSorting: true,
12 | sortInfo: $scope.sortInfo,
13 |
14 |
15 | columnDefs: [
16 | { field: 'id', displayName: 'Id' },
17 | { field: 'description', displayName: 'Description' },
18 | { field: 'done', displayName: 'Done' },
19 | { field: '', width: 30, cellTemplate: '' }
20 | ],
21 |
22 | multiSelect: false,
23 | selectedItems: [],
24 | // Broadcasts an event when a row is selected, to signal the form that it needs to load the row data.
25 | afterSelectionChange: function (rowItem) {
26 | if (rowItem.selected) {
27 | $rootScope.$broadcast('itemselected', $scope.gridOptions.selectedItems[0].id);
28 | }
29 | }
30 | };
31 |
32 | // Refresh the grid, calling the appropriate rest method.
33 | $scope.refreshGrid = function () {
34 | var listitemsArgs = {
35 | page: $scope.items.currentPage,
36 | sortFields: $scope.sortInfo.fields[0],
37 | sortDirections: $scope.sortInfo.directions[0]
38 | };
39 |
40 | itemService.get(listitemsArgs, function (data) {
41 | $scope.items = data;
42 | })
43 | };
44 |
  45 | // Broadcast an event when an element in the grid is deleted. No real deletion is performed at this point.
46 | $scope.deleteRow = function (row) {
47 | $rootScope.$broadcast('deleteitem', row.entity.id);
48 | };
49 |
  50 | // Watch the sortInfo variable. If changes are detected then we need to refresh the grid.
51 | // This also works for the first page access, since we assign the initial sorting in the initialize section.
52 | $scope.$watch('sortInfo.fields[0]', function () {
53 | $scope.refreshGrid();
54 | }, true);
55 | $scope.$watch('sortInfo.directions[0]', function () {
56 | $scope.refreshGrid();
57 | }, true);
58 |
59 | // Do something when the grid is sorted.
60 | // The grid throws the ngGridEventSorted that gets picked up here and assigns the sortInfo to the scope.
  61 | // This will allow us to watch the sortInfo in the scope for changes and refresh the grid.
62 | $scope.$on('ngGridEventSorted', function (event, sortInfo) {
63 | $scope.sortInfo = sortInfo;
64 | });
65 |
  66 | // Picks up the event broadcasted when an item is saved or deleted to refresh the grid elements with the most
67 | // updated information.
68 | $scope.$on('refreshGrid', function () {
69 | $scope.refreshGrid();
70 | });
71 |
72 | // Picks the event broadcasted when the form is cleared to also clear the grid selection.
73 | $scope.$on('clear', function () {
74 | $scope.gridOptions.selectAll(false);
75 | });
76 | });
77 |
78 | // Create a controller with name itemsFormController to bind to the form section.
79 | app.controller('itemsFormController', function ($scope, $rootScope, itemService) {
80 | // Clears the form. Either by clicking the 'Clear' button in the form, or when a successful save is performed.
81 | $scope.clearForm = function () {
82 | $scope.item = null;
83 | // Resets the form validation state.
84 | $scope.itemForm.$setPristine();
85 | // Broadcast the event to also clear the grid selection.
86 | $rootScope.$broadcast('clear');
87 | };
88 |
  89 | // Calls the rest method to save an item.
90 | $scope.updateItem = function () {
91 | itemService.save($scope.item).$promise.then(
92 | function () {
93 | // Broadcast the event to refresh the grid.
94 | $rootScope.$broadcast('refreshGrid');
95 | // Broadcast the event to display a save message.
  96 | $rootScope.$broadcast('itemSaved');
97 | // XXX Generates null error in browser ?!?
98 | $scope.clearForm();
99 | },
100 | function () {
101 | // Broadcast the event for a server error.
102 | $rootScope.$broadcast('error');
103 | });
104 | };
105 |
 106 | // Picks up the event broadcasted when the item is selected from the grid and performs the item load by calling
 107 | // the appropriate rest service.
108 | $scope.$on('itemselected', function (event, id) {
109 | $scope.item = itemService.get({id: id});
110 | });
111 |
 112 | // Picks up the event broadcasted when the item is deleted from the grid and performs the actual item delete by
 113 | // calling the appropriate rest service.
114 | $scope.$on('deleteitem', function (event, id) {
115 | itemService.delete({id: id}).$promise.then(
116 | function () {
117 | // Broadcast the event to refresh the grid.
118 | $rootScope.$broadcast('refreshGrid');
119 | // Broadcast the event to display a delete message.
120 | $rootScope.$broadcast('itemDeleted');
121 | $scope.clearForm();
122 | },
123 | function () {
124 | // Broadcast the event for a server error.
125 | $rootScope.$broadcast('error');
126 | });
127 | });
128 | });
129 |
130 | // Create a controller with name alertMessagesController to bind to the feedback messages section.
131 | app.controller('alertMessagesController', function ($scope) {
132 | // Picks up the event to display a saved message.
133 | $scope.$on('itemSaved', function () {
134 | $scope.alerts = [
135 | { type: 'success', msg: 'Record saved successfully!' }
136 | ];
137 | });
138 |
139 | // Picks up the event to display a deleted message.
140 | $scope.$on('itemDeleted', function () {
141 | $scope.alerts = [
142 | { type: 'success', msg: 'Record deleted successfully!' }
143 | ];
144 | });
145 |
146 | // Picks up the event to display a server error message.
147 | $scope.$on('error', function () {
148 | $scope.alerts = [
149 | { type: 'danger', msg: 'There was a problem in the server!' }
150 | ];
151 | });
152 |
153 | $scope.closeAlert = function (index) {
154 | $scope.alerts.splice(index, 1);
155 | };
156 | });
157 |
158 | // Service that provides items operations
159 | app.factory('itemService', function ($resource) {
160 | return $resource('api/items/:id');
161 | });
162 |
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/build/todo/lib/dependencies/css/ng-grid.min.css:
--------------------------------------------------------------------------------
1 | .ngGrid{background-color:#fdfdfd}.ngGrid input[type="checkbox"]{margin:0;padding:0}.ngGrid input{vertical-align:top}.ngGrid.unselectable{-moz-user-select:none;-khtml-user-select:none;-webkit-user-select:none;-o-user-select:none;user-select:none}.ngViewport{overflow:auto;min-height:20px}.ngViewport:focus{outline:0}.ngCanvas{position:relative}.ngVerticalBar{position:absolute;right:0;width:0}.ngVerticalBarVisible{width:1px;background-color:#d4d4d4}#testDataLength{position:absolute;top:0;right:0;z-index:-100000}.ngHeaderContainer{position:relative;overflow:hidden;font-weight:bold;background-color:inherit}.ngHeaderCell{position:absolute;top:0;bottom:0;background-color:inherit}.ngHeaderCell.pinned{z-index:1}.ngHeaderSortColumn{position:absolute;overflow:hidden}.ngTopPanel{position:relative;z-index:1;background-color:#eaeaea;border-bottom:1px solid #d4d4d4}.ngSortButtonDown{position:absolute;top:3px;left:0;right:0;margin-left:auto;margin-right:auto;border-color:gray transparent;border-style:solid;border-width:0 5px 5px 5px;height:0;width:0}.ngNoSort{cursor:default}.ngHeaderButton{position:absolute;right:2px;top:8px;-moz-border-radius:50%;-webkit-border-radius:50%;border-radius:50%;width:14px;height:14px;z-index:1;background-color:#9fbbb4;cursor:pointer}.ngSortButtonUp{position:absolute;top:3px;left:0;right:0;margin-left:auto;margin-right:auto;border-color:gray transparent;border-style:solid;border-width:5px 5px 0 5px;height:0;width:0}.ngHeaderScroller{position:absolute;background-color:inherit}.ngSortPriority{position:absolute;top:-5px;left:1px;font-size:6pt;font-weight:bold}.ngHeaderGrip{cursor:col-resize;width:10px;right:-5px;top:0;height:100%;position:absolute;background-color:transparent}.ngHeaderText{padding:5px;-moz-box-sizing:border-box;-webkit-box-sizing:border-box;box-sizing:border-box;white-space:nowrap;-ms-text-overflow:ellipsis;-o-text-overflow:ellipsis;text-overflow:ellipsis;overflow:hidden}.ngHeaderButtonArrow{position:absolute;top:4px;left:3px;width:0;height:0;border-style:solid;border-width:6.5px 4.5px 0 4.5px;border-color:#4d4d4d transparent transparent transparent}.ngPinnedIcon{background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAYAAACNMs+9AAAABGdBTUEAALGPC/xhBQAAAAlwSFlzAAAOwgAADsIBFShKgAAAABp0RVh0U29mdHdhcmUAUGFpbnQuTkVUIHYzLjUuMTAw9HKhAAAAmElEQVQoU33PQapBURjA8UtkwJuaWYGSgfQWYBMvczPmTCzAAGVuaA228BZhRCkDGSmE31FucuRfvzq3vr5zT/JSjSU7DsypEPXDkDVn2hSIytJhw4kWGaLCxgHh2gt/RBuLzNhz5caWPjnSqqw4EraFfwznf8qklWjwy4IRTerkiQoPGtPl40OehcEJvcfXl8LglLfBJLkDcMgbgHlHhK8AAAAASUVORK5CYII=);background-repeat:no-repeat;position:absolute;right:5px;top:5px;height:10px;width:10px}.ngUnPinnedIcon{background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAYAAACNMs+9AAAABGdBTUEAALGPC/xhBQAAAAlwSFlzAAAOwgAADsIBFShKgAAAABp0RVh0U29mdHdhcmUAUGFpbnQuTkVUIHYzLjUuMTAw9HKhAAAAlElEQVQoU33PPQrCQBRF4fFnI2KfZVi5ARvdgo1l6mwmkCJVOgluwd5OwUoDtnoOxAei8cLXTN7cvEl/skCNDCMPfsUPO5zQwOHIDEvYtMURHe6wOVLgigvOePRyeDkyR4ln7wZ//7XfFBu8B23+aDJjrHGAwza7hjtHJvDmHg7b7Bru7AMjK7Rw2ObBVHDY5oGk9AKQNB2zy8MBTgAAAABJRU5ErkJggg==);background-repeat:no-repeat;position:absolute;height:10px;width:10px;right:5px;top:5px}.ngColMenu{right:2px;padding:5px;top:25px;-moz-border-radius:3px;-webkit-border-radius:3px;border-radius:3px;background-color:#bdd0cb;position:absolute;border:2px solid 
#d4d4d4;z-index:1}.ngColListCheckbox{position:relative;right:3px;top:4px}.ngColList{list-style-type:none;margin-top:2px;margin-left:8px}.ngColListItem{white-space:nowrap}.ngMenuText{position:relative;top:2px;left:2px}.ngGroupPanel{background-color:#eaeaea;overflow:hidden;border-bottom:1px solid #d4d4d4}.ngGroupPanelDescription{margin-top:5px;margin-left:5px}.ngGroupList{list-style-type:none;margin:0;padding:0}.ngAggHeader{position:absolute;border:0}.ngGroupElement{float:left;height:100%;width:100%}.ngGroupIcon{background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAA4AAAANCAYAAACZ3F9/AAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsMAAA7DAcdvqGQAAAAadEVYdFNvZnR3YXJlAFBhaW50Lk5FVCB2My41LjEwMPRyoQAAAEFJREFUKFNjoAhISkr+h2J5JDZODNXGwGBsbPwfhIGAA8bGh6HaGBiAGhxAGJmND4M1gQCSM0adCsVQbcPcqQwMALWDGyDvWPefAAAAAElFTkSuQmCC);background-repeat:no-repeat;height:15px;width:15px;position:absolute;right:-2px;top:2px}.ngGroupedByIcon{background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAA4AAAANCAYAAACZ3F9/AAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsMAAA7DAcdvqGQAAAAadEVYdFNvZnR3YXJlAFBhaW50Lk5FVCB2My41LjEwMPRyoQAAAElJREFUKFNjoAhISkr+R8LyaHwMDNXGwGBsbPwfhoGAA5mPDUO1oWpE52PDYE0gALTFAYbR+dgwWBMIoPlh1I9ADNU2NPzIwAAAFQYI9E4OLvEAAAAASUVORK5CYII=);background-repeat:no-repeat;height:15px;width:15px;position:absolute;right:-2px;top:2px}.ngGroupName{background-color:#fdfdfd;border:1px solid #d4d4d4;padding:3px 10px;float:left;margin-left:0;margin-top:2px;-moz-border-radius:3px;-webkit-border-radius:3px;border-radius:3px;font-weight:bold}.ngGroupArrow{width:0;height:0;border-top:6px solid transparent;border-bottom:6px solid transparent;border-left:6px solid black;margin-top:10px;margin-left:5px;margin-right:5px;float:right}.ngGroupingNumber{position:absolute;right:-10px;top:-2px}.ngAggArrowCollapsed{position:absolute;left:8px;bottom:10px;width:0;height:0;border-style:solid;border-width:5px 0 5px 8.7px;border-color:transparent transparent transparent #000}.ngGroupItem{float:left}.ngGroupItem:first-child{margin-left:2px}.ngRemoveGroup{width:5px;-moz-opacity:.4;opacity:.4;margin-top:-1px;margin-left:5px}.ngRemoveGroup:hover{color:black;text-decoration:none;cursor:pointer;-moz-opacity:.7;opacity:.7}.ngAggArrowExpanded{position:absolute;left:8px;bottom:10px;width:0;height:0;border-style:solid;border-width:0 0 9px 9px;border-color:transparent transparent #000 transparent}.ngAggregate{position:absolute;background-color:#c9dde1;border-bottom:1px solid beige;overflow:hidden;top:0;bottom:0;right:-1px;left:0}.ngAggregateText{position:absolute;left:27px;top:5px;line-height:20px;white-space:nowrap}.ngRow{position:absolute;border-bottom:1px solid #d4d4d4}.ngRow.odd{background-color:#fdfdfd}.ngRow.even{background-color:#f3f3f3}.ngRow.selected{background-color:#c9dde1}.ngCell{overflow:hidden;position:absolute;top:0;bottom:0;background-color:inherit}.ngCell.pinned{z-index:1}.ngCellText{padding:5px;-moz-box-sizing:border-box;-webkit-box-sizing:border-box;box-sizing:border-box;white-space:nowrap;-ms-text-overflow:ellipsis;-o-text-overflow:ellipsis;text-overflow:ellipsis;overflow:hidden}.ngSelectionCell{margin-top:9px;margin-left:6px}.ngSelectionHeader{position:absolute;top:11px;left:6px}.ngCellElement:focus{outline:0;background-color:#b3c4c7}.ngRow.canSelect{cursor:pointer}.ngSelectionCheckbox{margin-top:9px;margin-left:6px}.ngFooterPanel{background-color:#eaeaea;padding:0;border-top:1px solid 
#d4d4d4;position:relative}.nglabel{display:block;float:left;font-weight:bold;padding-right:5px}.ngTotalSelectContainer{float:left;margin:5px;margin-top:7px}.ngFooterSelectedItems{padding:2px}.ngFooterTotalItems.ngnoMultiSelect{padding:0!important}.ngGridMaxPagesNumber{vertical-align:middle}.ngPagerFirstBar{width:10px;border-left:2px solid #4d4d4d;margin-top:-6px;height:12px;margin-left:-3px}.ngPagerButton{height:25px;min-width:26px}.ngPagerFirstTriangle{width:0;height:0;border-style:solid;border-width:5px 8.7px 5px 0;border-color:transparent #4d4d4d transparent transparent;margin-left:2px}.ngPagerNextTriangle{margin-left:1px}.ngPagerPrevTriangle{margin-left:0}.ngPagerLastTriangle{width:0;height:0;border-style:solid;border-width:5px 0 5px 8.7px;border-color:transparent transparent transparent #4d4d4d;margin-left:-1px}.ngPagerLastBar{width:10px;border-left:2px solid #4d4d4d;margin-top:-6px;height:12px;margin-left:1px}.ngFooterTotalItems{padding:2px}
--------------------------------------------------------------------------------
/labs/multicontainer-design/deploy/nodejs/nodejs-source/todo/lib/dependencies/css/ng-grid.min.css:
--------------------------------------------------------------------------------
1 | .ngGrid{background-color:#fdfdfd}.ngGrid input[type="checkbox"]{margin:0;padding:0}.ngGrid input{vertical-align:top}.ngGrid.unselectable{-moz-user-select:none;-khtml-user-select:none;-webkit-user-select:none;-o-user-select:none;user-select:none}.ngViewport{overflow:auto;min-height:20px}.ngViewport:focus{outline:0}.ngCanvas{position:relative}.ngVerticalBar{position:absolute;right:0;width:0}.ngVerticalBarVisible{width:1px;background-color:#d4d4d4}#testDataLength{position:absolute;top:0;right:0;z-index:-100000}.ngHeaderContainer{position:relative;overflow:hidden;font-weight:bold;background-color:inherit}.ngHeaderCell{position:absolute;top:0;bottom:0;background-color:inherit}.ngHeaderCell.pinned{z-index:1}.ngHeaderSortColumn{position:absolute;overflow:hidden}.ngTopPanel{position:relative;z-index:1;background-color:#eaeaea;border-bottom:1px solid #d4d4d4}.ngSortButtonDown{position:absolute;top:3px;left:0;right:0;margin-left:auto;margin-right:auto;border-color:gray transparent;border-style:solid;border-width:0 5px 5px 5px;height:0;width:0}.ngNoSort{cursor:default}.ngHeaderButton{position:absolute;right:2px;top:8px;-moz-border-radius:50%;-webkit-border-radius:50%;border-radius:50%;width:14px;height:14px;z-index:1;background-color:#9fbbb4;cursor:pointer}.ngSortButtonUp{position:absolute;top:3px;left:0;right:0;margin-left:auto;margin-right:auto;border-color:gray transparent;border-style:solid;border-width:5px 5px 0 5px;height:0;width:0}.ngHeaderScroller{position:absolute;background-color:inherit}.ngSortPriority{position:absolute;top:-5px;left:1px;font-size:6pt;font-weight:bold}.ngHeaderGrip{cursor:col-resize;width:10px;right:-5px;top:0;height:100%;position:absolute;background-color:transparent}.ngHeaderText{padding:5px;-moz-box-sizing:border-box;-webkit-box-sizing:border-box;box-sizing:border-box;white-space:nowrap;-ms-text-overflow:ellipsis;-o-text-overflow:ellipsis;text-overflow:ellipsis;overflow:hidden}.ngHeaderButtonArrow{position:absolute;top:4px;left:3px;width:0;height:0;border-style:solid;border-width:6.5px 4.5px 0 4.5px;border-color:#4d4d4d transparent transparent transparent}.ngPinnedIcon{background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAYAAACNMs+9AAAABGdBTUEAALGPC/xhBQAAAAlwSFlzAAAOwgAADsIBFShKgAAAABp0RVh0U29mdHdhcmUAUGFpbnQuTkVUIHYzLjUuMTAw9HKhAAAAmElEQVQoU33PQapBURjA8UtkwJuaWYGSgfQWYBMvczPmTCzAAGVuaA228BZhRCkDGSmE31FucuRfvzq3vr5zT/JSjSU7DsypEPXDkDVn2hSIytJhw4kWGaLCxgHh2gt/RBuLzNhz5caWPjnSqqw4EraFfwznf8qklWjwy4IRTerkiQoPGtPl40OehcEJvcfXl8LglLfBJLkDcMgbgHlHhK8AAAAASUVORK5CYII=);background-repeat:no-repeat;position:absolute;right:5px;top:5px;height:10px;width:10px}.ngUnPinnedIcon{background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAYAAACNMs+9AAAABGdBTUEAALGPC/xhBQAAAAlwSFlzAAAOwgAADsIBFShKgAAAABp0RVh0U29mdHdhcmUAUGFpbnQuTkVUIHYzLjUuMTAw9HKhAAAAlElEQVQoU33PPQrCQBRF4fFnI2KfZVi5ARvdgo1l6mwmkCJVOgluwd5OwUoDtnoOxAei8cLXTN7cvEl/skCNDCMPfsUPO5zQwOHIDEvYtMURHe6wOVLgigvOePRyeDkyR4ln7wZ//7XfFBu8B23+aDJjrHGAwza7hjtHJvDmHg7b7Bru7AMjK7Rw2ObBVHDY5oGk9AKQNB2zy8MBTgAAAABJRU5ErkJggg==);background-repeat:no-repeat;position:absolute;height:10px;width:10px;right:5px;top:5px}.ngColMenu{right:2px;padding:5px;top:25px;-moz-border-radius:3px;-webkit-border-radius:3px;border-radius:3px;background-color:#bdd0cb;position:absolute;border:2px solid 
#d4d4d4;z-index:1}.ngColListCheckbox{position:relative;right:3px;top:4px}.ngColList{list-style-type:none;margin-top:2px;margin-left:8px}.ngColListItem{white-space:nowrap}.ngMenuText{position:relative;top:2px;left:2px}.ngGroupPanel{background-color:#eaeaea;overflow:hidden;border-bottom:1px solid #d4d4d4}.ngGroupPanelDescription{margin-top:5px;margin-left:5px}.ngGroupList{list-style-type:none;margin:0;padding:0}.ngAggHeader{position:absolute;border:0}.ngGroupElement{float:left;height:100%;width:100%}.ngGroupIcon{background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAA4AAAANCAYAAACZ3F9/AAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsMAAA7DAcdvqGQAAAAadEVYdFNvZnR3YXJlAFBhaW50Lk5FVCB2My41LjEwMPRyoQAAAEFJREFUKFNjoAhISkr+h2J5JDZODNXGwGBsbPwfhIGAA8bGh6HaGBiAGhxAGJmND4M1gQCSM0adCsVQbcPcqQwMALWDGyDvWPefAAAAAElFTkSuQmCC);background-repeat:no-repeat;height:15px;width:15px;position:absolute;right:-2px;top:2px}.ngGroupedByIcon{background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAA4AAAANCAYAAACZ3F9/AAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsMAAA7DAcdvqGQAAAAadEVYdFNvZnR3YXJlAFBhaW50Lk5FVCB2My41LjEwMPRyoQAAAElJREFUKFNjoAhISkr+R8LyaHwMDNXGwGBsbPwfhoGAA5mPDUO1oWpE52PDYE0gALTFAYbR+dgwWBMIoPlh1I9ADNU2NPzIwAAAFQYI9E4OLvEAAAAASUVORK5CYII=);background-repeat:no-repeat;height:15px;width:15px;position:absolute;right:-2px;top:2px}.ngGroupName{background-color:#fdfdfd;border:1px solid #d4d4d4;padding:3px 10px;float:left;margin-left:0;margin-top:2px;-moz-border-radius:3px;-webkit-border-radius:3px;border-radius:3px;font-weight:bold}.ngGroupArrow{width:0;height:0;border-top:6px solid transparent;border-bottom:6px solid transparent;border-left:6px solid black;margin-top:10px;margin-left:5px;margin-right:5px;float:right}.ngGroupingNumber{position:absolute;right:-10px;top:-2px}.ngAggArrowCollapsed{position:absolute;left:8px;bottom:10px;width:0;height:0;border-style:solid;border-width:5px 0 5px 8.7px;border-color:transparent transparent transparent #000}.ngGroupItem{float:left}.ngGroupItem:first-child{margin-left:2px}.ngRemoveGroup{width:5px;-moz-opacity:.4;opacity:.4;margin-top:-1px;margin-left:5px}.ngRemoveGroup:hover{color:black;text-decoration:none;cursor:pointer;-moz-opacity:.7;opacity:.7}.ngAggArrowExpanded{position:absolute;left:8px;bottom:10px;width:0;height:0;border-style:solid;border-width:0 0 9px 9px;border-color:transparent transparent #000 transparent}.ngAggregate{position:absolute;background-color:#c9dde1;border-bottom:1px solid beige;overflow:hidden;top:0;bottom:0;right:-1px;left:0}.ngAggregateText{position:absolute;left:27px;top:5px;line-height:20px;white-space:nowrap}.ngRow{position:absolute;border-bottom:1px solid #d4d4d4}.ngRow.odd{background-color:#fdfdfd}.ngRow.even{background-color:#f3f3f3}.ngRow.selected{background-color:#c9dde1}.ngCell{overflow:hidden;position:absolute;top:0;bottom:0;background-color:inherit}.ngCell.pinned{z-index:1}.ngCellText{padding:5px;-moz-box-sizing:border-box;-webkit-box-sizing:border-box;box-sizing:border-box;white-space:nowrap;-ms-text-overflow:ellipsis;-o-text-overflow:ellipsis;text-overflow:ellipsis;overflow:hidden}.ngSelectionCell{margin-top:9px;margin-left:6px}.ngSelectionHeader{position:absolute;top:11px;left:6px}.ngCellElement:focus{outline:0;background-color:#b3c4c7}.ngRow.canSelect{cursor:pointer}.ngSelectionCheckbox{margin-top:9px;margin-left:6px}.ngFooterPanel{background-color:#eaeaea;padding:0;border-top:1px solid 
#d4d4d4;position:relative}.nglabel{display:block;float:left;font-weight:bold;padding-right:5px}.ngTotalSelectContainer{float:left;margin:5px;margin-top:7px}.ngFooterSelectedItems{padding:2px}.ngFooterTotalItems.ngnoMultiSelect{padding:0!important}.ngGridMaxPagesNumber{vertical-align:middle}.ngPagerFirstBar{width:10px;border-left:2px solid #4d4d4d;margin-top:-6px;height:12px;margin-left:-3px}.ngPagerButton{height:25px;min-width:26px}.ngPagerFirstTriangle{width:0;height:0;border-style:solid;border-width:5px 8.7px 5px 0;border-color:transparent #4d4d4d transparent transparent;margin-left:2px}.ngPagerNextTriangle{margin-left:1px}.ngPagerPrevTriangle{margin-left:0}.ngPagerLastTriangle{width:0;height:0;border-style:solid;border-width:5px 0 5px 8.7px;border-color:transparent transparent transparent #4d4d4d;margin-left:-1px}.ngPagerLastBar{width:10px;border-left:2px solid #4d4d4d;margin-top:-6px;height:12px;margin-left:1px}.ngFooterTotalItems{padding:2px}
--------------------------------------------------------------------------------
/01.openshift-cheatsheet.md:
--------------------------------------------------------------------------------
1 | ## Red Hat OpenShift Container Platform (OCP 3.11)
2 |
3 | Reference: [OpenShift cheatsheet](https://design.jboss.org/redhatdeveloper/marketing/openshift_cheatsheet/cheatsheet/images/openshift_cheat_sheet_r1v1.pdf)
4 |
5 | - [Collecting OpenShift Cluster, Master, Node and Project Information](https://github.com/fahmifahim/openshift/blob/master/01.openshift-cheatsheet.md#collecting-openshift-cluster-master-node-and-project-information)
6 |
7 | - [Copy specific file inside a container to local host](https://github.com/fahmifahim/openshift/blob/master/01.openshift-cheatsheet.md#copy-specific-file-inside-a-container-to-local-host)
8 |
9 | Show Pod
10 | ```bash
11 | $ oc get pod -n default -o wide
12 | ```
13 |
14 | OpenShift docker/kubelet status-stop-start-status
15 | ```bash
16 | $ systemctl status docker atomic-openshift-node
17 | $ systemctl stop docker atomic-openshift-node
18 | $ systemctl start docker atomic-openshift-node
19 | $ systemctl status docker atomic-openshift-node
20 | ```
21 |
22 | Export all resources to yaml
23 | ```bash
24 | $ oc get all --all-namespaces --export -o yaml > export-file.yaml
  25 | # --export removes timestamps and other cluster-specific metadata
26 | ```
27 |
28 | ```bash
29 | # Show the current SCC
30 | $ oc get scc
31 |
32 | # Delete the anyuid and restricted SCC
33 | $ oc delete scc anyuid
34 | $ oc delete scc restricted
35 |
36 | $ oc adm policy reconcile-sccs
37 | $ oc adm policy reconcile-sccs --confirm
38 | ```
39 |
40 | Get pod-name and pod-scc
41 | ```bash
  42 | $ oc get pods -o=jsonpath='{.metadata.name}{"\t"}{.metadata.annotations}{"\n"}'
43 | # output will be as below
44 | # map[openshift.io/scc:anyuid]
45 | ```
46 |
  47 | Get name and fsGroups from SCC (Security Context Constraint)
48 | ```bash
  49 | $ oc get scc --no-headers | awk '{print "oc get scc "$1" -o jsonpath=@{.metadata.name}{.groups}@; echo \n"}' | sed 's/@/"/g' | sh
50 | # .metadata.name = SCC Name
51 | # .groups = SCC fsGroups
52 | # sed to change the @ to " at jsonpath grammar
53 | # sample result:
54 | anyuid[system:cluster-admins]
55 | hostaccess[]
56 | restricted[system:authenticated]
57 | ```
58 |
59 | Open Remote Shell session to a container
60 | ```bash
61 | # Enter into a container, and execute the "id" command
  62 | $ oc rsh pod/<pod-name> id
63 |
64 | # See the configuration of your internal registry
65 | $ oc rsh dc/docker-registry cat config.yaml
66 | ```
67 |
68 | [Check certificate built on OCP](https://access.redhat.com/documentation/en-us/openshift_container_platform/3.11/html/configuring_clusters/install-config-redeploying-certificates)
69 | ```bash
70 | $ ansible-playbook -i /etc/ansible/hosts /usr/share/ansible/openshift-ansible/playbooks/openshift-checks/certificate_expiry/easy-mode.yaml
71 | # Check the result on the output html/json file
72 | ```
73 | ### How to get a full id of a certain container
74 | ```bash
75 | $ docker ps
76 | CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
77 | ad6d5d32576a nginx:latest "nginx -g 'daemon of About a minute ago Up About a minute 80/tcp, 443/tcp nostalgic_sammet
78 | 9bab1a42d6a9 nginx:latest "nginx -g 'daemon of About a minute ago Up About a minute 80/tcp, 443/tcp mad_kowalevski
79 |
80 | $ docker ps -q --no-trunc | grep ad6d5d32576a
81 | ad6d5d32576ad3cb1fcaa59b564b8f6f22b079631080ab1a3bbac9199953eb7d
82 |
  83 | $ ls -l /var/lib/docker/containers/ad6d5d32576ad3cb1fcaa59b564b8f6f22b079631080ab1a3bbac9199953eb7d
  84 | # Directory details will be shown
85 | ```
86 |
87 | ### Cordon and Uncordon
88 | Cordon
89 | ```bash
90 | # Cordon 1 node
  91 | $ oc adm manage-node <node-name> --schedulable=false
92 |
93 | # Cordon nodes with node-selector (compute, infra, master)
94 | $ oc adm manage-node --selector=node-role.kubernetes.io/compute=true --schedulable=false
95 | $ oc adm manage-node --selector=node-role.kubernetes.io/infra=true --schedulable=false
96 | $ oc adm manage-node --selector=node-role.kubernetes.io/master=true --schedulable=false
97 |
98 | $ oc get nodes
99 | # The status of cordoned nodes will be : Ready,SchedulingDisabled
100 | ```
101 | Uncordon
102 | ```bash
103 | # Uncordon 1 node
 104 | $ oc adm manage-node <node-name> --schedulable=true
105 |
 106 | # Uncordon nodes with node-selector (compute, infra, master)
107 | $ oc adm manage-node --selector=node-role.kubernetes.io/compute=true --schedulable=true
108 | $ oc adm manage-node --selector=node-role.kubernetes.io/infra=true --schedulable=true
109 | $ oc adm manage-node --selector=node-role.kubernetes.io/master=true --schedulable=true
110 |
111 | $ oc get nodes
112 | # The status of uncordoned nodes will be : Ready
113 | ```
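Before maintenance you usually also drain a cordoned node so that its pods get rescheduled elsewhere. A minimal hedged sketch (the node name is a placeholder; on newer clusters `oc adm cordon`/`oc adm uncordon` replace `oc adm manage-node`):
```bash
# Evict the pods from the cordoned node (DaemonSet pods are left in place)
$ oc adm drain <node-name> --ignore-daemonsets

# After maintenance, make the node schedulable again
$ oc adm uncordon <node-name>
```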
114 |
115 |
116 |
117 | ### OpenShift
118 | ```bash
119 |
120 | ===== Image Stream =====
121 | # oc get is -n openshift is=imagestream
122 | # oc -o yaml new-app php~https://github.com/sandervanvugt/simpleapp --name=simple-app > simple-app.yaml
123 |
124 |
125 | ===== Deployment Config =====
126 | # oc get dc dc=deploymentconfig
127 | # oc get rc rc=replicationcontroller
128 | # oc get pods
129 | # oc get pods -o wide
130 | # oc get pods --show-labels
131 |
132 | # oc describe dc
133 | # oc describe rc
134 | # oc describe pods
135 | # oc logs
136 |
137 |
138 |
139 |
140 | ===== Templates =====
141 | -List templates
142 | # oc get templates --namespace openshift
143 |
144 |
145 |
146 |
147 | ===== Persistent Storage (PV & PVC) =====
148 | -Access modes for persistent storage:
149 | ReadWriteOnce (RWO)
150 | The volume can be mounted as read/write by a single node.
151 | ReadOnlyMany (ROX)
152 | The volume can be mounted as read-only by many nodes.
153 | ReadWriteMany (RWX)
154 | The volume can be mounted as read/write by many nodes.
155 |
156 | # oc get pvc
157 |
158 | 【1】Setting up the NFS server:
159 | # yum install nfs-utils
160 | # mkdir /storage
161 | # chown nfsnobody.nfsnobody /storage
162 | # chmod 0700 /storage
163 | # echo "/storage *(rw,async,all_squash)" >> /etc/exports
164 | # systemctl enable --now nfs-server
165 | # ufw status
166 |
167 | 【2】Create Persistent Volume (PV)
168 | -Create yaml(nfs-pv.yml) file:
169 | apiVersion: v1
170 | kind: PersistentVolume
171 | metadata:
172 | name: nfs-pv
173 | spec:
174 | capacity:
175 | storage: 2Gi
176 | accessModes:
177 | - ReadWriteMany
178 | persistentVolumeReclaimPolicy: Retain
179 | nfs:
180 | path: /storage
181 | server: 172.17.0.1
182 | readOnly: false
183 |
184 | 【3】Adding Persistent Volume (PV)
185 | # oc login -u system:admin -p anything
186 | # oc create -f nfs-pv.yml
187 | # oc get pv | grep nfs
188 | # oc describe pv nfs-pv
189 |
190 | 【4】Creating Persistent Volume Claim (PVC)
191 | -Create yaml(nfs-pvc.yml) file:
192 | kind: PersistentVolumeClaim
193 | apiVersion: v1
194 | metadata:
195 | name: nfs-pv-claim
196 | spec:
197 | accessModes:
198 | - ReadWriteMany
199 | resources:
200 | requests:
201 | storage: 100Mi
202 |
203 | 【5】Creating PVC
204 | # oc whoami
205 | # oc login -u developer -p anything
206 | # oc create -f nfs-pvc.yml
207 | # oc describe pvc nfs-pv-claim
208 | # oc get pvc
209 |
210 | 【6】Creating the pod
211 | # oc create -f nfs-pv-pod.yaml
212 | # oc describe pod nfs-pv-pod
213 | (check the Volumes section, also check Events)
214 | # oc logs pod/nfs-pv-pod
215 |
216 | -Create nfs-pv-pod.yaml
217 | kind: Pod
218 | apiVersion: v1
219 | metadata:
220 | name: nfs-pv-pod
221 | spec:
222 | volumes:
223 | - name: nfsvolume   # note: this name has nothing to do with the PV name
224 | persistentVolumeClaim:
225 | claimName: nfs-pv-claim
226 | containers:
227 | - name: nfs-client1
228 | image: toccoag/openshift-nginx
229 | ports:
230 | - containerPort: 8081
231 | name: "http-server1"
232 | volumeMounts:
233 | - mountPath: "/mnt"
234 | name: nfsvolume
235 | resources: {}
236 | - name: nfs-client2
237 | image: toccoag/openshift-nginx
238 | ports:
239 | - containerPort: 8082
240 | name: "http-server2"
241 | volumeMounts:
242 | - mountPath: "/nfsshare"
243 | name: nfsvolume
244 | resources: {}
245 |
246 | 【7】Verifying current configuration
247 | # oc describe pod
248 | # oc get pvc
249 | # oc logs
250 | # oc exec -it nfs-pv-pod -- sh
251 | - mount | grep nfs
252 | # oc logs pod/nfs-pv-pod -c nfs-client1
253 | # oc logs pod/nfs-pv-pod -c nfs-client2
254 |
255 |
256 |
257 |
258 | ===== ConfigMaps =====
259 | ConfigMaps can be used to separate Dynamic Data from Static Data in a Pod
260 | ConfigMaps can be used in 3 different ways:
261 | 1. make variables available within a Pod
262 | 2. provide command line arguments
263 | 3. mount them on the location where the application expects to find a configuration file
264 |
265 | # vim variables
266 | VAR_1=Hello
267 | VAR_2=World
268 | esc : wq!
269 | # oc create cm variables --from-env-file=variables
270 | # oc get cm
271 | # oc describe cm variables
272 | # oc create -f test-pod1.yml
273 | # oc get pods
274 | # oc logs pod/example
275 |
276 | -Create test-pod1.yml
277 | apiVersion: v1
278 | kind: Pod
279 | metadata:
280 | name: example
281 | spec:
282 | containers:
283 | - name: example
284 | image: cirros
285 | command: ["/bin/sh", "-c", "env"]
286 | envFrom:
287 | - configMapRef:
288 | name: variables
289 |
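-Hedged sketch: the third ConfigMap usage above (mount it where the app expects a config file) can be done with oc set volume
-(the "variables" ConfigMap is from the example above; the deployment config name "example" is an assumption)
# oc set volume dc/example --add --type=configmap --configmap-name=variables --mount-path=/etc/config
-(each ConfigMap key, VAR_1 and VAR_2, then appears as a file under /etc/config inside the container)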
290 |
291 |
292 |
293 |
294 | ===== OpenShift Troubleshoot =====
295 | -Show recent events
296 | # oc get events
297 |
298 | -Show what has happened to specific pod
299 | # oc logs
300 |
301 | -Show pod details
302 | # oc describe pod
303 |
304 | -Show current working project
305 | # oc projects
306 |
307 | -Delete everything
308 | # oc delete all --all
309 |
310 | # oc logs -f bc/<build-config-name>    (follow the logs of a build)
311 |
312 |
313 |
314 |
315 | ===== Demo1 =====
316 | oc login -u developer -p anything
317 | oc new-project firstproject
318 | oc new-app --docker-image=nginx:latest --name=nginx
319 | oc status (use it repeatedly to trace the process)
320 | oc get pods
321 | oc describe pod
322 | oc get svc
323 | oc describe service nginx
324 | oc port-forward <nginx-pod-name> 33080:80
325 | curl -s http://localhost:33080
326 |
327 |
328 | ===== Demo2 =====
329 | oc whoami
330 | oc new-project mysql
331 | oc new-app --docker-image=mysql:latest --name=mysql-openshift -e MYSQL_USER=myuser -e MYSQL_PASSWORD=password -e MYSQL_DATABASE=mydb -e MYSQL_ROOT_PASSWORD=password
332 | oc status -v
333 | oc get all
334 | oc get pods -o=wide
335 | Log in to the web console and see the new application
336 | https://127.0.0.1:8443
337 |
338 |
339 | ```
340 |
341 | #### Creating new app (sample app: open-liberty)
342 | - Procedure ([openliberty.io](https://openliberty.io/guides/cloud-openshift.html#tearing-down-the-environment))
343 | ```bash
344 | # Get the open-liberty image
345 | $ docker save open-liberty > open-liberty.tar
346 | $ ls -l open-liberty.tar
347 |
348 | # Docker Load the open-liberty image
349 | $ docker load -i open-liberty.tar
350 | Loading layer ...
351 | Loading layer ...
352 | Loaded image: docker.io/openliberty/open-liberty:latest
353 |
354 | # Or, you may just pull open-liberty image to your environment
355 | $ docker pull open-liberty
356 | Using default tag: latest
357 | latest: Pulling from library/open-liberty
358 | 5bed26d33875: Pull complete
359 | f11b29a9c730: Pull complete
360 | 930bda195c84: Pull complete
361 | 78bf9a5ad49e: Pull complete
362 | bf7abfeb0680: Pull complete
363 | 72e3fc2f84f3: Pull complete
364 | 22e18c7d3d21: Pull complete
365 | c6a94ffbb4bd: Pull complete
366 | b3d5728c0015: Pull complete
367 | 42a91fcb5bbf: Pull complete
368 | 3fc5267dabfe: Pull complete
369 | 17655dfe9734: Pull complete
370 | Digest: sha256:d0690a004189913f4471e4440c49e7a4360882ef73984132205300067e023f1a
371 | Status: Downloaded newer image for open-liberty:latest
372 | docker.io/library/open-liberty:latest
373 |
374 | # Check the open-liberty images
375 | $ docker images | grep -E "REPO|open-liberty"
376 | REPOSITORY TAG IMAGE ID CREATED SIZE
377 | open-liberty latest f9215bfcd756 5 days ago 429MB
378 |
379 |
380 | # Login to OpenShift console
381 | $ oc login -u [user]
382 |
383 | # Create project for our new-app
384 | $ oc new-project test1-project
385 |
386 | # Login to OpenShift internal registry
387 | $ oc registry info
388 | $ docker login -u `oc whoami` -p `oc whoami -t` `oc registry info`
389 | Login Succeeded
390 |
391 | # Tag and push image to internal registry
392 | $ docker tag docker.io/open-liberty:latest `oc registry info`/`oc project -q`/open-liberty:latest
393 | $ docker images | grep open-liberty
394 | $ docker push `oc registry info`/`oc project -q`/open-liberty:latest
395 | # You may replace `oc project -q` with any project name you like
396 | # this operation pushes the image to the internal registry and creates the image stream
397 |
398 | # Check the image stream
399 | $ oc get is | grep open-liberty
400 | NAME DOCKER REPO TAGS UPDATED
401 | open-liberty 172.30.1.1:5000/openshift/open-liberty latest 2 minutes ago
402 |
403 | # Check the new-app list
404 | $ oc new-app --list | grep -A 2 open-liberty
405 | open-liberty
406 | Project: project-name
407 | Tags: latest
408 |
409 | # Create new app (using "oc new-app" command)
410 | $ oc new-app --image-stream=`oc project -q`/open-liberty:latest --name=liberty01
411 |
412 | --> Found image f9215bf (5 days old) in image stream "openshift/open-liberty" under tag "latest" for "openshift/open-liberty:latest"
413 | * This image will be deployed in deployment config "liberty01"
414 | * Ports 9080/tcp, 9443/tcp will be load balanced by service "liberty01"
415 | * Other containers can access this service through the hostname "liberty01"
416 | --> Creating resources ...
417 | imagestreamtag.image.openshift.io "liberty01:latest" created
418 | deploymentconfig.apps.openshift.io "liberty01" created
419 | service "liberty01" created
420 | --> Success
421 | Application is not exposed. You can expose services to the outside world by executing one or more of the commands below:
422 | 'oc expose svc/liberty01'
423 | Run 'oc status' to view your app.
424 |
425 | $ oc status --suggest
426 | In project sample01 on server https://192.168.99.102:8443
427 |
428 | http://liberty01-sample01.192.168.99.102.nip.io to pod port 9080-tcp (svc/liberty01)
429 | dc/liberty01 deploys openshift/open-liberty:latest
430 | deployment #1 deployed 11 minutes ago - 1 pod
431 |
432 | Info:
433 | * dc/liberty01 has no readiness probe to verify pods are ready to accept traffic or ensure deployment is successful.
434 | try: oc set probe dc/liberty01 --readiness ...
435 | * dc/liberty01 has no liveness probe to verify pods are still running.
436 | try: oc set probe dc/liberty01 --liveness ...
437 |
438 | View details with 'oc describe <resource>/<name>' or list everything with 'oc get all'.
439 |
440 |
441 | $ oc get all
442 | NAME READY STATUS RESTARTS AGE
443 | pod/liberty01-1-mr66r 1/1 Running 0 2m
444 |
445 | NAME DESIRED CURRENT READY AGE
446 | replicationcontroller/liberty01-1 1 1 1 3m
447 |
448 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
449 | service/liberty01 ClusterIP 172.30.193.12 9080/TCP,9443/TCP 3m
450 |
451 | NAME REVISION DESIRED CURRENT TRIGGERED BY
452 | deploymentconfig.apps.openshift.io/liberty01 1 1 1 config,image(open-liberty:latest)
453 |
454 | NAME DOCKER REPO TAGS UPDATED
455 | imagestream.image.openshift.io/liberty01 172.30.1.1:5000/sample01/liberty01 latest
456 |
457 |
458 | # Expose service as route
459 | $ oc expose service liberty01
460 | route.route.openshift.io/liberty01 exposed
461 |
462 | $ oc get route
463 | NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD
464 | liberty01 liberty01-sample01.192.168.99.102.nip.io liberty01 9080-tcp None
465 |
466 |
467 | # Open browser and access the app from the above route url
468 | Go to liberty01-sample01.192.168.99.102.nip.io
469 |
470 | # Test connection from internal pod:
471 | oc rsh `oc get pod -o jsonpath={.items[*].metadata.name}` curl liberty01:9080
472 | # liberty01 = servicename
473 | # 9080 = service port number
474 |
475 |
476 | # 2. Create new app (using yaml)
477 | --inprogress--
478 | ```
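- The `oc status --suggest` output above recommends adding probes to `dc/liberty01`. A minimal hedged sketch (the URL path and delay are assumptions, not values from the guide):
```bash
$ oc set probe dc/liberty01 --readiness --get-url=http://:9080/ --initial-delay-seconds=30
$ oc set probe dc/liberty01 --liveness --get-url=http://:9080/ --initial-delay-seconds=30
```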
479 | - YAML file
480 | ```yaml
481 |
482 |
483 | ```
484 |
485 | ### User, Group, Project, RoleBinding (grant project-specific authority)
486 | ##### Scenario:
487 | 1. Group: devgroup01, user: developer01, project: sample01
488 | 2. Group: devgroup02, user: developer02, project: sample02
489 | 3. Each group only has access to their own project. (devgroup01 only has access to sample01)
490 | 4. To realize No. 3, we need to create a RoleBinding that grants project-specific authority.
491 | ```bash
492 | # Create group
493 | $ oc adm groups new devgroup01
494 | $ oc adm groups new devgroup02
495 | $ oc get group devgroup01 devgroup02
496 |
497 | # Create user
498 | $ oc create user developer01
499 | $ oc create user developer02
500 | $ oc get user developer01 developer02
501 |
502 | # Add user to the specific group
503 | $ oc adm groups add-users devgroup01 developer01
504 | $ oc adm groups add-users devgroup02 developer02
505 | $ oc get group devgroup01 devgroup02
506 |
507 | # If you want to remove users from group:
508 | # oc adm groups remove-users devgroup01 developer01
509 |
510 | # (MINISHIFT only) Enable and apply the htpasswd-identity-provider add-on
511 | $ minishift addon enable htpasswd-identity-provider
512 | $ minishift addon apply htpasswd-identity-provider
513 | $ minishift addon list
514 |
515 | # Set HTTP password to developer01 and developer02 user (htpasswd)
516 | # (do this procedure on all Master nodes)
517 | $ htpasswd -b /etc/origin/master/htpasswd developer01 [password-for-developer01]
518 | $ htpasswd -b /etc/origin/master/htpasswd developer02 [password-for-developer02]
519 | $ cat /etc/origin/master/htpasswd
520 |
521 | # Login test with the created user account
522 | $ oc login -u developer01
523 | $ oc logout
524 | $ oc login -u developer02
525 | $ oc logout
526 |
527 | # Create Project
528 | $ oc new-project sample01
529 | $ oc new-project sample02
530 | $ oc projects | grep sample0
531 | ```
532 |
533 | - Create RoleBinding to give authority for group member to specific Project
534 | ```yaml
535 | apiVersion: rbac.authorization.k8s.io/v1
536 | kind: RoleBinding
537 | metadata:
538 | name: sample01-devgroup # specify [projectName]-[groupName]
539 | namespace: sample01 # the namespace (project) this group gets access to
540 | roleRef:
541 | apiGroup: rbac.authorization.k8s.io
542 | kind: ClusterRole
543 | name: admin # the ClusterRole to grant (e.g. admin, edit, view)
544 | subjects:
545 | - apiGroup: rbac.authorization.k8s.io
546 | kind: Group
547 | name: devgroup01 # the group you want to give access to
548 | ```
549 | - Create a similar yaml for the sample02-devgroup RoleBinding
550 | ```yaml
551 | apiVersion: rbac.authorization.k8s.io/v1
552 | kind: RoleBinding
553 | metadata:
554 | name: sample02-devgroup
555 | namespace: sample02
556 | roleRef:
557 | apiGroup: rbac.authorization.k8s.io
558 | kind: ClusterRole
559 | name: admin
560 | subjects:
561 | - apiGroup: rbac.authorization.k8s.io
562 | kind: Group
563 | name: devgroup02
564 | ```
565 | - Create the RoleBinding
566 | ```bash
567 | $ oc apply -f sample01-devgroup01.yaml
568 | rolebinding.rbac.authorization.k8s.io/sample01-devgroup created
569 |
570 | $ oc apply -f sample02-devgroup02.yaml
571 | rolebinding.rbac.authorization.k8s.io/sample02-devgroup created
572 |
573 | $ oc get rolebinding -n sample01
574 | NAME AGE
575 | admin 2d
576 | sample01-devgroup 2m -> This one
577 | system:deployers 2d
578 | system:image-builders 2d
579 | system:image-pullers 2d
580 |
581 | $ oc get rolebinding -n sample02
582 | NAME AGE
583 | admin 2d
584 | sample02-devgroup 2m -> This one
585 | system:deployers 2d
586 | system:image-builders 2d
587 | system:image-pullers 2d
588 |
589 | ```
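- Alternatively, the same project-scoped access can be granted without writing YAML by using `oc policy` (the binding name is then generated automatically):
```bash
$ oc policy add-role-to-group admin devgroup01 -n sample01
$ oc policy add-role-to-group admin devgroup02 -n sample02
```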
590 | - Log in as each user and check that they can only view their own project.
591 | ```bash
592 | $ oc login -u developer01
593 | Logged into "https://192.168.99.102:8443" as "developer01" using existing credentials.
594 | You have one project on this server: "sample01"
595 | Using project "sample01".
596 |
597 | $ oc new-app --image-stream=openshift/open-liberty:latest --name=liberty1
598 | --> Found image f9215bf (8 days old) in image stream "openshift/open-liberty" under tag "latest" for "openshift/open-liberty:latest"
599 | * This image will be deployed in deployment config "liberty1"
600 | * Ports 9080/tcp, 9443/tcp will be load balanced by service "liberty1"
601 | * Other containers can access this service through the hostname "liberty1"
602 | --> Creating resources ...
603 | imagestreamtag.image.openshift.io "liberty1:latest" created
604 | deploymentconfig.apps.openshift.io "liberty1" created
605 | service "liberty1" created
606 | --> Success
607 | Application is not exposed. You can expose services to the outside world by executing one or more of the commands below:
608 | 'oc expose svc/liberty1'
609 | Run 'oc status' to view your app.
610 |
611 | $ oc get pod -n sample01
612 | NAME READY STATUS RESTARTS AGE
613 | liberty1-1-rj94p 1/1 Running 0 43s
614 |
615 | $ oc get pod -n sample02
616 | No resources found.
617 | Error from server (Forbidden): pods is forbidden: User "developer01" cannot list pods in the namespace "sample02": no RBAC policy matched
618 |
619 | $ oc login -u developer02
620 | Logged into "https://192.168.99.102:8443" as "developer02" using existing credentials.
621 | You have one project on this server: "sample02"
622 | Using project "sample02".
623 |
624 | $ oc get pod -n sample02
625 | No resources found. -> We didn't create any pod
626 |
627 | $ oc get pod -n sample01
628 | No resources found.
629 | Error from server (Forbidden): pods is forbidden: User "developer02" cannot list pods in the namespace "sample01": no RBAC policy matched
630 |
631 | -> This proves the concept: each user can only access their own project
632 |
633 | ```
634 |
635 | ***
636 | #### Copy specific file inside a container to local host
637 | ```bash
638 | oc cp <pod-name>:/path/to/file.txt /local/directory/path/file.txt
639 | ```
640 | ***
641 | #### Collecting OpenShift Cluster, Master, Node and Project Information to provide information for Support Team
642 | - Don't forget to specify which project/pod needs fixing
643 | - Reference:
644 | - [RedHat Solutions](https://access.redhat.com/solutions/3132711)
645 | - [OpenShift3.11 Diagnostic](https://github.ibm.com/Justin-Kulikauskas/studious-guacamole/blob/master/must-gathers/cloudpak-openshift.md#openshift-311-diagnostics)
646 | - [OpenShift4.x Diagnostic](https://github.ibm.com/Justin-Kulikauskas/studious-guacamole/blob/master/must-gathers/cloudpak-openshift.md#full-gather---openshift-4x)
647 |
648 | 1. OpenShift Cluster
649 | ```bash
650 | $ oc get node,hostsubnet -o wide
651 | $ oc describe nodes
652 | $ oc get all,events -o wide -n default
653 | ```
654 |
655 | 2. OpenShift Master
656 | ```bash
657 | $ sosreport
658 | $ oc get --raw /metrics --server https://`hostname` --config=/etc/origin/master/admin.kubeconfig
659 | $ oc get --raw /metrics --server https://`hostname`:8444 --config=/etc/origin/master/admin.kubeconfig
660 | $ ovs-ofctl -O OpenFlow13 dump-flows br0
661 | ```
662 |
663 | 3. OpenShift Node
664 | ```bash
665 | $ sosreport
666 | $ ovs-ofctl -O OpenFlow13 dump-flows br0
667 | ```
668 | - To gather metrics from a node, run the commands below on a master host
669 | ```bash
670 | $ NODE_NAME=<node-name>
671 | $ oc get --raw /metrics --server https://$NODE_NAME:10250 --config=/etc/origin/master/admin.kubeconfig
672 | $ oc get --raw /metrics/cadvisor --server https://$NODE_NAME:10250 --config=/etc/origin/master/admin.kubeconfig
673 | $ oc describe node $NODE_NAME
674 | ```
675 |
676 | 4. OpenShift Project
677 | ```bash
678 | $ oc get all,events -o wide -n <project-name>
679 | $ oc get all,events -o yaml -n <project-name>
680 | ```
681 | - Gather logs from specific pod
682 | ```bash
683 | $ oc logs <pod-name> -n <project-name>
684 | ```
685 |
686 | 5. Openshift 3.11 diagnostics
687 |
688 | - OpenShift kubelet logs are managed by systemd on each node rather than through the cluster. On enterprise OpenShift, they can be gathered with `journalctl -u atomic-openshift-node`, and on OKD they can be gathered with `journalctl -u origin-node`. Docker or CRI-O logs can also be collected on each node if necessary.
689 |
690 | This script will gather information from the system namespaces (kube, openshift, infra, and default):
691 | ```bash
692 | export MGDIR=openshift-diag-$(date -Ihours)
693 | export LOGLIMIT="--tail=1000"
694 | mkdir -p $MGDIR
695 | oc get nodes > $MGDIR/node-list.txt
696 | oc describe nodes > $MGDIR/node-describe.txt
697 | oc get namespaces > $MGDIR/namespaces.txt
698 | oc get pods --all-namespaces -o wide > $MGDIR/all-pods-list.txt
699 | for NS in `oc get ns | awk 'NR>1 && (/openshift/ || /kube/ || /infra/){ORS=" "; print $1}'` default; do
700 | export NS=$NS; mkdir $MGDIR/$NS; echo gathering info from namespace $NS
701 | oc get pods,svc,route,ing,secrets,cm,events -n $NS -o wide &> $MGDIR/$NS/all-list.txt
702 | oc get pods -n $NS | awk 'NR>1{print "oc -n $NS describe pod "$1" > $MGDIR/$NS/"$1"-describe.txt && echo described "$1}' | bash
703 | oc get pods -n $NS -o go-template='{{range $i := .items}}{{range $c := $i.spec.containers}}{{println $i.metadata.name $c.name}}{{end}}{{end}}' > $MGDIR/$NS/container-list.txt
704 | awk '{print "oc -n $NS logs "$1" -c "$2" $LOGLIMIT -p > $MGDIR/$NS/"$1"_"$2"_previous.log && echo gathered previous logs of "$1"_"$2}' $MGDIR/$NS/container-list.txt | bash
705 | awk '{print "oc -n $NS logs "$1" -c "$2" $LOGLIMIT > $MGDIR/$NS/"$1"_"$2".log && echo gathered logs of "$1"_"$2}' $MGDIR/$NS/container-list.txt | bash
706 | oc get svc -n $NS | awk 'NR>1{print "oc -n $NS describe svc "$1" > $MGDIR/$NS/svc-describe-"$1".txt && echo described service "$1}' | bash
707 | done
708 | tar czf $MGDIR.tgz $MGDIR/
709 | ```
710 | There will be some error messages regarding "previous terminated container ... not found"; these do not indicate any issues.
711 |
712 | ##### Targeted Mustgather (3.11)
713 | - Collect a sosreport from a master node and any other nodes that have specific issues:
714 | ```bash
715 | # You may ignore the yum update
716 | yum update sos
717 | sosreport
718 | ```
719 | - Log in to openshift as a cluster administrator. If you cannot log in, you may be able to use the installer's credentials from the "boot" node with this command:
720 | ```bash
721 | export KUBECONFIG=/etc/origin/master/admin.kubeconfig
722 | ```
723 | You can verify you are logged in correctly with `oc whoami`.
724 |
725 | Create the initial directory and get some general information:
726 | ```bash
727 | export SUPPDIR=support-mustgather-$(date -Ihours)
728 | mkdir $SUPPDIR
729 | oc get --raw /metrics --config=/etc/origin/master/admin.kubeconfig > $SUPPDIR/oc-raw-metrics.log
730 | oc get nodes > $SUPPDIR/node-list.txt
731 | oc describe nodes > $SUPPDIR/node-describe.txt
732 | ```
733 |
734 | - Gather information from the kube-system namespace, using the template below. Depending on the issue, information for other namespaces should also be collected (this list is incomplete). Namespaces to collect per problem:
735 | - Any problem: kube-system
736 | - OpenShift router: default
737 | - OpenShift private registry: default
738 | - OpenShift networking: openshift-sdn
739 | - Multicloud Management: multicluster-endpoint
740 | - IBM Cloud Automation Manager: services
741 |
742 |
743 | ```bash
744 | export NSPACE=<namespace>
745 | mkdir $SUPPDIR/$NSPACE
746 | oc -n $NSPACE get all,events,secrets,cm,ing -o wide &> $SUPPDIR/$NSPACE/all-list.txt
747 | oc get pods -n $NSPACE | awk 'NR>1{print "oc -n $NSPACE describe pod "$1" > $SUPPDIR/$NSPACE/"$1"-describe.txt && echo described "$1}' | bash
748 | oc get pods -n $NSPACE -o go-template='{{range $i := .items}}{{range $c := $i.spec.containers}}{{println $i.metadata.name $c.name}}{{end}}{{end}}' > $SUPPDIR/$NSPACE/container-list.txt
749 | awk '{print "oc -n $NSPACE logs "$1" -c "$2" --tail=1000 > $SUPPDIR/$NSPACE/"$1"_"$2".log && echo gathered logs of "$1"_"$2}' $SUPPDIR/$NSPACE/container-list.txt | bash
750 | ```
751 |
752 | After the information is collected, you may pack the folder for easier transfer with this command (you will still need to also collect the sosreport separately):
753 | ```bash
754 | tar czf $SUPPDIR.tgz $SUPPDIR/
755 | ```
756 |
--------------------------------------------------------------------------------
/01.openshift-administration.md:
--------------------------------------------------------------------------------
1 | # Red Hat OpenShift Container Platform
2 | ## OpenShift Container Platform Features
3 | 
4 |
5 | ## Openshift Features
6 | 
7 |
8 | ## Openshift Control Plane
9 | 
10 | ##### This document uses OpenShift ver. 4.5
11 |
12 | ## Openshift Cluster Operators
13 | - Kubernetes Operators
14 | - Operators usually define custom resources (CR) that store their settings and configurations.
15 | - The syntax of a custom resource is defined by a custom resource definition (CRD).
16 | - An OpenShift administrator manages an operator by editing its custom resources.
17 |
18 | - OpenShift manages the following cluster operators, among others:
19 | - network
20 | - ingress
21 | - storage
22 | - authentication
23 | - console
24 | - monitoring
25 | - image-registry
26 | - cluster-autoscaler
27 | - openshift-apiserver
28 | - dns
29 | - openshift-controller-manager
30 | - cloud-credential
31 | 
32 |
33 | - Operator
34 | - An application that manages Kubernetes resources.
35 | - Operator SDK
36 | - An open source toolkit for building, testing, and packaging operators.
37 | - Operator Catalog
38 | - A repository for discovering and installing operators.
39 | - Custom Resource Definition (CRD)
40 | - An extension of the Kubernetes API that defines the syntax of a custom resource.
41 | - Operator Lifecycle Manager (OLM)
42 | - An application that manages Kubernetes operators.
43 | - OperatorHub
44 | - A public web service where you can publish operators that are compatible with the OLM.
45 | - Operator Image
46 | - The artifact defined by the Operator Framework that you can publish for consumption by an OLM instance.
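A hedged illustration of how an administrator works with an operator's custom resources, using the ingress operator only as an example (any operator that owns a CRD follows the same pattern):
```bash
# Find the CRD that the operator defines
oc get crd | grep ingresscontroller

# Show and edit the operator's custom resource; the operator reconciles the change
oc get ingresscontroller default -n openshift-ingress-operator -o yaml
oc edit ingresscontroller default -n openshift-ingress-operator
```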
47 |
48 |
49 | ## Troubleshooting OpenShift Clusters and Applications
50 | ### Verifying the Health of OpenShift Nodes
51 | - `oc get nodes`
52 | ```bash
53 | NAME STATUS ROLES AGE VERSION
54 | master01 Ready master,worker 2d v1.18.3+012b3ec
55 | master02 Ready master,worker 2d v1.18.3+012b3ec
56 | master03 Ready master,worker 2d v1.18.3+012b3ec
57 | ```
58 |
59 | - `oc adm top nodes`
60 | Displays the current CPU and memory usage of each node.
61 | These are actual usage numbers, not the resource requests that the OpenShift scheduler considers as the available and used capacity of the node.
62 | ```bash
63 | NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
64 | master01 499m 14% 3235Mi 21%
65 | master02 769m 21% 4933Mi 33%
66 | master03 1016m 29% 6087Mi 40%
67 | ```
68 |
69 | - `oc describe node my-node-name`
70 | Displays detailed information about a single node. To retrieve the cluster version, use `oc get clusterversion`:
71 | ```bash
72 | NAME VERSION AVAILABLE PROGRESSING SINCE STATUS
73 | version 4.5.4 True False 4d23h Cluster version is 4.5.4
74 | ```
75 |
76 | - `oc get clusteroperators`
77 | ```bash
78 | NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE
79 | authentication 4.5.4 True False False 3h58m
80 | cloud-credential 4.5.4 True False False 4d23h
81 | cluster-autoscaler 4.5.4 True False False 4d23h
82 | config-operator 4.5.4 True False False 4d23h
83 | console 4.5.4 True False False 3h58m
84 | csi-snapshot-controller 4.5.4 True False False 4d23h
85 | dns 4.5.4 True False False 4d23h
86 | etcd 4.5.4 True False False 4d23h
87 | image-registry 4.5.4 True False False 4d23h
88 | ...output omitted...
89 | ```
90 |
91 | #### Openshift Nodes
92 | ##### Show logs of Openshift Nodes
93 | - `oc adm node-logs -u crio my-node-name`
94 | - display the crio service logs on my-node-name OCP node
95 | - `oc adm node-logs -u kubelet my-node-name`
96 | - display the kubelet service logs on my-node-name OCP node
97 | - `oc adm node-logs my-node-name`
98 | - display all journal logs of node
99 |
100 | ##### Open a shell prompt on an Openshift Node
101 | - `oc debug node/my-node-name`
102 | ```bash
103 | Starting pod/master01-debug ...
104 | To use host binaries, run `chroot /host`
105 | Pod IP: 192.168.50.10
106 | If you don't see a command prompt, try pressing enter.
107 |
108 | sh-4.2# chroot /host
109 | sh-4.2#
110 | sh-4.2# systemctl status kubelet
111 | ● kubelet.service - MCO environment configuration
112 | Loaded: loaded (/etc/systemd/system/kubelet.service; enabled; vendor preset: enabled)
113 | Drop-In: /etc/systemd/system/kubelet.service.d
114 | └─10-mco-default-env.conf
115 | Active: active (running) since Fri 2020-07-31 16:26:57 UTC; 4h 32min ago
116 | ...output omitted...
117 | sh-4.2#
118 | sh-4.2# systemctl status cri-o
119 | ● crio.service - MCO environment configuration
120 | Loaded: loaded (/usr/lib/systemd/system/crio.service; disabled; vendor preset: disabled)
121 | Drop-In: /etc/systemd/system/crio.service.d
122 | └─10-mco-default-env.conf
123 | Active: active (running) since Fri 2020-07-31 16:26:50 UTC; 4h 35min ago
124 | ...output omitted...
125 |
126 | ```
127 |
128 | #### Troubleshooting Application Deployments
129 | `oc get pod`
130 | `oc status`
131 | `oc describe pod my-pod-name`
132 | `oc logs my-pod-name`
133 | `oc logs my-pod-name -c container-name`
134 | `oc logs my-pod-name --all-containers`
135 |
136 | ##### Creating troubleshooting pod using `oc debug`
137 | `oc debug deployment/my-deployment-name --as-root`
138 |
139 | ##### rsh command
140 | `oc rsh my-pod-name`
141 | Opens a shell inside a pod to run shell commands interactively and non-interactively.
142 |
143 | ##### Copy files
144 | `oc cp /local/path my-pod-name:/container/path`
145 |
146 | ##### Verbose log level
147 | `oc get pod --loglevel 6`
148 | `oc get pod --loglevel 10`
149 |
150 | ##### Get current token
151 | `oc whoami -t`
152 |
153 |
154 |
155 | ##### Port-forward
156 | `oc port-forward my-pod-name local-port:remote-port`
157 | Creates a TCP tunnel from local-port on your workstation to remote-port on the pod. The tunnel is alive as long as you keep the oc port-forward command running. This allows you to get network access to the pod without exposing it through a route. Because the tunnel starts at your localhost, it cannot be accessed by other machines.
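A hedged example (the pod name and ports are placeholders):
```bash
# Forward local port 3306 to port 3306 on a database pod
oc port-forward mysql-1-abcde 3306:3306

# In another terminal on the same workstation
mysql -h 127.0.0.1 -P 3306 -u user -p
```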
158 |
159 | ##### skopeo inspect
160 | `skopeo inspect docker://registry.access.redhat.com/rhscl/postgresq-96-rhel7:1`
161 | ```bash
162 | FATA[0000] Error parsing image name "docker://registry.access.redhat.com/rhscl/postgresq-96-rhel7:1": Error reading manifest 1 in registry.access.redhat.com/rhscl/postgresq-96-rhel7: name unknown: Repo not found
163 | ```
164 |
165 | `skopeo inspect docker://registry.access.redhat.com/rhscl/postgresql-96-rhel7:1`
166 | This command checks whether the image exists in the remote registry and whether the registry can be accessed.
167 |
168 |
169 |
170 | ## OpenShift Dynamic Storage
171 | ### Persistent Storage
172 | Two ways of provisioning storage for the cluster:
173 | - Static
174 | - Static provisioning requires the cluster administrator to create persistent volumes manually.
175 | - Dynamic
176 | - Dynamic provisioning uses storage classes to create the persistent volumes on demand.
177 | - Verifying dynamic storageclass:
178 | ```bash
179 | oc get storageclass
180 | NAME PROVISIONER ...
181 | nfs-storage (default) nfs-storage-provisioner ...
182 | ```
183 |
184 | #### PVC and PV
185 | - PVC: persistent volume claim
186 | - Specify a name for the persistent volume claim. This name is used in the `claimName` field in the volumes section of the Deployment manifest.
187 | - Important to specify `Access Modes`. If persistent volumes are created statically, then an eligible persistent volume must provide this access mode.
188 | - ReadWriteMany(RWX): Kubernetes can mount the volume as read-write on many nodes.
189 | - ReadOnlyMany(ROX): Kubernetes can mount the volume as read-only on many nodes.
190 | - ReadWriteOnce(RWO): Kubernetes can mount the volume as read-write on only a single node.
191 | - Important to specify `Size Request`. If persistent volumes are created statically, then an eligible persistent volume must be at least the requested size.
192 |
193 | ```yaml
194 | apiVersion: v1
195 | kind: PersistentVolumeClaim
196 | metadata:
197 | name: example-pv-claim
198 | labels:
199 | app: example-application
200 | spec:
201 | accessModes:
202 | - ReadWriteOnce
203 | resources:
204 | requests:
205 | storage: 15Gi
206 | ```
207 |
208 | ##### Add PVC on the application
209 | ```yaml
210 | ...
211 | spec:
212 | volumes:
213 | - name: example-pv-storage
214 | persistentVolumeClaim:
215 | claimName: example-pv-claim
216 | containers:
217 | - name: example-application
218 | image: registry.redhat.io/rhel8/example-app
219 | ports:
220 | - containerPort: 1234
221 | volumeMounts:
222 | - mountPath: "/var/lib/example-app"
223 | name: example-pv-storage
224 | ```
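Equivalently, a claim and the matching volume mount can be generated in one step with `oc set volume`; a minimal sketch, assuming a deployment named `example-application` and a default storage class for dynamic provisioning:
```bash
oc set volume deployment/example-application --add --name example-pv-storage \
  --type persistentVolumeClaim --claim-name example-pv-claim \
  --claim-size 15Gi --claim-mode ReadWriteOnce \
  --mount-path /var/lib/example-app
```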
225 |
226 | ## # Authentication and Authorization
227 | #### # OpenShift Users and Groups
228 | `User`
229 | In the OpenShift Container Platform architecture, users are entities that interact with the API server. The user resource represents an actor within the system. Assign permissions by adding roles to the user directly or to the groups of which the user is a member.
230 |
231 | `Group`
232 | Groups represent a specific set of users. Users are assigned to one or to multiple groups. Groups are leveraged when implementing authorization policies to assign permissions to multiple users at the same time.
233 |
234 | `Identity`
235 | The identity resource keeps a record of successful authentication attempts from a specific user and identity provider. Any data concerning the source of the authentication is stored on the identity. Only a single user resource is associated with an identity resource.
236 |
237 | `Service Account`
238 | In OpenShift, applications can communicate with the API independently when user credentials cannot be acquired. To preserve the integrity of a regular user's credentials, credentials are not shared and service accounts are used instead. Service accounts enable you to control API access without the need to borrow a regular user's credentials.
239 |
240 | `Role`
241 | A role defines a set of permissions that enables a user to perform API operations over one or more resource types. You grant permissions to users, groups, and service accounts by assigning roles to them.
242 |
243 | #### # Authenticating API Requests
244 | ##### 1. OAuth Access Tokens
245 | ##### 2. X.509 Client Certificates
246 | The installation logs provide the location of the kubeconfig file:
247 | `INFO Run 'export KUBECONFIG=root/auth/kubeconfig' to manage the cluster with 'oc'.`
248 | This kubeconfig file can be used to authenticate to the OCP cluster.
249 |
250 | #### # Removing the kubeadmin Virtual User
251 | To improve OpenShift cluster security, you can remove the kubeadmin user credentials after you define an identity provider, create a new user, and assign that user the cluster-admin role.
252 | ```bash
253 | $ oc delete secret kubeadmin -n kube-system
254 |
255 | ```
256 |
257 | ```yaml
258 | apiVersion: config.openshift.io/v1
259 | kind: OAuth
260 | metadata:
261 | name: cluster
262 | spec:
263 | identityProviders:
264 | - name: my_httpasswd_provider
265 | mappingMethod: claim
266 | type: HTPasswd
267 | htpasswd:
268 | fileData:
269 | name: htpasswd-secret
270 | ```
271 |
272 | ## # Defining and Applying Permissions Using RBAC (Role-based Access Control)
273 | #### # RBAC Object
274 | - `Rule`
275 | - Allowed actions for objects or groups of objects.
276 | - `Role`
277 | - Sets of rules. Users and groups can be associated with multiple roles.
278 | - `Binding`
279 | - Assignment of users or groups to a role.
280 |
281 | #### # RBAC Scope
282 | - Cluster Role
283 | - Users or groups with this role level can manage the OpenShift cluster.
284 | - Local Role
285 | - Users or groups with this role level can only manage elements at a project level.
286 |
287 | #### # Managing RBAC using CLI
288 | Add/remove a cluster-role to a user:
289 | `oc adm policy add-cluster-role-to-user CLUSTER-ROLE USER-NAME`
290 | `oc adm policy remove-cluster-role-from-user CLUSTER-ROLE USER-NAME`
291 |
292 | Add/remove a role to a user:
293 | `oc policy add-role-to-user ROLE USER-NAME -n PROJECT`
294 | `oc policy remove-role-from-user ROLE USER-NAME -n PROJECT`
295 |
296 | #### # Default Roles
297 | - `cluster-admin`
298 | - Users with this role have superuser access to the cluster resources. These users can perform any action on the cluster, and have full control of all projects.
299 | - `cluster-status`
300 | - Users with this role can get cluster status information.
301 | - `self-provisioner`
302 | - Users with this role can create new projects. This is a cluster role, not a project role.
303 | - `admin`
304 | - Users with this role can manage all project resources, including granting access to other users to access the project.
305 | - `basic-user`
306 | - Users with this role have read access to the project.
307 | - `edit`
308 | - Users with this role can create, change, and delete common application resources from the project, such as services and deployment configurations. These users cannot act on management resources such as limit ranges and quotas, and cannot manage access permissions to the project.
309 | - `view`
310 | - Users with this role can view project resources, but cannot modify project resources.
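A hedged way to inspect how these roles are applied in practice:
```bash
# Which users and groups can perform a given action in a project?
oc adm policy who-can delete pods -n PROJECT

# List the role bindings in a project
oc get rolebinding -n PROJECT -o wide
```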
311 |
312 | ## # QUIZ: Configuring Identity Providers
313 | #### # Follow below instructions:
314 | 1. Create htpasswd file and add `user1` with the password `pass1`. Set the htpasswd file as /tmp/htpasswd.
315 | 2. Add `user2` with the password `pass2`.
316 | 3. Create secret that contains the HTPasswd users file mentioned above. Secret's name is `localusers`.
317 | 4. Update the HTPasswd identity provider for the cluster so that you can authenticate the users mentioned in the htpasswd file.
318 | 5. Assign `user1` as `cluster-admin` role.
319 | 6. Extract the current `localusers` secret to htpasswd file at /tmp directory. Add `manager` user with password `manager`. Change the password for `user2` to `pass222`. Update the secret and make sure you can login with `manager` and `user2`.
320 | 7. Remove the `user2` (user and identity).
321 | 8. VERY CAREFUL ON THIS, IF YOU DELETE kubeadmin, YOU WILL NEVER BE ABLE TO LOGIN WITH kubeadmin. After creating all the users, delete the Virtual user `kubeadmin` to enhance security.
322 |
323 | 
324 |
325 |
326 | #### # Answer: Configuring Identity Providers
327 |
328 | Answer
329 |
330 | 1. Create htpasswd file and add `user1` with the password `pass1`. Set the htpasswd file as /tmp/htpasswd.
331 | ```bash
332 | htpasswd -c -B -b /tmp/htpasswd user1 pass1
333 | ```
334 |
335 | 2. Add `user2` with the password `pass2`.
336 | ```bash
337 | htpasswd -b /tmp/htpasswd user2 pass2
338 | ```
339 |
340 | 3. Create secret that contains the HTPasswd users file mentioned above. Secret's name is `localusers`.
341 | ```bash
342 | #Login with admin user
343 | oc login -u kubeadmin -p $KUBEADM_PASSWD $OCP_CLUSTER
344 |
345 | oc create secret generic localusers --from-file htpasswd=/tmp/htpasswd -n openshift-config
346 |
347 | #Check whether the secret created successfully
348 | oc get secret localusers -n openshift-config
349 | oc extract secret/localusers -n openshift-config --to -
350 | ```
351 |
352 | 4. Update the HTPasswd identity provider for the cluster so that you can authenticate the users mentioned in the htpasswd file. Configure the HTPasswd identity provider name as `myusers`.
353 | ```bash
354 | #Check the current OAuth setting
355 | oc get oauth
356 | NAME AGE
357 | cluster 234d
358 |
359 | #Check the current OAuth Pod
360 | oc get pod -n openshift-authentication
361 |
362 | # Edit the current OAuth setting on your OpenShift cluster
363 | oc edit oauth cluster
364 | ```
365 | Edit the oauth cluster with the below entries:
366 | ```yaml
367 | apiVersion: config.openshift.io/v1
368 | kind: OAuth
369 | ...
370 | spec:
371 | identityProviders:
372 | - htpasswd:
373 | fileData:
374 | name: localusers #put the secret name here
375 | mappingMethod: claim #set the mappingmethod
376 | name: myusers #set the identity name
377 | type: HTPasswd #set the identity provider
378 | ```
379 |
380 | Check the pod at openshift-authentication project:
381 | ```bash
382 | #The new OAuth Pod should be re-created after the above configuration.
383 | oc get pod -n openshift-authentication
384 | ```
385 |
386 | 5. Assign `user1` as `cluster-admin` role.
387 | ```bash
388 | oc whoami
389 |
390 | oc adm policy add-cluster-role-to-user cluster-admin user1
391 |
392 | oc login -u user1 -p pass1
393 |
394 | oc get nodes
395 | ```
396 |
397 | 6. Extract the current `localusers` secret to htpasswd file at /tmp directory. Add `manager` user with password `manager`. Change the password for `user2` to `pass222`. Update the secret and make sure you can login with `manager` and `user2`.
398 | ```bash
399 | oc extract secret/localusers -n openshift-config --to /tmp/ --confirm
400 |
401 | htpasswd -b /tmp/htpasswd manager manager
402 | htpasswd -b /tmp/htpasswd user2 pass222
403 |
404 | oc set data secret/localusers --from-file htpasswd=/tmp/htpasswd -n openshift-config
405 |
406 | oc get pod -n openshift-authentication
407 |
408 | oc login -u manager -p manager
409 | oc login -u user2 -p pass222
410 | ```
411 |
412 | 7. Remove the `user2` (user and identity).
413 | ```bash
414 | oc login -u user1 -p pass1
415 |
416 | oc extract secret/localusers -n openshift-config --to /tmp/ --confirm
417 |
418 | cat /tmp/htpasswd
419 | htpasswd -D /tmp/htpasswd user2
420 | cat /tmp/htpasswd
421 |
422 | oc get user,identity
423 |
424 | oc set data secret/localusers --from-file htpasswd=/tmp/htpasswd -n openshift-config
425 |
426 | oc get pod -n openshift-authentication
427 |
428 | oc get user,identity
429 |
430 | oc delete user user2
431 |
432 | oc delete identity myusers:user2
433 | ```
434 |
435 | 8. VERY CAREFUL ON THIS, IF YOU DELETE kubeadmin, YOU WILL NEVER BE ABLE TO LOGIN WITH kubeadmin. After creating all the users, delete the Virtual user `kubeadmin` to enhance security.
436 | ```bash
437 | #Login as cluster-admin with user1
438 | oc login -u user1 -p pass1
439 |
440 | oc get secret kubeadmin -n kube-system
441 |
442 | oc delete secret kubeadmin -n kube-system
443 | oc get secret kubeadmin -n kube-system
444 | ```
445 |
446 |
447 |
448 | ## # QUIZ: Authentication and Authorization
449 | #### # This quiz is testing your understanding about HTPasswd identity provider and RBAC :
450 | 1. Create htpasswd file with users `tester`, `leader`, `admin`, `developer` with the password `L@bR3v!ew`. Set the htpasswd file as /tmp/htpasswd.
451 | 2. Configure your cluster to use the HTPasswd identity provider using the configured /tmp/htpasswd.
452 | 3. Make the `admin` user a cluster administrator. Log in as both `admin` and as `developer` to verify HTPasswd user configuration and cluster privileges.
453 | 4. As the `admin` user, remove the ability to create projects cluster wide.
454 | 5. Create a group named `managers`, and add the `leader` user to the group. Grant project creation privileges to the `managers` group. As the `leader` user, create the `auth-review` project.
455 | 6. Create a group named `developers` and grant `edit` privileges on the `auth-review` project. Add the `developer` user to the group.
456 | 7. Create a group named `qa` and grant `view` privileges on the `auth-review` project. Add the `tester` user to the group.
457 |
458 | #### # Answer: Authentication and Authorization
459 |
460 | Answer
461 |
462 | 1. Create htpasswd file with users `tester`, `leader`, `admin`, `developer` with the password `L@bR3v!ew`. Set the htpasswd file as /tmp/htpasswd.
463 | ```bash
464 | htpasswd -c -B -b /tmp/htpasswd tester 'L@bR3v!ew'
465 | htpasswd -B -b /tmp/htpasswd leader 'L@bR3v!ew'
466 | htpasswd -B -b /tmp/htpasswd admin 'L@bR3v!ew'
467 | htpasswd -B -b /tmp/htpasswd developer 'L@bR3v!ew'
468 | ```
469 |
470 | 2. Configure your cluster to use the HTPasswd identity provider using the configured /tmp/htpasswd.
471 | ```bash
472 | oc login -u kubeadmin -p $RHT_OCP4_KUBEADM_PASSWD $RHT_OCP4_HTTPS
473 |
474 | oc create secret generic auth-review --from-file htpasswd=/tmp/htpasswd -n openshift-config
475 |
476 | oc get oauth cluster
477 |
478 | oc edit oauth cluster
479 | ```
480 | Edit the oauth cluster and add the below HTPasswd configuration:
481 | ```yaml
482 | ...
483 | spec:
484 | identityProviders:
485 | - name: htpasswd-provider
486 | htpasswd:
487 | fileData:
488 | name: auth-review # secret name
489 | mappingMethod: claim
490 | type: HTPasswd
491 | ```
492 |
493 | ```bash
494 | #Check the OAuth pod which is redeployed after the HTPasswd configuration
495 | oc get pod -n openshift-authentication
496 | ```
497 |
498 | 3. Make the `admin` user a cluster administrator. Log in as both `admin` and as `developer` to verify HTPasswd user configuration and cluster privileges.
499 | ```bash
500 | oc get user,identity
501 |
502 | oc adm policy add-cluster-role-to-user cluster-admin admin
503 |
504 | oc login -u admin -p 'L@bR3v!ew'
505 | oc whoami; oc get nodes
506 |
507 | oc login -u developer -p 'L@bR3v!ew'
508 | oc whoami; oc get nodes
509 | # oc get nodes will be failed (not authorized)
510 | ```
511 |
512 | 4. As the `admin` user, remove the ability to create projects cluster wide.
513 | ```bash
514 | oc login -u admin -p 'L@bR3v!ew'
515 |
516 | oc get clusterrolebinding | egrep "NAME|self-provisioner"
517 |
518 | oc adm policy remove-cluster-role-from-group self-provisioner system:authenticated:oauth
519 |
520 | ```
521 |
522 | 5. Create a group named `managers`, and add the `leader` user to the group. Grant project creation privileges to the `managers` group. As the `leader` user, create the `auth-review` project.
523 | ```bash
524 | oc adm groups new managers
525 |
526 | oc adm groups add-users managers leader
527 |
528 | oc adm policy add-cluster-role-to-group self-provisioner managers
529 |
530 | oc login -u leader -p 'L@bR3v!ew'
531 |
532 | oc new-project auth-review
533 | ```
534 |
535 | 6. Create a group named `developers` and grant `edit` privileges on the `auth-review` project. Add the `developer` user to the group.
536 | ```bash
537 | oc login -u admin -p 'L@bR3v!ew'
538 |
539 | oc adm groups new developers
540 |
541 | oc adm groups add-users developers developer
542 |
543 | oc policy add-role-to-group edit developers -n auth-review
544 | ```
545 |
546 | 7. Create a group named `qa` and grant `view` privileges on the `auth-review` project. Add the `tester` user to the group.
547 | ```bash
548 | oc adm groups new qa
549 |
550 | oc adm groups add-users qa tester
551 |
552 | oc policy add-role-to-group view qa -n auth-review
553 | ```
554 |
555 |
556 |
557 | ## # Configuring Application Security
558 | #### # Managing sensitive information with secrets
559 |
560 | ##### #Features of Secret:
561 | - Secret data can be shared within a project namespace.
562 | - Secret data is referenced independently of secret definition. Administrators can create and manage a secret resource that other team members can reference in their deployment configurations.
563 | - Secret data is injected into pods when OpenShift creates a pod. You can expose a secret as an environment variable or as a mounted file in the pod.
564 | - If the value of a secret changes during pod execution, the secret data in the pod does not update. After a secret value changes, you must create new pods to inject the new secret data.
565 | - Any secret data that OpenShift injects into a pod is ephemeral. If OpenShift exposes sensitive data to a pod as environment variables, then those variables are destroyed when the pod is destroyed.
566 |
567 | ##### #Use Cases of Secret:
568 | 1. Credentials
569 | - Store sensitive information, such as passwords and username for application.
570 | 2. TLS (Transport Layer Security) and Key Pairs
571 | - Use a TLS certificate (tls.crt) and key (tls.key) to secure communication to a pod.
572 |
573 | ```bash
574 | oc create secret generic SECRETNAME --from-literal key1=val1
575 | oc create secret generic SECRETNAME --from-file SECRETFILE=/path-to-secret-file
576 | oc create secret tls SECRETNAME --cert /path-to-certificate --key /path-to-key
577 |
578 | ```
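To verify what a secret actually contains (values are stored base64-encoded), a quick hedged check:
```bash
# Print all keys and decoded values of the secret to stdout
oc extract secret/SECRETNAME --to=-

# Decode a single key (key1 is a placeholder)
oc get secret SECRETNAME -o jsonpath='{.data.key1}' | base64 -d
```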
579 |
580 | ##### #Configure secrets inside Pod:
581 |
582 | - Configure secret inside Pod:
583 | ```yaml
584 | ...
585 | env:
586 | - name: MYSQL_ROOT_PASSWORD #environment variable in the Pod
587 | valueFrom:
588 | secretKeyRef:
589 | name: demo-secret #name of secret
590 | key: root_password #name of key inside the secret
591 | ```
592 |
593 | - Inject secret to Pod deployment
594 | ```bash
595 | oc set env deployment/demo --from secret/demo-secret --prefix MYSQL_
596 | ```
597 |
598 | - Secrets as Files in a Pod
599 | ```bash
600 | oc set volume deployment/demo --add --type=secret --secret-name=demo-secret --mount-path=/app-secrets
601 | ```
602 |
603 | #### # Configuration Map
604 | - Similar to secrets, configuration maps decouple configuration information from container images. Unlike secrets, the information contained in configuration maps does not require protection.
605 |
606 | ```bash
607 | oc create configmap CONFIGMAPNAME --from-literal key1=val1
608 | oc create configmap CONFIGMAPNAME --from-file CMFILE=/path-to-configmapfile
609 | ```
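Consuming a configuration map from a workload mirrors the secret commands above; a minimal sketch reusing the `demo` deployment name:
```bash
# Inject all ConfigMap keys as environment variables
oc set env deployment/demo --from configmap/CONFIGMAPNAME

# Or mount the ConfigMap as files
oc set volume deployment/demo --add --type=configmap --configmap-name=CONFIGMAPNAME --mount-path=/app-config
```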
610 |
611 | #### # Updating Secrets and Configmap
612 | ```bash
613 | oc extract secret/SECRETNAME -n NAMESPACE --to /path-to-directory/ --confirm
614 |
615 | oc set data secret/SECRETNAME --from-file /path-to-directory/secretfile -n NAMESPACE
616 | ```
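As noted in the secret features above, running pods do not pick up changed secret data; after updating, redeploy the workload. A hedged sketch (the deployment name is an assumption):
```bash
oc rollout restart deployment/demo
oc get pod
```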
617 |
618 |
619 | ## # QUIZ: Debug error Pod (configure application security)
620 | #### # Managing sensitive information with secret and allow application to run in less restrictive environment using SECURITY CONTEXT CONSTRAINTS
621 | 1. Create `test-pj` project with `developer` user.
622 | 2. Configure `secret-pj` secret in `test-pj` with the below entries. This secret will be used by the MySQL database and the WordPress application:
623 | - user = wpuser
624 | - password = redhat123
625 | - database = wordpress
626 | 3. Create Database app with the following details:
627 | - deployment name = mysql
628 | - use image from registry.access.redhat.com/rhscl/mysql-57-rhel7:5.7-47
629 | - Analyze the logs and fix the error. (Modify the mysql deployment to use the secret-pj secrets as environment variable)
630 | 4. Create WordPress application that uses the MySQL database. Details as follow:
631 | - deployment name = wordpress
632 | - use image from quay.io/redhattraining/wordpress:5.3.0
633 | - Analyze the logs and fix the error
634 | - Update your secret and add some additional variables:
635 | - host = mysql
636 | - name = wordpress
637 | - in the real configuration, the WordPress container uses these variables as WORDPRESS_DB_HOST and WORDPRESS_DB_NAME
638 | - WordPress application has to use the `secret-pj` secret
639 | - Make sure the application can connect to the database. Check the route configuration, which can be accessed at `myblog.apps.ocp4.example.com`, etc.
640 |
641 | #### # Answer: Debug error Pod (configure application security)
642 |
643 | Answer
644 |
645 | 1. Create `test-pj` project with `developer` user.
646 | ```bash
647 | oc login -u developer -p $DEVELOPER_PASSWD
648 |
649 | oc new-project test-pj
650 | ```
651 |
652 | 2. Configure `secret-pj` secret in `test-pj` with the below entries. This secret will be used by the MySQL database and the WordPress application:
653 | - user = wpuser
654 | - password = redhat123
655 | - database = wordpress
656 | ```bash
657 | oc create secret generic secret-pj --from-literal user=wpuser --from-literal password=redhat123 --from-literal database=wordpress
658 |
659 | oc get secret secret-pj
660 |
661 | oc describe secret secret-pj
662 | ```
663 |
664 | 3. Create Database app with the following details:
665 | - deployment name = mysql
666 | - use image from registry.access.redhat.com/rhscl/mysql-57-rhel7:5.7-47
667 | - Analyze the logs and fix the error. (Modify the mysql deployment to use the secret-pj secrets as environment variable)
668 | ```bash
669 | oc new-app --name mysql --docker-image registry.access.redhat.com/rhscl/mysql-57-rhel7:5.7-47
670 |
671 | oc get pod
672 |
673 | #Find the details of pod's error info
674 | oc logs PODNAME
675 |
676 | oc get deployment
677 |
678 | #Modify the mysql deployment to use the secret-pj secret
679 | oc set env deployment/mysql --from secret/secret-pj --prefix MYSQL_
680 |
681 | #Make sure your pod run normal
682 | oc get pod
683 | oc logs PODNAME
684 |
685 | #If needed, you can also mount the secret as files in the pod
686 | oc set volume deployment/mysql --add --type secret --mount-path /secret-mount-path --secret-name secret-pj
687 |
688 |
689 | ---VERIFY your mysql pod connection---
690 | oc rsh MYSQL-PODNAME
691 | $ mysql -u wpuser --password=redhat123 wordpress -e 'show databases;'
692 |
693 | ```
694 |
695 | 4. Create WordPress application that uses the MySQL database. Details as follow:
696 | - deployment name = wordpress
697 | - use image from quay.io/redhattraining/wordpress:5.3.0
698 | - Analyze the logs and fix the error
699 | - Update your secret and add some additional variables:
700 | - host = mysql
701 | - name = wordpress
702 | - in the real configuration, the WordPress container uses these variables as WORDPRESS_DB_HOST and WORDPRESS_DB_NAME
703 | - WordPress application has to use the `secret-pj` secret
704 | - Make sure the application can connect to the database. Check the route configuration, which can be accessed at `myblog.apps.ocp4.example.com`, etc.
705 | ```bash
706 | oc new-app --name wordpress --docker-image quay.io/redhattraining/wordpress:5.3.0
707 |
708 | #Analyze the pod condition and error logs
709 | oc get pod
710 | oc get deployment
711 |
712 | #Add variables for WordPress app to the secret
713 | oc set data secret/secret-pj --from-literal host=mysql --from-literal name=wordpress
714 |
715 | #Configure the secret-pj secret on the wordpress deployment
716 | oc set env deployment/wordpress --from secret/secret-pj --prefix WORDPRESS_DB_
717 |
718 | oc get pod
719 | NAME READY STATUS RESTARTS AGE
720 | wordpress-68c49c9d4-wq46g 0/1 CrashLoopBackOff 5 4m30s
721 |
722 | oc logs WORDPRESSPODNAME
723 | #Notice that we need to execute WordPress pod in a less secure SCC
724 | ...output omitted...
725 | (13)Permission denied: AH00072: make_sock: could not bind to address [::]:80
726 | (13)Permission denied: AH00072: make_sock: could not bind to address 0.0.0.0:80
727 | no listening sockets available, shutting down
728 | AH00015: Unable to open logs
729 |
730 | #To modify the SCC, we need to do the operation as admin
731 | oc login -u admin -p $ADMIN_PASSWORD
732 |
733 | #Check whether using a different SCC resolves the permissions problem.
734 | oc get pod WORDPRESSPODNAME -oyaml | oc adm policy scc-subject-review -f -
735 | RESOURCE ALLOWED BY
736 | Pod/wordpress-68c49c9d4-wq46g anyuid
737 |
738 | #Create serviceaccount for WordPress pod
739 | oc create serviceaccount wordpress-sa
740 |
741 | #Grant anyuid SCC to wordpress-sa
742 | oc adm policy add-scc-to-user anyuid wordpress-sa
743 |
744 | #Configure wordpress deployment to use the wordpress-sa serviceaccount
745 | oc set serviceaccount deployment/wordpress wordpress-sa
746 |
747 | #Check the pod status
748 | oc get pod --watch
749 |
750 | #Expose the wordpress service
751 | oc get service
752 | oc expose service wordpress --hostname myblog.apps.ocp4.example.com
753 |
754 | #Check
755 | curl -s http://myblog.apps.ocp4.example.com/wp-admin/install.php
756 |
757 | ```
758 |
759 |
760 |
761 |
762 |
763 | #### # Debug Pod and Problem Analysis
764 | Case scenario:
765 | 1. A MySQL database deployed by `deployment/mysql`.
766 | 2. A frontend application deployed by `deployment/frontend` that connects to the MySQL database.
767 | 3. The `mysql` and `frontend` Pods each have their own service resource.
768 | 4. Your task is to check the connection between the pods by using the service or pod private IP addresses.
769 |
770 | ```bash
771 | # Check the connection from frontend -> mysql via mysql-service
772 | oc get service mysql -oyaml | grep -i clusterip
773 |
774 | oc debug -t deployment/frontend
775 | $ curl -v telnet://MYSQLPOD-SERVICE-IP:3306
776 |
777 | # Check the connection from mysql -> frontend via frontend-service
778 | oc get service frontend -oyaml | grep -i clusterip
779 |
780 | oc debug -t deployment/mysql --image registry.access.redhat.com/ubi8/ubi:8.0
781 | $ curl -v http://FRONTEND-SERVICE-IP:8080
782 |
783 | # Check the connection from mysql pod -> frontend pod
784 | oc get pod -owide
785 | : find the ip for the frontend pod
786 |
787 | oc debug -t deployment/mysql --image registry.access.redhat.com/ubi8/ubi:8.0
788 | $ curl -v http://FRONTEND-POD-IP:8080
789 |
790 | ```
791 |
792 | ## # Securing Routes (Exposing application for external access)
793 | ### # Methods for Managing Ingress Traffic
794 | 1. Route
795 | - Routes provide ingress traffic to services in the cluster. Routes were created before Kubernetes ingress objects and provide more features. Routes provide advanced features that may not be supported by Kubernetes ingress controllers through a standard interface, such as TLS re-encryption, TLS passthrough, and split traffic for blue-green deployments.
796 | 2. Ingress
797 | - An ingress is a Kubernetes resource that provides some of the same features as routes (which are an OpenShift resource). Ingresses accept external requests and proxy them to services based on the rules defined in the ingress resource. Only certain types of traffic are allowed: HTTP, HTTPS and server name indication (SNI), and TLS with SNI. In OpenShift, routes are generated to meet the conditions specified by the ingress object.
798 |
799 | 
800 |
801 | ### # Securing Routes
802 | 1. Edge
803 | - TLS termination occurs at the router, before the traffic is routed to the pods, so connections from the router to the endpoints over the internal network are not encrypted.
804 | - By default, OpenShift assigns its own certificate to the router for TLS termination, but you can assign your own certificate instead.
805 | 2. Re-encryption
806 | - Re-encryption is a variation on edge termination, whereby the router terminates TLS with a certificate, and then re-encrypts its connection to the endpoint, which might have a different certificate. The full path of the connection is encrypted.
807 | 3. Passthrough
808 | - Encrypted traffic is sent straight to the destination pod without the router providing TLS termination.
809 | - Passthrough supports mutual authentication between the application and the client that accesses it.
810 |
811 | 
812 |
813 | ## # QUIZ: Configuring OpenShift Networking for applications
814 | #### # Answer: Configuring OpenShift Networking for applications
815 | 
816 | 1. Create an application with the following details:
817 | - Create the app as the `developer` user in the `network-ingress` project
818 | - Create the `todo-http1` deployment by using the container image quay.io/redhattraining/todo-angular:v1.1
819 | - Expose the service so that it can be accessed at `todo-http1.apps.ocp4.example.com`
820 | 2. Check that the connection is not encrypted by using the `tcpdump` command.
821 | 3. Create a new application configured with a passthrough route (encrypted access) by using the provided CA and a self-signed certificate.
822 | - CA = training-CA.pem, CA key = training-CA.key, passphrase file = passphrase.txt, extension file = training.ext
823 | - Create a customized certificate that is valid for 10 years
824 | - Configure the passthrough route as `todo-https-passthrough.apps.ocp4.example.com`, with port 8443
825 | - `create self-signed certificate -> configure the self-signed certificate as a tls-type secret -> create application pod using the tls-type secret`
826 | - Application yaml (todo-https.yaml):
827 |
828 |
829 | Yaml
830 |
831 | ```yaml
832 | apiVersion: apps/v1
833 | kind: Deployment
834 | metadata:
835 |   name: todo-https
836 |   labels:
837 |     app: todo-https
838 |     name: todo-https
839 | spec:
840 |   replicas: 1
841 |   selector:
842 |     matchLabels:
843 |       app: todo-https
844 |       name: todo-https
845 |   template:
846 |     metadata:
847 |       labels:
848 |         app: todo-https
849 |         name: todo-https
850 |     spec:
851 |       containers:
852 |       - name: todo-https
853 |         image: quay.io/redhattraining/todo-angular:v1.2
854 |         ports:
855 |         - containerPort: 8080
856 |           name: todo-http
857 |         - containerPort: 8443
858 |           name: todo-https
859 |         volumeMounts:
860 |         - name: tls-certs
861 |           readOnly: true
862 |           mountPath: /usr/local/etc/ssl/certs
863 |       volumes:
864 |       - name: tls-certs
865 |         secret:
866 |           secretName: todo-certs
867 | ```
868 |
869 |
870 |
871 |
872 | Answer
873 |
874 | 1. Create an application with the following details:
875 | - Create the app as the `developer` user in the `network-ingress` project
876 | - Create the `todo-http1` deployment by using the container image `quay.io/redhattraining/todo-angular:v1.1`
877 | - Expose the service so that it can be accessed at `todo-http1.apps.ocp4.example.com`
878 | ```bash
879 | oc login -u developer -p developer
880 |
881 | oc new-project network-ingress
882 |
883 | oc new-app --name todo-http1 --docker-image quay.io/redhattraining/todo-angular:v1.1
884 |
885 | oc expose service todo-http1 --hostname todo-http1.apps.ocp4.example.com
886 | ```
887 |
888 | 2. Check that the connection is not encrypted by using the `tcpdump` command.
889 | ```bash
890 | sudo tcpdump -i eth0 -A -n port 80
891 | ```
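
`tcpdump` only shows something while traffic is flowing, so generate a few plain-HTTP requests from another terminal while the capture runs (the interface name `eth0` may differ on your machine):

```bash
# In a second terminal, generate unencrypted traffic for the capture
curl http://todo-http1.apps.ocp4.example.com
# The HTTP headers and HTML body are readable in the tcpdump output,
# which confirms the connection is not encrypted
```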
892 |
893 | 3. Create a new application configured with a passthrough route (encrypted access) by using the provided CA and a self-signed certificate.
894 | - CA = training-CA.pem, CA key = training-CA.key, passphrase file = passphrase.txt, extension file = training.ext
895 | - Create a customized certificate that is valid for 10 years
896 | - Configure the passthrough route as `todo-https-passthrough.apps.ocp4.example.com`, with port 8443
897 | - `create self-signed certificate -> configure the self-signed certificate as a tls-type secret -> create application pod using the tls-type secret`
898 | - Application yaml (todo-https.yaml):
899 |
900 |
901 | ```bash
902 | #Generate key
903 | openssl genrsa -out training.key 2048
904 |
905 | #Generate CSR
906 | openssl req -new -key training.key -out training.csr
907 | #-> At the prompts, don't forget to set the Common Name (CN) to todo-https-passthrough.apps.ocp4.example.com
908 |
909 | #Generate the signed certificate
910 | openssl x509 -req -in training.csr \
911 | -CA training-CA.pem -CAkey training-CA.key -CAcreateserial \
912 | -passin file:passphrase.txt \
913 | -out training.crt -days 3650 -sha256 -extfile training.ext
914 |
915 | #Create TLS secret
916 | oc create secret tls todo-certs --cert training.crt --key training.key
917 |
918 | #Deploy application pod
919 | oc create -f todo-https.yaml
920 | #deployment and service created
921 |
922 | #Create passthrough route
923 | oc create route passthrough todo-https --service todo-https --port 8443 --hostname todo-https-passthrough.apps.ocp4.example.com
924 |
925 | #Check the connection to the route from the nodes
926 | curl -I --cacert training-CA.pem https://todo-https-passthrough.apps.ocp4.example.com
927 |
928 | #Check the connection from a browser. Notice that the certificate presented is the one you configured in the steps above
929 | ```
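
To inspect exactly which certificate the application presents through the passthrough route, `openssl s_client` can be used. A quick sketch, assuming the hostname configured for the passthrough route above:

```bash
# Show the certificate that is served end-to-end through the passthrough route
openssl s_client -connect todo-https-passthrough.apps.ocp4.example.com:443 \
  -servername todo-https-passthrough.apps.ocp4.example.com < /dev/null |
  openssl x509 -noout -subject -issuer -dates
```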
930 |
931 |
932 |
933 | ## # Controlling Pod Scheduling
934 | #### # Labeling Nodes
935 | ```bash
936 | #Add label
937 | oc label node master01 env=dev
938 | oc label node master01 env=dev --overwrite
939 |
940 | #Remove label
941 | oc label node master01 env-
942 |
943 | #Show labels
944 | oc get nodes --show-labels
945 |
946 | ```
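
Labels only affect scheduling once a workload selects them. A minimal sketch of listing the labeled nodes and pinning an existing deployment to them (the deployment name `myapp` is a placeholder):

```bash
# List only the nodes that carry the label
oc get nodes -l env=dev

# Add a nodeSelector to an existing deployment so its pods land on those nodes
oc patch deployment/myapp --patch \
  '{"spec":{"template":{"spec":{"nodeSelector":{"env":"dev"}}}}}'

# Confirm where the new pods were scheduled
oc get pod -o wide
```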
947 |
948 | #### # Configuring Node selector for a project
949 | ```bash
950 | oc adm new-project PROJECTNAME --node-selector "tier=1"
951 | ```
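
The project-level selector is stored as the `openshift.io/node-selector` annotation on the namespace, which is also how it can be inspected or changed later. A sketch, assuming a project named `demo`:

```bash
# The selector is recorded on the namespace
oc describe namespace demo | grep node-selector

# Change it afterwards by updating the annotation
oc annotate namespace demo openshift.io/node-selector="tier=2" --overwrite
```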
952 |
953 |
954 | ## # Pod Scheduling
955 | #### # Taints and Tolerations
956 |
957 | - Default tolerations
958 | Kubernetes adds the following tolerations to pods by default:
959 | ```yaml
960 | kind: Pod
961 | spec:
962 |   tolerations:
963 |   - effect: NoExecute
964 |     key: node.kubernetes.io/not-ready
965 |     operator: Exists
966 |     tolerationSeconds: 300
967 |   - effect: NoExecute
968 |     key: node.kubernetes.io/unreachable
969 |     operator: Exists
970 |     tolerationSeconds: 300
971 |   containers:
972 |   ...
973 | ```
974 |
975 | - A cordoned node has the following taint:
976 | ```bash
977 | Taints: node.kubernetes.io/unschedulable:NoSchedule
978 | ```
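
Taints are applied and removed with `oc adm taint`. A minimal sketch, assuming a worker node named `worker01` and an arbitrary `env=dev` key/value pair; a pod opts back onto the tainted node with a matching toleration (key `env`, operator `Equal`, value `dev`, effect `NoSchedule`):

```bash
# Taint the node: only pods that tolerate env=dev can be scheduled there
oc adm taint nodes worker01 env=dev:NoSchedule

# Remove the taint again (note the trailing dash)
oc adm taint nodes worker01 env-
```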
979 |
980 |
981 |
982 | ## # Quiz: Create TLS secured route apps
983 | ### # Step by step
984 | ```bash
985 | # 1. Prepare the CA (myCA.key and myCA.crt)
986 | openssl genrsa -out myCA.key 2048
987 | openssl req -x509 -new -key myCA.key -out myCA.crt
988 | #    CountryName: XX
989 | #    State: Tokyo
990 | #    City: Shinagawa
991 | #    Company: XX
992 | #    CN:
993 |
994 | # 2. Prepare the key and certificate signing request (CSR) for the secured route (myRoute.key, myRoute.csr)
995 | openssl genrsa -out myRoute.key 2048
996 | openssl req -new -key myRoute.key -out myRoute.csr
997 | #    CountryName: XX
998 | #    State: Tokyo
999 | #    City: Shinagawa
1000 | #    Company: XX
1001 | #    CN: todo-https.apps.ocp4.example.com
1002 |
1003 | # 3. Sign the route certificate with the self-created CA (output: myRoute.crt)
1004 | openssl x509 -req -in myRoute.csr -CA myCA.crt -CAkey myCA.key -CAcreateserial -out myRoute.crt
1005 |
1006 | # 4. Check the created certificate
1007 | openssl x509 -in myRoute.crt -noout -text
1008 |
1009 | # 5. Create a TLS secret (todo-certs) from the above certificate and key
1010 | oc create secret tls todo-certs --cert ./myRoute.crt --key ./myRoute.key
1011 |
1012 | # 6. Deploy the app with the provided YAML
1013 | oc create -f secure-app.yaml
1014 |
1015 | # 7. Expose the app with a secure passthrough route
1016 | oc create route passthrough todo-https --service todo-https --port 8443 --hostname todo-https.apps.ocp4.example.com
1017 |
1018 | # 8. Check the connection
1019 | # from the node
1020 | curl -I --cacert ./myCA.crt https://todo-https.apps.ocp4.example.com
1021 | ```
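
If you need to double-check what ended up in the TLS secret or on the route afterwards, both can be inspected without recreating anything. A quick sketch:

```bash
# Dump the secret's certificate and key into the current directory
oc extract secret/todo-certs --to=.

# Review the route and its TLS termination settings
oc get route todo-https -o yaml
```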
1022 |
1023 |
1024 | ## # AAA
1025 | #### # BBB
1026 |
1027 | #### # Template
1028 |
1029 | template
1030 |
1031 |
--------------------------------------------------------------------------------