├── barista
├── deployment
│ └── .gitkeep
├── src
│ └── main
│ │ ├── java
│ │ └── com
│ │ │ └── sebastian_daschner
│ │ │ └── barista
│ │ │ ├── JAXRSConfiguration.java
│ │ │ ├── boundary
│ │ │ ├── HealthResource.java
│ │ │ ├── CoffeeBrews.java
│ │ │ └── BrewsResource.java
│ │ │ ├── entity
│ │ │ └── CoffeeBrew.java
│ │ │ └── control
│ │ │ └── RandomStatusProcessor.java
│ │ └── webapp
│ │ └── WEB-INF
│ │ └── beans.xml
├── liberty
│ ├── prepare.sh
│ └── server.xml
└── pom.xml
├── coffee-shop
├── deployment
│ └── .gitkeep
├── src
│ └── main
│ │ ├── webapp
│ │ └── WEB-INF
│ │ │ └── beans.xml
│ │ └── java
│ │ └── com
│ │ └── sebastian_daschner
│ │ └── coffee_shop
│ │ ├── JAXRSConfiguration.java
│ │ ├── entity
│ │ ├── BrewLocation.java
│ │ ├── CoffeeType.java
│ │ ├── OrderStatus.java
│ │ └── CoffeeOrder.java
│ │ ├── control
│ │ ├── OrderProcessor.java
│ │ ├── OrderProcessTimer.java
│ │ ├── Orders.java
│ │ └── Barista.java
│ │ ├── CoffeeTypeDeserializer.java
│ │ ├── BrewLocationDeserializer.java
│ │ └── boundary
│ │ ├── HealthResource.java
│ │ ├── CoffeeShop.java
│ │ └── OrdersResource.java
├── liberty
│ ├── prepare.sh
│ └── server.xml
└── pom.xml
├── .gitignore
├── README.adoc
└── workshop
├── 08-conclusion.adoc
├── 01-introduction.adoc
├── 05-istio-observability.adoc
├── 07-resiliency.adoc
├── 06-istio-routing.adoc
├── 00-prerequisites.adoc
├── 04-istio.adoc
├── 02-docker.adoc
└── 03-kubernetes.adoc
/barista/deployment/.gitkeep:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/coffee-shop/deployment/.gitkeep:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | target/
2 | pom.xml.tag
3 | pom.xml.releaseBackup
4 | pom.xml.versionsBackup
5 | pom.xml.next
6 | release.properties
7 |
8 | *.iml
9 | .idea/
10 |
11 | barista/liberty/extension/
12 | coffee-shop/liberty/extension/
13 |
--------------------------------------------------------------------------------
/barista/src/main/java/com/sebastian_daschner/barista/JAXRSConfiguration.java:
--------------------------------------------------------------------------------
1 | package com.sebastian_daschner.barista;
2 |
3 | import javax.ws.rs.ApplicationPath;
4 | import javax.ws.rs.core.Application;
5 |
/**
 * Activates JAX-RS for the barista application and maps all REST
 * resources under the {@code resources} base path
 * (i.e. {@code /barista/resources/...}).
 */
@ApplicationPath("resources")
public class JAXRSConfiguration extends Application {

    // nothing to configure

}
12 |
--------------------------------------------------------------------------------
/barista/src/main/webapp/WEB-INF/beans.xml:
--------------------------------------------------------------------------------
1 |
2 |
6 |
7 |
--------------------------------------------------------------------------------
/coffee-shop/src/main/webapp/WEB-INF/beans.xml:
--------------------------------------------------------------------------------
1 |
2 |
6 |
7 |
--------------------------------------------------------------------------------
/coffee-shop/src/main/java/com/sebastian_daschner/coffee_shop/JAXRSConfiguration.java:
--------------------------------------------------------------------------------
1 | package com.sebastian_daschner.coffee_shop;
2 |
3 | import javax.ws.rs.ApplicationPath;
4 | import javax.ws.rs.core.Application;
5 |
/**
 * Activates JAX-RS for the coffee-shop application and maps all REST
 * resources under the {@code resources} base path
 * (i.e. {@code /coffee-shop/resources/...}).
 */
@ApplicationPath("resources")
public class JAXRSConfiguration extends Application {

    // nothing to configure

}
12 |
--------------------------------------------------------------------------------
/barista/liberty/prepare.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Downloads the Zipkin OpenTracing sample extension for Open Liberty.
# Fetched at build time to avoid storing the jar files in VCS.

# fail fast BEFORE changing directories, so a failed cd aborts the script
set -eu

# run relative to this script's directory (quoted: path may contain spaces)
cd "${0%/*}"

zip_name=liberty-opentracing-zipkintracer-1.3-sample.zip

curl -L "https://github.com/WASdev/sample.opentracing.zipkintracer/releases/download/1.3/${zip_name}" \
  -o "${zip_name}"
unzip "${zip_name}"
rm "${zip_name}"
11 |
--------------------------------------------------------------------------------
/coffee-shop/liberty/prepare.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Downloads the Zipkin OpenTracing sample extension for Open Liberty.
# Fetched at build time to avoid storing the jar files in VCS.

# fail fast BEFORE changing directories, so a failed cd aborts the script
set -eu

# run relative to this script's directory (quoted: path may contain spaces)
cd "${0%/*}"

zip_name=liberty-opentracing-zipkintracer-1.3-sample.zip

curl -L "https://github.com/WASdev/sample.opentracing.zipkintracer/releases/download/1.3/${zip_name}" \
  -o "${zip_name}"
unzip "${zip_name}"
rm "${zip_name}"
11 |
--------------------------------------------------------------------------------
/coffee-shop/src/main/java/com/sebastian_daschner/coffee_shop/entity/BrewLocation.java:
--------------------------------------------------------------------------------
1 | package com.sebastian_daschner.coffee_shop.entity;
2 |
3 | import java.util.stream.Stream;
4 |
/**
 * Where a coffee order is brewed.
 */
public enum BrewLocation {

    OFFICE,
    HOME;

    /**
     * Resolves a brew location from its case-insensitive name.
     * Kept (rather than renamed) because callers use it and it also serves
     * as a {@code of(String)} factory recognized by config converters.
     *
     * @param string the location name, e.g. {@code "home"}; may be {@code null}
     * @return the matching location, or {@code null} if none matches
     */
    public static BrewLocation of(String string) {
        return Stream.of(BrewLocation.values())
                .filter(t -> t.name().equalsIgnoreCase(string))
                .findAny().orElse(null);
    }

    /**
     * Alias for {@link #of(String)}, added for naming consistency with the
     * sibling enums' {@code fromString} factories.
     */
    public static BrewLocation fromString(String string) {
        return of(string);
    }

}
17 |
--------------------------------------------------------------------------------
/coffee-shop/src/main/java/com/sebastian_daschner/coffee_shop/entity/CoffeeType.java:
--------------------------------------------------------------------------------
1 | package com.sebastian_daschner.coffee_shop.entity;
2 |
3 | import java.util.stream.Stream;
4 |
/**
 * The kind of coffee a customer can order.
 */
public enum CoffeeType {

    ESPRESSO,
    LATTE,
    POUR_OVER;

    /**
     * Resolves a coffee type from its case-insensitive name.
     *
     * @param string the type name; may be {@code null}
     * @return the matching type, or {@code null} if no constant matches
     */
    public static CoffeeType fromString(String string) {
        for (CoffeeType candidate : values()) {
            if (candidate.name().equalsIgnoreCase(string))
                return candidate;
        }
        return null;
    }

}
18 |
--------------------------------------------------------------------------------
/coffee-shop/src/main/java/com/sebastian_daschner/coffee_shop/entity/OrderStatus.java:
--------------------------------------------------------------------------------
1 | package com.sebastian_daschner.coffee_shop.entity;
2 |
3 | import java.util.stream.Stream;
4 |
/**
 * Life-cycle stage of a coffee order: brewing, ready, or picked up.
 */
public enum OrderStatus {

    PREPARING,
    FINISHED,
    COLLECTED;

    /**
     * Resolves an order status from its case-insensitive name.
     *
     * @param string the status name; may be {@code null}
     * @return the matching status, or {@code null} if no constant matches
     */
    public static OrderStatus fromString(String string) {
        for (OrderStatus status : values()) {
            if (status.name().equalsIgnoreCase(string))
                return status;
        }
        return null;
    }

}
18 |
--------------------------------------------------------------------------------
/coffee-shop/src/main/java/com/sebastian_daschner/coffee_shop/control/OrderProcessor.java:
--------------------------------------------------------------------------------
1 | package com.sebastian_daschner.coffee_shop.control;
2 |
3 |
4 | import com.sebastian_daschner.coffee_shop.entity.CoffeeOrder;
5 | import com.sebastian_daschner.coffee_shop.entity.OrderStatus;
6 |
7 | import javax.inject.Inject;
8 |
9 | public class OrderProcessor {
10 |
11 | @Inject
12 | Barista barista;
13 |
14 | public void processOrder(CoffeeOrder order) {
15 | OrderStatus status = barista.retrieveBrewStatus(order);
16 | order.setStatus(status);
17 | }
18 |
19 | }
20 |
--------------------------------------------------------------------------------
/coffee-shop/src/main/java/com/sebastian_daschner/coffee_shop/CoffeeTypeDeserializer.java:
--------------------------------------------------------------------------------
1 | package com.sebastian_daschner.coffee_shop;
2 |
3 | import com.sebastian_daschner.coffee_shop.entity.CoffeeType;
4 |
5 | import javax.json.bind.adapter.JsonbAdapter;
6 |
7 | public class CoffeeTypeDeserializer implements JsonbAdapter {
8 |
9 | @Override
10 | public String adaptToJson(CoffeeType type) {
11 | return type.name();
12 | }
13 |
14 | @Override
15 | public CoffeeType adaptFromJson(String type) {
16 | return CoffeeType.fromString(type);
17 | }
18 | }
19 |
--------------------------------------------------------------------------------
/coffee-shop/src/main/java/com/sebastian_daschner/coffee_shop/BrewLocationDeserializer.java:
--------------------------------------------------------------------------------
1 | package com.sebastian_daschner.coffee_shop;
2 |
3 |
4 | import com.sebastian_daschner.coffee_shop.entity.BrewLocation;
5 |
6 | import javax.json.bind.adapter.JsonbAdapter;
7 |
8 | public class BrewLocationDeserializer implements JsonbAdapter {
9 |
10 | @Override
11 | public String adaptToJson(BrewLocation type) {
12 | return type.name();
13 | }
14 |
15 | @Override
16 | public BrewLocation adaptFromJson(String type) {
17 | return BrewLocation.of(type);
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/barista/liberty/server.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | jakartaee-8.0
6 | microProfile-3.0
7 | usr:opentracingZipkin-0.31
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 | admin
16 |
17 |
18 |
19 |
--------------------------------------------------------------------------------
/coffee-shop/liberty/server.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | jakartaee-8.0
6 | microProfile-3.0
7 | usr:opentracingZipkin-0.31
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 | admin
16 |
17 |
18 |
19 |
--------------------------------------------------------------------------------
/barista/src/main/java/com/sebastian_daschner/barista/boundary/HealthResource.java:
--------------------------------------------------------------------------------
1 | package com.sebastian_daschner.barista.boundary;
2 |
3 | import org.eclipse.microprofile.health.HealthCheck;
4 | import org.eclipse.microprofile.health.HealthCheckResponse;
5 | import org.eclipse.microprofile.health.Readiness;
6 |
7 | import javax.enterprise.context.ApplicationScoped;
8 |
/**
 * MicroProfile Health readiness check for the barista service.
 * Always reports the service as up with a "barista: ok" data entry.
 */
@Readiness
@ApplicationScoped
public class HealthResource implements HealthCheck {

    @Override
    public HealthCheckResponse call() {
        return HealthCheckResponse.named("barista").withData("barista", "ok").up().build();
    }
}
18 |
--------------------------------------------------------------------------------
/coffee-shop/src/main/java/com/sebastian_daschner/coffee_shop/control/OrderProcessTimer.java:
--------------------------------------------------------------------------------
1 | package com.sebastian_daschner.coffee_shop.control;
2 |
3 | import com.sebastian_daschner.coffee_shop.boundary.CoffeeShop;
4 |
5 | import javax.ejb.Schedule;
6 | import javax.ejb.Singleton;
7 | import javax.ejb.Startup;
8 | import javax.inject.Inject;
9 |
/**
 * Periodically triggers processing of all orders that are not yet collected.
 * Eagerly instantiated ({@code @Startup}) so the timer runs from deployment.
 */
@Singleton
@Startup
public class OrderProcessTimer {

    @Inject
    CoffeeShop coffeeShop;

    // EJB timer firing every 10 seconds; non-persistent, so missed
    // executions are not replayed after a server restart
    @Schedule(second = "*/10", minute = "*", hour = "*", persistent = false)
    public void processOrder() {
        coffeeShop.processUnfinishedOrders();
    }

}
23 |
--------------------------------------------------------------------------------
/coffee-shop/src/main/java/com/sebastian_daschner/coffee_shop/boundary/HealthResource.java:
--------------------------------------------------------------------------------
1 | package com.sebastian_daschner.coffee_shop.boundary;
2 |
3 | import org.eclipse.microprofile.health.HealthCheck;
4 | import org.eclipse.microprofile.health.HealthCheckResponse;
5 | import org.eclipse.microprofile.health.Readiness;
6 |
7 | import javax.enterprise.context.ApplicationScoped;
8 |
/**
 * MicroProfile Health readiness check for the coffee-shop service.
 * Always reports the service as up with a "coffee-shop: ok" data entry.
 */
@Readiness
@ApplicationScoped
public class HealthResource implements HealthCheck {

    @Override
    public HealthCheckResponse call() {
        return HealthCheckResponse.named("coffee-shop").withData("coffee-shop", "ok").up().build();
    }
}
18 |
--------------------------------------------------------------------------------
/README.adoc:
--------------------------------------------------------------------------------
1 | == Enterprise Coffee
2 |
This is a hands-on workshop on how to run containerized Java applications in Docker, Kubernetes, and Istio.
4 |
5 | It will guide you through creating containerized workloads and how to deploy your Enterprise Java to the cloud.
6 |
The workshop comprises two applications, _coffee-shop_ and _barista_, built with Java EE and MicroProfile, which are deployed to a Kubernetes and Istio cluster.
8 |
9 | The contents of the workshop start with the link:workshop/00-prerequisites.adoc[prerequisites].
10 |
11 |
12 | === Finish
13 |
14 | After you've finished the walk-through you can compare your result with the https://github.com/sdaschner/istio-workshop/tree/finish[finished version^].
15 |
--------------------------------------------------------------------------------
/barista/src/main/java/com/sebastian_daschner/barista/entity/CoffeeBrew.java:
--------------------------------------------------------------------------------
1 | package com.sebastian_daschner.barista.entity;
2 |
/**
 * A single coffee brew handled by the barista.
 * Status values are plain strings: PREPARING, FINISHED, COLLECTED.
 */
public class CoffeeBrew {

    private String type;
    // every brew starts its life in the PREPARING stage
    private String status = "PREPARING";

    /** Creates a brew with no coffee type set yet. */
    public CoffeeBrew() {
    }

    /** Creates a brew for the given coffee type, starting as PREPARING. */
    public CoffeeBrew(String type) {
        this.type = type;
    }

    public String getType() {
        return type;
    }

    public void setType(String type) {
        this.type = type;
    }

    public String getStatus() {
        return status;
    }

    public void setStatus(String status) {
        this.status = status;
    }

}
32 |
--------------------------------------------------------------------------------
/barista/src/main/java/com/sebastian_daschner/barista/control/RandomStatusProcessor.java:
--------------------------------------------------------------------------------
1 | package com.sebastian_daschner.barista.control;
2 |
3 | import com.sebastian_daschner.barista.entity.CoffeeBrew;
4 |
5 | import java.util.Random;
6 |
7 | public class RandomStatusProcessor {
8 |
9 | private static final double PERCENTAGE = 0.4;
10 |
11 | public String processStatus(CoffeeBrew brew) {
12 | String status = brew.getStatus();
13 | if (randomSelection())
14 | return updateStatus(brew);
15 | return status;
16 | }
17 |
18 | private boolean randomSelection() {
19 | return new Random().nextDouble() <= PERCENTAGE;
20 | }
21 |
22 | private String updateStatus(CoffeeBrew brew) {
23 | switch (brew.getStatus()) {
24 | case "PREPARING":
25 | return "FINISHED";
26 | case "FINISHED":
27 | return "COLLECTED";
28 | case "COLLECTED":
29 | return "COLLECTED";
30 | default:
31 | throw new IllegalArgumentException("Unknown status " + brew.getStatus());
32 | }
33 | }
34 |
35 | }
36 |
--------------------------------------------------------------------------------
/workshop/08-conclusion.adoc:
--------------------------------------------------------------------------------
1 | == Conclusion
2 |
3 | We've now seen some of the benefits of cloud native technologies in action.
4 |
Containers and Docker simplify and unify the way we build and ship our application binaries.
6 | Orchestration and Kubernetes organizes our workloads across large cluster environments and provides abstractions for our microservices.
Service meshes and Istio enable us to enhance our services and add required cross-cutting concerns in a transparent way, without much change on our side.
8 | They also improve the testability of applications, by making it possible to inject failures and delays to provoke error situations.
9 |
10 | You've seen how to integrate our Enterprise Java applications into cloud native technologies.
11 | You have access to all code examples and infrastructure-as-code snippets.
12 | Now, take your learnings further and go build something cool!
13 |
14 |
15 | === Further resources
16 |
17 | - https://docs.docker.com/engine/reference/builder/[Dockerfile reference^]
18 | - https://kubernetes.io/docs/home/[Kubernetes documentation^]
19 | - https://istio.io/docs/[Istio docs^]
20 | - https://github.com/IBM/cloud-native-starter[Cloud-native starter^]
21 |
--------------------------------------------------------------------------------
/barista/src/main/java/com/sebastian_daschner/barista/boundary/CoffeeBrews.java:
--------------------------------------------------------------------------------
1 | package com.sebastian_daschner.barista.boundary;
2 |
3 | import com.sebastian_daschner.barista.control.RandomStatusProcessor;
4 | import com.sebastian_daschner.barista.entity.CoffeeBrew;
5 | import org.eclipse.microprofile.config.inject.ConfigProperty;
6 |
7 | import javax.enterprise.context.ApplicationScoped;
8 | import javax.inject.Inject;
9 | import java.util.Map;
10 | import java.util.concurrent.ConcurrentHashMap;
11 |
12 | @ApplicationScoped
13 | public class CoffeeBrews {
14 |
15 | private final Map coffeeBrews = new ConcurrentHashMap<>();
16 |
17 | @Inject
18 | RandomStatusProcessor statusProcessor;
19 |
20 | public CoffeeBrew startBrew(String id, String coffeeType) {
21 | System.out.println("starting to brew: " + coffeeType);
22 |
23 | CoffeeBrew brew = new CoffeeBrew(coffeeType);
24 | coffeeBrews.put(id, brew);
25 |
26 | return brew;
27 | }
28 |
29 | public CoffeeBrew retrieveBrew(String id) {
30 | System.out.println("retrieving brew: " + id);
31 |
32 | CoffeeBrew brew = coffeeBrews.get(id);
33 |
34 | if (brew == null)
35 | return null;
36 |
37 | brew.setStatus(statusProcessor.processStatus(brew));
38 |
39 | return brew;
40 | }
41 |
42 | }
43 |
--------------------------------------------------------------------------------
/barista/pom.xml:
--------------------------------------------------------------------------------
1 |
3 | 4.0.0
4 | com.sebastian-daschner
5 | barista
6 | 1.0-SNAPSHOT
7 | war
8 |
9 |
10 |
11 | jakarta.platform
12 | jakarta.jakartaee-api
13 | 8.0.0
14 | provided
15 |
16 |
17 | org.eclipse.microprofile
18 | microprofile
19 | 3.0
20 | provided
21 | pom
22 |
23 |
24 |
25 |
26 | barista
27 |
28 |
29 |
30 | 1.8
31 | 1.8
32 | false
33 | UTF-8
34 |
35 |
36 |
37 |
--------------------------------------------------------------------------------
/coffee-shop/src/main/java/com/sebastian_daschner/coffee_shop/control/Orders.java:
--------------------------------------------------------------------------------
1 | package com.sebastian_daschner.coffee_shop.control;
2 |
3 | import com.sebastian_daschner.coffee_shop.entity.CoffeeOrder;
4 | import com.sebastian_daschner.coffee_shop.entity.OrderStatus;
5 |
6 | import javax.ejb.ConcurrencyManagement;
7 | import javax.ejb.ConcurrencyManagementType;
8 | import javax.ejb.Singleton;
9 | import java.util.List;
10 | import java.util.Map;
11 | import java.util.UUID;
12 | import java.util.concurrent.ConcurrentHashMap;
13 | import java.util.stream.Collectors;
14 |
15 | @Singleton
16 | @ConcurrencyManagement(ConcurrencyManagementType.BEAN)
17 | public class Orders {
18 |
19 | private final ConcurrentHashMap orders = new ConcurrentHashMap<>();
20 |
21 | public List retrieveAll() {
22 | return orders.entrySet().stream()
23 | .map(Map.Entry::getValue)
24 | .collect(Collectors.toList());
25 | }
26 |
27 | public CoffeeOrder retrieve(UUID id) {
28 | return orders.get(id);
29 | }
30 |
31 | public void store(UUID id, CoffeeOrder order) {
32 | orders.put(id, order);
33 | }
34 |
35 | public List getUnfinishedOrders() {
36 | return orders.values().stream()
37 | .filter(o -> o.getStatus() != OrderStatus.COLLECTED)
38 | .collect(Collectors.toList());
39 | }
40 |
41 | }
42 |
--------------------------------------------------------------------------------
/coffee-shop/pom.xml:
--------------------------------------------------------------------------------
1 |
3 | 4.0.0
4 | com.sebastian-daschner
5 | coffee-shop
6 | 1.0-SNAPSHOT
7 | war
8 |
9 |
10 |
11 | jakarta.platform
12 | jakarta.jakartaee-api
13 | 8.0.0
14 | provided
15 |
16 |
17 | org.eclipse.microprofile
18 | microprofile
19 | 3.0
20 | provided
21 | pom
22 |
23 |
24 |
25 |
26 | coffee-shop
27 |
28 |
29 |
30 | 1.8
31 | 1.8
32 | false
33 | UTF-8
34 |
35 |
36 |
37 |
--------------------------------------------------------------------------------
/barista/src/main/java/com/sebastian_daschner/barista/boundary/BrewsResource.java:
--------------------------------------------------------------------------------
1 | package com.sebastian_daschner.barista.boundary;
2 |
3 | import com.sebastian_daschner.barista.entity.CoffeeBrew;
4 |
5 | import javax.inject.Inject;
6 | import javax.json.Json;
7 | import javax.json.JsonObject;
8 | import javax.ws.rs.*;
9 | import javax.ws.rs.core.Response;
10 |
11 | @Path("/brews/{id}")
12 | public class BrewsResource {
13 |
14 | @Inject
15 | CoffeeBrews coffeeBrews;
16 |
17 | @PathParam("id")
18 | String id;
19 |
20 | @GET
21 | public Response retrieveCoffeeBrew() {
22 | CoffeeBrew brew = coffeeBrews.retrieveBrew(id);
23 |
24 | if (brew == null)
25 | return Response.status(Response.Status.NOT_FOUND).build();
26 |
27 | return Response.ok(buildResponse(brew)).build();
28 | }
29 |
30 | @PUT
31 | public Response updateCoffeeBrew(JsonObject jsonObject) {
32 | String coffeeType = jsonObject.getString("type", null);
33 |
34 | if (coffeeType == null)
35 | throw new BadRequestException();
36 |
37 | CoffeeBrew brew = coffeeBrews.startBrew(id, coffeeType);
38 |
39 | return Response.status(Response.Status.CREATED)
40 | .entity(buildResponse(brew))
41 | .build();
42 | }
43 |
44 | private JsonObject buildResponse(CoffeeBrew brew) {
45 | return Json.createObjectBuilder()
46 | .add("status", brew.getStatus())
47 | .add("type", brew.getType())
48 | .build();
49 | }
50 |
51 | }
52 |
--------------------------------------------------------------------------------
/coffee-shop/src/main/java/com/sebastian_daschner/coffee_shop/entity/CoffeeOrder.java:
--------------------------------------------------------------------------------
1 | package com.sebastian_daschner.coffee_shop.entity;
2 |
3 | import com.sebastian_daschner.coffee_shop.BrewLocationDeserializer;
4 | import com.sebastian_daschner.coffee_shop.CoffeeTypeDeserializer;
5 |
6 | import javax.json.bind.annotation.JsonbTransient;
7 | import javax.json.bind.annotation.JsonbTypeAdapter;
8 | import javax.validation.constraints.NotNull;
9 | import java.util.UUID;
10 |
/**
 * A customer's coffee order, serialized to/from JSON via JSON-B.
 * The ID is generated server-side and excluded from the JSON payload.
 */
public class CoffeeOrder {

    // generated once per order; not part of the JSON representation
    @JsonbTransient
    private final UUID id = UUID.randomUUID();

    // required field; (de-)serialized case-insensitively via the adapter
    @NotNull
    @JsonbTypeAdapter(CoffeeTypeDeserializer.class)
    private CoffeeType type;

    private OrderStatus status;

    // optional; (de-)serialized case-insensitively via the adapter
    @JsonbTypeAdapter(BrewLocationDeserializer.class)
    private BrewLocation location;

    public UUID getId() {
        return id;
    }

    public CoffeeType getType() {
        return type;
    }

    public void setType(CoffeeType type) {
        this.type = type;
    }

    public OrderStatus getStatus() {
        return status;
    }

    public void setStatus(OrderStatus status) {
        this.status = status;
    }

    public BrewLocation getLocation() {
        return location;
    }

    public void setLocation(BrewLocation location) {
        this.location = location;
    }

    @Override
    public String toString() {
        return "CoffeeOrder{" +
                "id=" + id +
                ", type=" + type +
                ", status=" + status +
                ", location=" + location +
                '}';
    }
}
63 |
--------------------------------------------------------------------------------
/coffee-shop/src/main/java/com/sebastian_daschner/coffee_shop/boundary/CoffeeShop.java:
--------------------------------------------------------------------------------
1 | package com.sebastian_daschner.coffee_shop.boundary;
2 |
3 | import com.sebastian_daschner.coffee_shop.control.Barista;
4 | import com.sebastian_daschner.coffee_shop.control.OrderProcessor;
5 | import com.sebastian_daschner.coffee_shop.control.Orders;
6 | import com.sebastian_daschner.coffee_shop.entity.BrewLocation;
7 | import com.sebastian_daschner.coffee_shop.entity.CoffeeOrder;
8 | import com.sebastian_daschner.coffee_shop.entity.OrderStatus;
9 | import org.eclipse.microprofile.config.inject.ConfigProperty;
10 |
11 | import javax.ejb.Stateless;
12 | import javax.inject.Inject;
13 | import java.util.List;
14 | import java.util.UUID;
15 |
16 | @Stateless
17 | public class CoffeeShop {
18 |
19 | @Inject
20 | Orders orders;
21 |
22 | @Inject
23 | Barista barista;
24 |
25 | @Inject
26 | OrderProcessor orderProcessor;
27 |
28 | @Inject
29 | @ConfigProperty(name = "location", defaultValue = "HOME")
30 | private BrewLocation location;
31 |
32 | public List getOrders() {
33 | return orders.retrieveAll();
34 | }
35 |
36 | public CoffeeOrder getOrder(UUID id) {
37 | return orders.retrieve(id);
38 | }
39 |
40 | public CoffeeOrder orderCoffee(CoffeeOrder order) {
41 | setDefaultLocation(order);
42 | OrderStatus status = barista.brewCoffee(order);
43 | order.setStatus(status);
44 |
45 | orders.store(order.getId(), order);
46 | return order;
47 | }
48 |
49 | private void setDefaultLocation(CoffeeOrder order) {
50 | if (order.getLocation() == null)
51 | order.setLocation(location);
52 | }
53 |
54 | public void processUnfinishedOrders() {
55 | orders.getUnfinishedOrders().forEach(orderProcessor::processOrder);
56 | }
57 |
58 | }
59 |
--------------------------------------------------------------------------------
/coffee-shop/src/main/java/com/sebastian_daschner/coffee_shop/boundary/OrdersResource.java:
--------------------------------------------------------------------------------
1 | package com.sebastian_daschner.coffee_shop.boundary;
2 |
3 | import com.sebastian_daschner.coffee_shop.entity.CoffeeOrder;
4 |
5 | import javax.inject.Inject;
6 | import javax.json.Json;
7 | import javax.json.JsonArray;
8 | import javax.json.JsonObject;
9 | import javax.json.stream.JsonCollectors;
10 | import javax.servlet.http.HttpServletRequest;
11 | import javax.validation.Valid;
12 | import javax.validation.constraints.NotNull;
13 | import javax.ws.rs.*;
14 | import javax.ws.rs.core.Context;
15 | import javax.ws.rs.core.MediaType;
16 | import javax.ws.rs.core.Response;
17 | import javax.ws.rs.core.UriInfo;
18 | import java.net.URI;
19 | import java.util.UUID;
20 |
/**
 * REST resource exposing coffee orders: list, lookup by ID, and creation.
 */
@Path("/orders")
@Produces(MediaType.APPLICATION_JSON)
@Consumes(MediaType.APPLICATION_JSON)
public class OrdersResource {

    @Inject
    CoffeeShop coffeeShop;

    @Context
    UriInfo uriInfo;

    @Context
    HttpServletRequest request;

    /** Lists all orders, each with a "_self" link to its own resource. */
    @GET
    public JsonArray getOrders() {
        return coffeeShop.getOrders().stream()
                .map(this::buildOrder)
                .collect(JsonCollectors.toJsonArray());
    }

    // JSON shape for the list view; full entity is served by getOrder(id)
    private JsonObject buildOrder(CoffeeOrder order) {
        return Json.createObjectBuilder()
                .add("type", order.getType().name())
                .add("status", order.getStatus().name())
                .add("_self", buildUri(order).toString())
                .build();
    }

    /** Returns a single order by its UUID. */
    @GET
    @Path("{id}")
    public CoffeeOrder getOrder(@PathParam("id") UUID id) {
        return coffeeShop.getOrder(id);
    }

    /** Places a validated order; responds 201 with a Location header. */
    @POST
    public Response orderCoffee(@Valid @NotNull CoffeeOrder order) {
        final CoffeeOrder storedOrder = coffeeShop.orderCoffee(order);
        return Response.created(buildUri(storedOrder)).build();
    }

    // builds the order's URI from the @Path of the getOrder method (looked up
    // by name — keep in sync if that method is renamed); host/port are taken
    // from the incoming request so links survive proxying
    private URI buildUri(CoffeeOrder order) {
        return uriInfo.getBaseUriBuilder()
                .host(request.getServerName())
                .port(request.getServerPort())
                .path(OrdersResource.class)
                .path(OrdersResource.class, "getOrder")
                .build(order.getId());
    }
}
71 |
--------------------------------------------------------------------------------
/workshop/01-introduction.adoc:
--------------------------------------------------------------------------------
1 | == Introduction
2 |
3 | As part of this workshop, we will see how to develop cloud-native microservices using Jakarta EE and MicroProfile, that are deployed using Docker, Kubernetes, and Istio.
We'll examine the basics of modern Enterprise Java services, as well as scalability, resiliency, observability, traffic routing, and configuration.
5 |
6 |
7 | === Example application
8 |
9 | Our microservices example consists of two services, _coffee-shop_ and _barista_ which are both developed as enterprise applications using Jakarta EE and MicroProfile.
10 | Both services are developed and deployed individually; they communicate via HTTP (REST) and are not further coupled, logically nor technologically.
11 |
12 | The applications are deployed to Open Liberty, an open-source application server that supports a huge variety of Enterprise Java technologies, including Jakarta EE and MicroProfile.
13 |
14 |
15 | === Getting started
16 |
17 | To get started, clone this Git repository and use the projects that are provided inside:
18 |
19 | ----
20 | git clone https://github.com/sdaschner/istio-workshop
21 | cd istio-workshop/
22 | ----
23 |
24 |
25 | === Building
26 |
Our applications are built using Maven.
Both microservice projects contain a `pom.xml` file, which defines the project build configuration and dependencies.
31 |
32 | To begin, we build both microservice applications by navigating to the corresponding directories and issuing the Maven build command:
33 |
34 | ----
35 | cd coffee-shop/
36 | mvn package
37 |
38 | cd ../barista/
39 | mvn package
40 | ----
41 |
42 | These commands will build both applications using Maven's `package` phase which creates the deployment artifacts, the WAR files containing our applications, under the corresponding `target` directories.
43 | Take a moment to have a look at the `pom.xml` files and the resulting WAR files, `coffee-shop.war` and `barista.war`.
44 | These WAR files will be deployed to our application containers.
45 |
46 |
47 | === Running
48 |
49 | In order to run our applications, we could deploy them to (locally) running application containers.
50 | However, since we aim to run in a cloud-native setting, we will deploy the applications as Linux containers, here using Docker.
51 |
In order to run Docker containers, we need to create corresponding Docker images, which contain everything that our applications need to start up and run.
53 |
54 | Let's see how we can achieve that in the link:02-docker.adoc[next section].
55 |
--------------------------------------------------------------------------------
/coffee-shop/src/main/java/com/sebastian_daschner/coffee_shop/control/Barista.java:
--------------------------------------------------------------------------------
1 | package com.sebastian_daschner.coffee_shop.control;
2 |
3 | import com.sebastian_daschner.coffee_shop.entity.CoffeeOrder;
4 | import com.sebastian_daschner.coffee_shop.entity.OrderStatus;
5 |
6 | import javax.annotation.PostConstruct;
7 | import javax.annotation.PreDestroy;
8 | import javax.enterprise.context.ApplicationScoped;
9 | import javax.json.Json;
10 | import javax.json.JsonObject;
11 | import javax.ws.rs.client.Client;
12 | import javax.ws.rs.client.ClientBuilder;
13 | import javax.ws.rs.client.Entity;
14 | import javax.ws.rs.client.WebTarget;
15 | import javax.ws.rs.core.Response;
16 | import java.util.concurrent.TimeUnit;
17 |
18 | @ApplicationScoped
19 | public class Barista {
20 |
21 | private Client client;
22 | private WebTarget target;
23 |
24 | @PostConstruct
25 | private void initClient() {
26 | client = ClientBuilder.newBuilder()
27 | .connectTimeout(1, TimeUnit.SECONDS)
28 | .readTimeout(5, TimeUnit.SECONDS)
29 | .build();
30 | target = client.target("http://barista:9080/barista/resources/brews/{id}");
31 | }
32 |
33 | public OrderStatus brewCoffee(CoffeeOrder order) {
34 | JsonObject requestBody = createRequestBody(order);
35 | Response response = sendBrewRequest(requestBody, order.getId().toString());
36 | return readStatus(response);
37 | }
38 |
39 | private JsonObject createRequestBody(CoffeeOrder order) {
40 | return Json.createObjectBuilder()
41 | .add("type", order.getType().name().toLowerCase())
42 | .build();
43 | }
44 |
45 | private Response sendBrewRequest(JsonObject requestBody, String id) {
46 | try {
47 | return target.resolveTemplate("id", id)
48 | .request()
49 | .put(Entity.json(requestBody));
50 | } catch (Exception e) {
51 | throw new IllegalStateException("Could not retrieve brew status, reason: " + e.getMessage(), e);
52 | }
53 | }
54 |
55 | public OrderStatus retrieveBrewStatus(CoffeeOrder order) {
56 | Response response = getBrewStatus(order.getId().toString());
57 | return readStatus(response);
58 | }
59 |
60 | private Response getBrewStatus(String id) {
61 | return target.resolveTemplate("id", id)
62 | .request().get();
63 | }
64 |
65 | private OrderStatus readStatus(Response response) {
66 | if (response.getStatusInfo().getFamily() != Response.Status.Family.SUCCESSFUL)
67 | throw new IllegalStateException("Could not retrieve brew status, response: " + response.getStatus());
68 |
69 | JsonObject entity = response.readEntity(JsonObject.class);
70 | final OrderStatus status = OrderStatus.fromString(entity.getString("status", null));
71 |
72 | if (status == null)
73 | throw new RuntimeException("Could not read known status from response: " + entity);
74 |
75 | return status;
76 | }
77 |
78 | @PreDestroy
79 | private void closeClient() {
80 | client.close();
81 | }
82 |
83 | }
84 |
--------------------------------------------------------------------------------
/workshop/05-istio-observability.adoc:
--------------------------------------------------------------------------------
1 | == Observability with Istio
2 |
3 | The fact that the sidecar proxy containers intercept all traffic enables them to inspect the information and attributes of HTTP connections.
4 | Istio adds observability to our microservices via this technique without requiring us to change the applications.
5 | It ships with monitoring, service graph dependency information, logging, and distributed tracing out of the box.
6 |
7 | We can have a look at the pods and services that have been created under the `istio-system` namespace:
8 |
9 | ----
10 | kubectl get pods -n istio-system
11 | kubectl get services -n istio-system
12 | ----
13 |
14 | Now we have to create new coffee orders in the coffee-shop application, that is we connect to the application through the gateway, again:
15 |
16 | ----
17 | while true; do
18 | curl :/coffee-shop/resources/orders -i -XPOST \
19 | -H 'Content-Type: application/json' \
20 | -d '{"type":"Espresso"}' \
21 | | grep HTTP
22 | sleep 1
23 | done
24 | ----
25 |
26 | This will create a coffee order every second and thus generate a constant load on our microservices.
27 |
28 |
29 | === Monitoring (Grafana)
30 |
31 | Our Istio installation comes with default monitoring and Grafana dashboards out of the box.
32 | In order to access the monitoring dashboards, we have to establish a connection to the Grafana pod.
33 |
34 | We could create a dedicated service and gateway that route to the pod, but for testing purposes, we establish a port forwarding from local port `3000` to the Grafana pod:
35 |
36 | ----
37 | kubectl -n istio-system port-forward \
38 | $(kubectl -n istio-system get pod -l app=grafana -o jsonpath='{.items[0].metadata.name}') \
39 | 3000:3000 &
40 | ----
41 |
42 | Once that forwarding is established, we browse to http://localhost:3000 and navigate to the Istio Mesh Dashboard by clicking on the Home menu on the top left.
43 |
44 | You can explore all technical metrics that have been made available in Istio by default.
45 |
46 |
47 | === Service Graph (Kiali)
48 |
49 | Our Istio installation also ships with a service graph which shows the dependencies and interactions between the individual services.
50 |
51 | We establish a port forwarding from local port `20001` to the service graph pod:
52 |
53 | ----
54 | kubectl -n istio-system port-forward \
55 | $(kubectl -n istio-system get pod -l app=kiali -o jsonpath='{.items[0].metadata.name}') \
56 | 20001:20001 &
57 | ----
58 |
59 | We browse to http://localhost:20001/ and explore the service graph instances under "`Graph`".
60 | Explore the available options under "`Display`" and "`Graph Type`".
61 |
62 |
63 | === Tracing (Jaeger)
64 |
65 | Our Istio installation also ships with distributed tracing which allows to trace down individual requests that occurred between our microservice instances.
66 |
67 | Distributed tracing is the only out-of-the-box observability feature we look at that requires some involvement on our side.
68 | By default, Istio would have no chance to know that the two individual HTTP requests, the one between the ingress gateway and the coffee-shop service, and the one between the coffee-shop application and barista, are in fact correlated.
69 |
70 | What has to happen internally is that the coffee-shop application must retrieve and pass certain tracing headers, that is HTTP headers (`x-b3-traceid`, `x-b3-parentspanid`, `x-b3-spanid`, ...).
71 | The sidecar containers can then observe and correlate this additional information.
72 |
73 | Initially, this would mean that our coffee-shop application would have to get the tracing headers from the incoming HTTP request, keep the information in the local request (thread) scope and add it to the outgoing HTTP request in the client that connects to the barista service, again.
74 | Luckily, with the help of MicroProfile OpenTracing, we don't have to do that manually.
75 |
76 | Our running application can be configured to use MicroProfile OpenTracing that passes this kind of information through all incoming and outgoing HTTP requests, if tracing headers have been available on the first request.
77 |
78 | In order to do that, we only have to instruct our Open Liberty servers to activate the corresponding feature.
79 | We don't have to change our Java code, nor the application build.
80 | This is solely configured on the infrastructure layer.
81 |
82 | Have a look at the `server.xml` configuration of our coffee-shop application:
83 | Besides the `jakartaee-8.0` feature it already contains the features for `microProfile-3.0` and `usr:opentracingZipkin-0.31`.
84 | This will automatically pass the tracing HTTP headers.
85 |
86 | [source,xml]
87 | ----
88 | ...
89 |
90 | <featureManager>
91 |     <feature>jakartaee-8.0</feature>
92 |     <feature>microProfile-3.0</feature>
93 |     <feature>usr:opentracingZipkin-0.31</feature>
94 | </featureManager>
95 |
96 | ...
97 | ----
98 |
99 | Now, we establish a port forwarding from local port 16686 to the tracing, i.e. Jaeger pod:
100 |
101 | ----
102 | kubectl port-forward -n istio-system \
103 | $(kubectl get pod -n istio-system -l app=jaeger -o jsonpath='{.items[0].metadata.name}') \
104 | 16686:16686 &
105 | ----
106 |
107 | We navigate our browser to http://localhost:16686, select `istio-ingressgateway` as service, and click on the '`Find Traces`' button to see the recent traces.
108 |
109 | If we examine the correct traces, we can see that our coffee-shop application synchronously and asynchronously connects to the barista backend.
110 |
111 | In the link:06-istio-routing.adoc[next section] we'll see how we instruct Istio to route our mesh traffic according to certain criteria.
112 |
--------------------------------------------------------------------------------
/workshop/07-resiliency.adoc:
--------------------------------------------------------------------------------
1 | == Resiliency
2 |
3 | In this section we'll discuss resiliency in the world of microservices.
4 | We'll have a look at what solutions Istio offers us already, transparent to the application.
5 | Furthermore, we could implement resiliency in our application as well.
6 |
7 | In order to not get confused by different behavior of coffee-shop `v1` and `v2`, let's first revert the changes of the last section, and only have a single subset `v1`.
8 | This means, we change the coffee-shop virtual service and destination rule back to the original ones (`kubectl apply`), and remove the deployment `coffee-shop-v2` (`kubectl delete deployment coffee-shop-v2`).
9 | After the deployment has been deleted, the pods with version 2 will also be terminated.
10 |
11 | NOTE: Although we can always change infrastructure-as-code files and apply them via `kubectl apply`, deletions of resources in files won't be detected by Kubernetes, since it only re-applies and updates the currently provided resources.
12 | Thus we need to explicitly `kubectl delete` obsolete resources.
13 |
14 |
15 | === Istio
16 |
17 | Istio ships with some resiliency features, such as timeouts, circuit breakers, or retries.
18 | Since the sidecar proxy containers intercept all connection from and to the application containers, they have full control over HTTP traffic.
19 |
20 | That means, if our applications do not include for example connection timeouts, Istio can, as a last resort, ensure that a timeout will be triggered on a slow connection.
21 | This enables us to add at least certain resiliency patterns without changing applications.
22 |
23 | === Timeouts
24 |
25 | Istio enables us to add simple timeout definitions on rules.
26 | For this to work, we need to ensure that the traffic is actually routed through virtual service rules, and not just uses the default rules, that is, the instances' labels need to match the specific subsets.
27 |
28 | We'll enhance our coffee-shop virtual service rules with a timeout of 1 second:
29 |
30 | [source,yaml]
31 | ----
32 | apiVersion: networking.istio.io/v1alpha3
33 | kind: VirtualService
34 | metadata:
35 | name: coffee-shop
36 | spec:
37 | hosts:
38 | - "*"
39 | gateways:
40 | - coffee-shop-gateway
41 | http:
42 | - route:
43 | - destination:
44 | host: coffee-shop
45 | port:
46 | number: 9080
47 | subset: v1
48 | timeout: 1s
49 | ---
50 | ----
51 |
52 | NOTE: The `timeout` definition is contained in a specific route YAML object and will only be taken into account on that very rule.
53 |
54 | We update the virtual service to our cluster.
55 | Now, the overall time of the coffee order requests will not exceed one second.
56 | If the coffee-shop application, or any backend with which we communicate synchronously takes longer than that the sidecar proxy will respond with an HTTP error instead.
57 |
58 | ==== Testing resiliency
59 |
60 | The challenge, however, is now to see whether our changes took effect as desired.
61 | We'd expect our applications to respond in much less than one second, therefore we would not see that error situation until it's in production.
62 | Luckily, Istio ships with functionality that purposely produces error situations, in order to test the resiliency of our services.
63 |
64 | The sidecars have two main means to do that: adding artificial delays, and failing requests.
65 | We can instruct the routing rules to add these fault scenarios, if required only on a given percentage of the requests.
66 |
67 | We modify the barista virtual service to add a 3 seconds delay for 50% of the requests:
68 |
69 | [source,yaml]
70 | ----
71 | apiVersion: networking.istio.io/v1alpha3
72 | kind: VirtualService
73 | metadata:
74 | name: barista
75 | spec:
76 | hosts:
77 | - barista
78 | http:
79 | - route:
80 | - destination:
81 | host: barista
82 | subset: v1
83 | fault:
84 | delay:
85 | fixedDelay: 3s
86 | percent: 50
87 | ---
88 | ----
89 |
90 | If we apply this updated resource to the cluster, we will notice that some of the connections will in fact fail, after roughly 1 second.
91 | We don't see any request taking the whole 3 seconds, due to the timeout on the coffee-shop routing rules:
92 |
93 | ----
94 | while true; do
95 | curl :/coffee-shop/resources/orders -i -XPOST \
96 | -H 'Content-Type: application/json' \
97 | -d '{"type":"Espresso"}' \
98 | | grep HTTP
99 | sleep 1
100 | done
101 | ----
102 |
103 | NOTE: The `fault` property is only meant for testing purposes. Please don't apply this to any other environment where you don't want connections to be slowed down or to randomly fail.
104 |
105 | Besides the obvious responses, we can also use our observability tools to inspect what is happening.
106 | Have a look at the Grafana dashboards and the Jaeger traces again, to see how the failing requests are made visible.
107 |
108 | This lab only covers timeouts and basic faults.
109 | Istio also offers functionality for retries and circuit breakers which are also applied and configured declaratively via Istio resources.
110 | Have a look at the further resources to learn more.
111 |
112 |
113 | === Application level
114 |
115 | Building a resilient microservice is key when designing microservices.
116 | Apart from the infrastructure resilience, sometimes more fine-grained application level resilience is required.
117 |
118 | https://github.com/eclipse/microprofile-fault-tolerance/[MicroProfile Fault Tolerance^] provides a simple yet flexible solution to build a resilient microservice at the application-level.
119 | It offers capabilities for timeouts, retries, bulkheads, circuit breakers and fallback.
120 |
121 | In general, application-level resiliency is more fine-grained while Istio's behavior is more coarse-grained.
122 | As a recommendation, we can use MicroProfile together with Istio's fault handling.
123 |
124 | Looks like we've finished the last section! link:08-conclusion.adoc[Conclusion^].
125 |
--------------------------------------------------------------------------------
/workshop/06-istio-routing.adoc:
--------------------------------------------------------------------------------
1 | == Traffic management
2 |
3 | In a previous part, we've examined how the basic building blocks of the Istio Networking API, virtual services, destination rules, and gateways work.
4 | In this section, we'll have a look at some more advanced traffic routing performed by Istio, rather than by standard Kubernetes means.
5 |
6 |
7 | === Updated coffee-shop
8 |
9 | In order to make some changes visible, we introduce a second version of our coffee-shop.
10 | So far we've used the base version (`version: v1`) only.
11 | For the following we add a second version of our microservice example which behaves slightly differently.
12 |
13 | However, this version is not simply a new version of the same application, which could be updated and deployed in a zero-downtime manner (i.e. transition `v1` -> `v2`).
14 | Rather, we specifically want two distinct versions to be available for a longer period of time, for example to perform A/B testing or canary releases.
15 |
16 | Therefore, we make a change in the coffee-shop application to create a new version.
17 | The orders resource currently returns the coffee type in all-caps, since that is the name of the `CoffeeType` enum.
18 | We now change the Java code in the `OrdersResource` class to output the coffee type in lower case.
19 | Once we've re-built the Maven project, we build the coffee-shop Docker image, now with name `coffee-shop:2`.
20 |
21 | We create a new deployment definition that will deploy our new coffee-shop application and leave the previous one untouched:
22 |
23 | [source,yaml]
24 | ----
25 | kind: Deployment
26 | apiVersion: apps/v1
27 | metadata:
28 | name: coffee-shop-v2
29 | spec:
30 | selector:
31 | matchLabels:
32 | app: coffee-shop
33 | version: v2
34 | replicas: 1
35 | template:
36 | metadata:
37 | labels:
38 | app: coffee-shop
39 | version: v2
40 | spec:
41 | containers:
42 | - name: coffee-shop
43 | image: de.icr.io/istio-ws-/coffee-shop:2
44 | imagePullPolicy: Always
45 | ports:
46 | - containerPort: 9080
47 | livenessProbe:
48 | exec:
49 | command: ["sh", "-c", "curl -f http://localhost:9080/"]
50 | initialDelaySeconds: 20
51 | readinessProbe:
52 | exec:
53 | command: ["sh", "-c", "curl -s http://localhost:9080/health | grep -q coffee-shop"]
54 | initialDelaySeconds: 40
55 | restartPolicy: Always
56 | ----
57 |
58 | Pods managed by this deployment will receive the labels `app: coffee-shop` and `version: v2`.
59 | The coffee-shop service would simply forward requests to this pod as well, however, since our traffic is routed by the Istio proxies, only the previous instances will receive traffic for now.
60 |
61 | We ensure that our new deployment and pods have been created:
62 |
63 | ----
64 | kubectl get deployments
65 | kubectl get pods
66 | ----
67 |
68 |
69 | === A/B testing with Istio
70 |
71 | A/B testing is a method of running tests against two separate service versions in order to determine which performs better according to a set of defined metrics.
72 | To add a second version of our coffee-shop service we enhance the coffee-shop destination rule and add a second subset:
73 |
74 | [source,yaml]
75 | ----
76 | apiVersion: networking.istio.io/v1alpha3
77 | kind: DestinationRule
78 | metadata:
79 | name: coffee-shop
80 | spec:
81 | host: coffee-shop
82 | subsets:
83 | - name: v1
84 | labels:
85 | version: v1
86 | - name: v2
87 | labels:
88 | version: v2
89 | ----
90 |
91 | We apply the updated destination rule to the cluster.
92 | As you will notice, this will have no effect on the current scenario.
93 | If you have a look at the current coffee-shop virtual service, you'll notice that we route all incoming traffic to `v1` subset:
94 |
95 | ----
96 | kubectl get virtualservice coffee-shop --output yaml
97 | ----
98 |
99 | We enhance the routing rule to route to subset `v2` as well, but only for traffic which matches the HTTP header of a Firefox user agent:
100 |
101 | [source,yaml]
102 | ----
103 | apiVersion: networking.istio.io/v1alpha3
104 | kind: VirtualService
105 | metadata:
106 | name: coffee-shop
107 | spec:
108 | hosts:
109 | - "*"
110 | gateways:
111 | - coffee-shop-gateway
112 | http:
113 | - match:
114 | - headers:
115 | user-agent:
116 | regex: '.*Firefox.*'
117 | route:
118 | - destination:
119 | host: coffee-shop
120 | port:
121 | number: 9080
122 | subset: v2
123 | - route:
124 | - destination:
125 | host: coffee-shop
126 | port:
127 | number: 9080
128 | subset: v1
129 | ---
130 | ----
131 |
132 | The newly introduced rule will route the traffic from Firefox browsers to all instances that are contained in the `v2` service subset and leave the rest untouched, that is, everything else still routes to the `v1` subset.
133 | In Istio `VirtualService` rules, there can be only one rule for each service and therefore when defining multiple https://istio.io/docs/reference/config/istio.networking.v1alpha3/#HTTPRoute[HTTPRoute^] blocks, the order in which they are defined in the YAML file matters.
134 |
135 | If we apply these changes to the cluster, we can now see a different behavior for requests that originate from a Firefox browser.
136 | Similarly, we can simulate that behavior from the command line, if we pass a corresponding header to `curl`:
137 |
138 | ----
139 | curl :/coffee-shop/resources/orders -i -XPOST \
140 | -H 'User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:62.0) Gecko/20100101 Firefox/62.0' \
141 | -H 'Content-Type: application/json' \
142 | -d '{"type":"Espresso"}'
143 |
144 | curl :/coffee-shop/resources/orders \
145 | -H 'User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:62.0) Gecko/20100101 Firefox/62.0'
146 | ----
147 |
148 |
149 | === Canary Deployments
150 |
151 | In canary deployments, newer versions of services are incrementally rolled out to users to minimize the risk and impact of any bugs introduced by the newer version.
152 | To begin incrementally routing traffic to the newer version of the coffee-shop service, we modify its virtual service:
153 |
154 | [source,yaml]
155 | ----
156 | apiVersion: networking.istio.io/v1alpha3
157 | kind: VirtualService
158 | metadata:
159 | name: coffee-shop
160 | spec:
161 | hosts:
162 | - "*"
163 | gateways:
164 | - coffee-shop-gateway
165 | http:
166 | - route:
167 | - destination:
168 | host: coffee-shop
169 | port:
170 | number: 9080
171 | subset: v2
172 | weight: 30
173 | - destination:
174 | host: coffee-shop
175 | port:
176 | number: 9080
177 | subset: v1
178 | weight: 70
179 | ---
180 | ----
181 |
182 | In this modified rule, the routed traffic is split between the two subsets of the coffee-shop service (70% to `v1` and 30% to `v2`).
183 | Traffic to the modernized version of our service is controlled on a percentage basis to limit the impact of any unforeseen bugs.
184 | This rule can be modified over time until eventually all traffic is directed to the newer version of the service.
185 | This would be part of an automated process, typically realized by a Continuous Deployment pipeline.
186 |
187 | We can see this rule in action by accessing our application again.
188 | If you're accessing the example through a browser, make sure that you're performing a hard refresh to remove any browser IP address caching.
189 | You should notice that the coffee-shop should swap between the first and second version at roughly the weight you specified.
190 |
191 | Now that we've managed some traffic with Istio, let's have a closer look how to make our microservices more resilient in the link:07-resiliency.adoc[next section].
192 |
--------------------------------------------------------------------------------
/workshop/00-prerequisites.adoc:
--------------------------------------------------------------------------------
1 | == Prerequisites
2 |
3 | As prerequisites to perform the workshop, you need to have the following tools installed on your laptop:
4 |
5 | - Any JDK version 8 or later, e.g. https://adoptopenjdk.net/?variant=openjdk8&jvmVariant=openj9
6 | - https://maven.apache.org/download.cgi[Maven 3^]
7 | - https://git-scm.com/book/en/v2/Getting-Started-Installing-Git[git^]
8 | - https://curl.haxx.se/download.html[curl^] (should exist on most environments already)
9 | - https://docs.docker.com/install/[docker^]
10 | - https://kubernetes.io/docs/tasks/tools/install-kubectl/[kubectl^]
11 | - https://cloud.ibm.com/docs/home/tools[IBM Cloud CLI^], including plugins kubernetes-service and container-registry
12 | - https://istio.io/docs/setup/getting-started/#download[istioctl (in version 1.5.0)^]
13 |
14 | === Verify
15 |
16 | To verify, run the following commands in your terminal:
17 |
18 | - `git version`
19 | - `curl -iL https://www.ibm.com`
20 | - `docker run --rm hello-world`
21 | - `kubectl version`
22 | - `istioctl version`
23 |
24 |
25 | === IBM Cloud access
26 |
27 | ==== Registering IBM Cloud Account
28 |
29 | Register for an https://ibm.biz/cloud-reg-istio-ws[IBM Cloud account^].
30 |
31 | Fill in the registration form and follow the link in the validation email when it arrives.
32 |
33 | Then you can https://ibm.biz/cloud-login-istio-ws[Login into IBM Cloud^] using the account credentials you have registered.
34 |
35 | NOTE: IBM Cloud offers a Free Kubernetes cluster type that will run for 30 days at no costs.
36 | You can use this type once you have entered your credit card or a corresponding promo code.
37 |
38 |
39 | ==== IBM Cloud CLI
40 |
41 | We log into the IBM Cloud CLI tool: `ibmcloud login`.
42 | If you have a federated account, include the `--sso` flag: `ibmcloud login --sso`.
43 |
44 | Create an API key to access the IBM Cloud CLI more conveniently.
45 |
46 | ----
47 | ibmcloud iam api-key-create cloud-native \
48 | -d cloud-native \
49 | --file cloud-native.json
50 | cat cloud-native.json
51 | ----
52 |
53 | Write down the API key under the `apikey` property.
54 |
55 | Now we set the locale and unset any configuration that might exist if you have used the CLI in the past:
56 |
57 | ----
58 | ibmcloud config --locale en_US
59 | ibmcloud config --check-version=false
60 | ibmcloud api --unset
61 | ibmcloud api https://cloud.ibm.com
62 | ibmcloud login --apikey -r eu-de
63 | ----
64 |
65 | To verify that the plugins are installed properly, run `ibmcloud plugin list`.
66 | The Container Service plugin and Registry plugin are displayed as `container-service/kubernetes-service` and `container-registry`.
67 |
68 |
69 | === IBM Kubernetes Service
70 |
71 | Now we create our Kubernetes cluster (free or standard) using the https://ibm.biz/create-cluster-istio-ws[Cloud Console^] or CLI.
72 | A free cluster is sufficient for this workshop but feel free to create a standard cluster with your desired configuration.
73 |
74 | If you want to use the CLI continue with one of the following:
75 |
76 | For a Free cluster use the following command:
77 |
78 | ----
79 | ibmcloud ks cluster create classic \
80 | --name cloud-native
81 | ----
82 |
83 | For a standard cluster, in standard setting with 3 workers, use the following command:
84 |
85 | ----
86 | ibmcloud ks cluster create classic \
87 | --name cloud-native \
88 | --zone fra02 \
89 | --flavor b3c.4x16 \
90 | --workers 3
91 | ----
92 |
93 |
94 | [NOTE]
95 | ====================
96 | If the command fails because you have existing VLANs in that zone, you need to specify the VLANs in your command:
97 |
98 | ----
99 | ibmcloud ks vlans --zone fra02
100 | ibmcloud ks cluster create classic \
101 | --name cloud-native \
102 | --zone fra02 \
103 | --flavor b3c.4x16 \
104 | --workers 3 \
105 | --private-vlan \
106 | --public-vlan
107 | ----
108 | ====================
109 |
110 |
111 | ==== Accessing the cluster
112 |
113 | Now, we'll see how to set the context to work with our clusters by using the `kubectl` CLI, access the Kubernetes dashboard, and gather basic information about our cluster.
114 |
115 | List the available clusters: `ibmcloud ks clusters`.
116 | This command should now show your cluster which is being created.
117 |
118 |
119 | After the cluster is up-and-running, download the configuration file and certificates for the cluster using the `cluster config` command:
120 |
121 | ----
122 | ibmcloud ks cluster config --cluster cloud-native
123 | ----
124 |
125 | Get basic information about the cluster and its worker nodes.
126 | This information can help you managing the cluster and troubleshoot issues.
127 |
128 | Get the details of your cluster: `ibmcloud ks cluster get --cluster cloud-native`
129 |
130 | Verify the nodes in the cluster:
131 |
132 | ----
133 | ibmcloud ks workers --cluster cloud-native
134 | kubectl get nodes
135 | ----
136 |
137 | View the currently available services, deployments, and pods:
138 |
139 | ----
140 | kubectl get svc,deploy,po --all-namespaces
141 | ----
142 |
143 | ==== Installing Istio
144 |
145 | Now, we're installing Istio:
146 |
147 | ----
148 | istioctl manifest apply --set profile=demo
149 | ----
150 |
151 | This will install Istio 1.5.0 with distributed tracing, Grafana monitoring, and Kiali.
152 |
153 | Check that all pods are running before continuing.
154 |
155 | ----
156 | kubectl get pod -n istio-system
157 | ----
158 |
159 | We can verify our Istio installation as follows:
160 |
161 | ----
162 | istioctl manifest generate --set profile=demo > generated-manifest.yaml
163 | istioctl verify-install -f generated-manifest.yaml
164 | ----
165 |
166 | The result of the second command (last 3 lines) looks like this:
167 |
168 | ----
169 | Checked 25 crds
170 | Checked 3 Istio Deployments
171 | Istio is installed successfully
172 | ----
173 |
174 | We then enable the automatic sidecar injection:
175 |
176 | ----
177 | kubectl label namespace default istio-injection=enabled
178 | ----
179 |
180 | This will enable automatic sidecar injection for the `default` namespace.
181 |
182 | Congratulations! We now successfully installed Istio into our cluster.
183 |
184 |
185 | ==== Container Registry
186 |
187 | In order to build and distribute Docker images, we need a Docker registry.
188 | We can use the IBM Container Registry which can be accessed right away from our Kubernetes cluster.
189 |
190 | We log into the Container Registry service via the `ibmcloud` CLI and obtain the information about our registry:
191 |
192 | ----
193 | ibmcloud cr login
194 | ibmcloud cr region-set eu-central
195 | ibmcloud cr region
196 | You are targeting region 'eu-central', the registry is 'de.icr.io'.
197 | ----
198 |
199 | We use the CLI to create a unique namespace in the Container Registry service (`cr`) for our workshop:
200 |
201 | ----
202 | ibmcloud cr namespace-add istio-ws-
203 | ibmcloud cr namespaces
204 | ----
205 |
206 | In order to test our new registry namespace, we pull a public image, re-tag it for our own registry, for our region, and push it:
207 |
208 | ----
209 | docker pull hello-world
210 | docker tag hello-world de.icr.io/istio-ws-/hello-world:1
211 | docker push de.icr.io/istio-ws-/hello-world:1
212 | ----
213 |
214 | Let's see whether our image is now in the private registry:
215 |
216 | ----
217 | ibmcloud cr images
218 | ----
219 |
220 | NOTE: In all following examples, you will need to adapt the image / namespace name!
221 | This is important to take into consideration, otherwise the examples won't work since the images won't exist in your account.
222 |
223 | === Local Docker setup
224 |
225 | If you want to try out the example locally, you have to create a Docker network similar to the following:
226 |
227 | ----
228 | docker network create --subnet=192.168.42.0/24 dkrnet
229 | ----
230 |
231 | Now, we've finished all preparations.
232 | Let's get started with the link:01-introduction.adoc[workshop].
233 |
--------------------------------------------------------------------------------
/workshop/04-istio.adoc:
--------------------------------------------------------------------------------
1 | == Istio
2 |
3 | Istio is a service mesh technology which enables us to add cross-cutting concerns to our microservices without changing our implementations.
4 | Istio adds sidecar containers to each running pod, that act as proxies to our applications.
5 | Every running pod will get a distinct proxy sidecar container which intercepts, inspects or enhances the connection.
6 |
7 | Istio adds default routes in between the Kubernetes services.
8 | However, it's a best practice to explicitly add Istio routes between the desired services.
9 |
10 | The way how Istio integrates with Kubernetes is that it enhances Kubernetes' API model to add Istio resource types.
11 | This enables developers to use Istio resources with the same experience as if they were included with Kubernetes, including the `kubectl` CLI.
12 |
13 | We will see that the following Istio resources are defined in the YAML format as well and look very much like Kubernetes resources.
14 | We place the following files in the same directories as the Kubernetes definitions, either in single files or one per resource.
15 |
16 |
17 | === Virtual Service
18 |
19 | We create an Istio virtual service for the barista application:
20 |
21 | [source,yaml]
22 | ----
23 | apiVersion: networking.istio.io/v1alpha3
24 | kind: VirtualService
25 | metadata:
26 | name: barista
27 | spec:
28 | hosts:
29 | - barista
30 | http:
31 | - route:
32 | - destination:
33 | host: barista
34 | subset: v1
35 | ---
36 | ----
37 |
38 | A virtual service defines the routing rules for a service that is part of the mesh.
39 | By default, all Kubernetes services are part of the service mesh.
40 |
41 | The routing to the barista service specifies that all requests that access the barista service are routed to the barista instances with the subset (i.e. additional label) `v1`.
42 | In practice this acts as a default route.
43 | The subsets, among other request policies are defined in the destination rules of the corresponding services.
44 |
45 |
46 | === Destination Rule
47 |
48 | We create a destination rule for the barista application which defines the subset `v1`.
49 |
50 | [source,yaml]
51 | ----
52 | apiVersion: networking.istio.io/v1alpha3
53 | kind: DestinationRule
54 | metadata:
55 | name: barista
56 | spec:
57 | host: barista
58 | subsets:
59 | - name: v1
60 | labels:
61 | version: v1
62 | ---
63 | ----
64 |
65 | The subset defines that pods that contain the desired label, here `version`, are considered to be part of the subset.
66 | Including a `version` label is another best practice.
67 |
68 | We can put these two Istio resources into a single file, for example named `routing.yaml` or into separate ones.
69 |
70 | In order to make this example work, our pods have to include the label.
71 | Therefore, we have to change and re-apply both deployment definitions.
72 |
73 | We will add the `version: v1` label to the labels section of the pod template metadata, again, to both our application deployments (the YAML files).
74 | Then, we update the deployment definitions against our cluster again (`kubectl apply`).
75 |
76 |
77 | === Gateway
78 |
79 | In order to make our application accessible from outside the service mesh, we define a gateway resource.
80 | Instead of Kubernetes ingress resources which we defined before, Istio gateways enable ingress traffic to be already inspected and routed by Istio.
81 |
82 | We create an Istio routing definition with the following contents to define a gateway that routes traffic into the cluster:
83 |
84 | [source,yaml]
85 | ----
86 | apiVersion: networking.istio.io/v1alpha3
87 | kind: Gateway
88 | metadata:
89 | name: coffee-shop-gateway
90 | spec:
91 | selector:
92 | istio: ingressgateway
93 | servers:
94 | - port:
95 | number: 80
96 | name: http
97 | protocol: HTTP
98 | hosts:
99 | - "*"
100 | ---
101 | ----
102 |
103 | The gateway specifies the wildcard host (`*`) which matches all host names or IP addresses.
104 | It needs to be bound to a virtual service via the `gateways` property.
105 | This will be part of the coffee-shop's virtual service specification:
106 |
107 | [source,yaml]
108 | ----
109 | apiVersion: networking.istio.io/v1alpha3
110 | kind: VirtualService
111 | metadata:
112 | name: coffee-shop
113 | spec:
114 | hosts:
115 | - "*"
116 | gateways:
117 | - coffee-shop-gateway
118 | http:
119 | - route:
120 | - destination:
121 | host: coffee-shop
122 | port:
123 | number: 9080
124 | subset: v1
125 | ---
126 | ----
127 |
128 | That routing now looks slightly different.
129 | Since we define a specific gateway, only traffic from that gateway will be routed to that virtual service, not from any other service inside the mesh (this would require us to explicitly add the default `mesh` gateway as well).
130 |
131 | The HTTP routing rule also requires us to specify the port number, since the incoming traffic originated from a different port number (`80`).
132 |
133 | The following contents define the resource for the coffee-shop destination rule, which looks similar to the barista's.
134 |
135 | [source,yaml]
136 | ----
137 | apiVersion: networking.istio.io/v1alpha3
138 | kind: DestinationRule
139 | metadata:
140 | name: coffee-shop
141 | spec:
142 | host: coffee-shop
143 | subsets:
144 | - name: v1
145 | labels:
146 | version: v1
147 | ---
148 | ----
149 |
150 | ==== Accessing our applications
151 |
152 | If we now want to run our application and access it through the service mesh, we will access the gateway from outside the cluster.
153 | This requires, of course, that the gateway resource and all virtual service and destination rule resources have been applied to the mesh.
154 |
155 | If we have created a free cluster we, again, need to access the (gateway) service through the cluster's node IP address.
156 | Thus, we retrieve the node port of the `istio-ingressgateway` service, a service that's shipped by the Istio installation:
157 |
158 | ----
159 | kubectl get services -n istio-system istio-ingressgateway
160 | ----
161 |
162 | We can get the HTTP/2 node port directly by using the following Go template:
163 |
164 | ----
165 | kubectl get services -n istio-system istio-ingressgateway --template '{{range .spec.ports}}{{if eq .name "http2"}}{{.nodePort}}{{end}}{{end}}'
166 | ----
167 |
168 | [NOTE]
169 | ====================
170 | As a reminder, as seen in the last section, we'll retrieve the node IP address with one of the following commands.
171 |
172 | ----
173 | ibmcloud ks workers cloud-native
174 | ----
175 |
176 | If you have the `jq` CLI tool installed, you can also directly extract the IP address by invoking:
177 |
178 | ----
179 | ibmcloud ks workers cloud-native --json | jq -r '.[0].publicIP'
180 | ----
181 | ====================
182 |
183 | We can then access the service using the node IP address and the node port of the `istio-ingressgateway` service:
184 |
185 | ----
186 | curl :/health -i
187 | ...
188 | curl :/coffee-shop/resources/orders -i
189 | ----
190 |
191 | This scenario works completely without the Kubernetes ingress resource.
192 | Now, only the Istio resources would be required, besides the deployments and services.
193 |
194 | We can similarly use the `/orders` resource to create new coffee orders:
195 |
196 | ----
197 | curl :/coffee-shop/resources/orders -i -XPOST \
198 | -H 'Content-Type: application/json' \
199 | -d '{"type":"Espresso"}'
200 | ----
201 |
202 | [NOTE]
203 | ====================
204 | If we have a paid cluster, we can acquire the gateway IP address through the load balancer IP address of the `istio-ingressgateway` service:
205 |
206 | ----
207 | kubectl get services -n istio-system istio-ingressgateway \
208 | -o jsonpath='{.status.loadBalancer.ingress[0].ip}'
209 | ----
210 |
211 | We use this IP address and the default HTTP ports (`80` or `443`, respectively) to access the application from outside the cluster:
212 |
213 | ----
214 | curl /health -i
215 | ----
216 | ====================
217 |
218 | In theory, this means that both of our services are working as expected and can communicate with each other.
219 | However, this assumption, or observation, is hardly enough for a system that runs in production.
220 |
221 | Let's see how Istio improves our observability in the link:05-istio-observability.adoc[next section].
222 |
--------------------------------------------------------------------------------
/workshop/02-docker.adoc:
--------------------------------------------------------------------------------
1 | == Docker
2 |
3 | In order to run our applications in Docker containers, we need Docker images first which are built from ``Dockerfile``s.
4 | Therefore, we create two `Dockerfile` files, one under each project directory.
5 |
6 |
7 | === Docker images
8 |
9 | In order to fetch the required dependencies, we need to execute the following scripts once:
10 |
11 | ----
12 | coffee-shop/liberty/prepare.sh
13 | barista/liberty/prepare.sh
14 | ----
15 |
16 | Let's start with the coffee-shop application.
17 | Under `coffee-shop/`, we create a `Dockerfile` with the following contents:
18 |
19 | [source,Dockerfile]
20 | ----
21 | FROM open-liberty:20.0.0.3-full-java8-openj9
22 |
23 | COPY liberty/extension /liberty/usr/extension/
24 | COPY liberty/server.xml /config/
25 |
26 | COPY target/coffee-shop.war /config/dropins/
27 | ----
28 |
29 | The image declaration after `FROM` specifies the base image for our application.
30 | We use that as a starting ground.
31 | For now, we can assume that this base image already includes an application server, here Open Liberty, and a Java runtime.
32 |
33 | `/config/` and `/config/dropins/` are directories inside the container, where we can place our configuration and deployment artifacts.
34 |
35 | The `COPY` command copies resources from the host to the container, that is, they will end up in the resulting image.
36 | The `liberty/server.xml` file contains the application-specific server configuration including the server features that we want to use in our service.
37 | Since we deploy only a single application to each running server and thus running container, the server configuration is exclusively used for the individual applications.
38 |
39 | Take a moment to have a brief look at the `liberty/server.xml` file.
40 | We'll come back to it later.
41 | Now, we create a similar `Dockerfile` for the barista application, under `barista/`:
42 |
43 | [source,Dockerfile]
44 | ----
45 | FROM open-liberty:20.0.0.3-full-java8-openj9
46 |
47 | COPY liberty/extension /liberty/usr/extension/
48 | COPY liberty/server.xml /config/
49 |
50 | COPY target/barista.war /config/dropins/
51 | ----
52 |
53 |
54 | === Building
55 |
56 | In order to build the images, we issue the following Docker command: `docker build -t : .` (including the dot at the end).
57 |
58 | For the coffee-shop application this looks as follows:
59 |
60 | ----
61 | cd coffee-shop/
62 | docker build -t de.icr.io/istio-ws-/coffee-shop:1 .
63 | ----
64 |
65 | The image name, here `de.icr.io/istio-ws-/coffee-shop:1`, implicitly contains the location of the image registry, image name, and version (or tag).
66 | If an image with that name is being pulled or pushed, downloaded or uploaded, respectively, Docker thus already knows the destination.
67 |
68 | The `de.icr.io/...` part indicates the used cloud service and our user name and namespace.
69 |
70 | Again, we repeat this step for the barista application:
71 |
72 | ----
73 | cd ../barista/
74 | docker build -t de.icr.io/istio-ws-/barista:1 .
75 | ----
76 |
77 | Congratulations, now we have two images stored locally, for the coffee-shop and barista applications that we can now run as Docker containers!
78 |
79 |
80 | === Running
81 |
82 | If we want, we can already run the created Docker images locally on our machines.
83 |
84 | In order to do that, we have to keep in mind that the coffee-shop application will connect to the barista backend and thus needs to resolve and access the second running container.
85 | If we have a look at the source code of the coffee-shop application, specifically the `Barista` gateway, we notice that the application will simply connect to `http://barista:9080` -- host name `barista`.
86 |
87 | This will be possible since we specify that our microservices will run in a cloud-native environment, where we can control the environment independent of the applications.
88 | Thus we make sure that our applications will be reachable under logical names, such as `coffee-shop` or `barista`.
89 |
90 | For locally running containers, we can assign names to the running containers that are resolvable within other containers that run inside the same local Docker network.
91 | This requires us to specify both the container names and the network at runtime.
92 | We start our containers as follows:
93 |
94 | ----
95 | docker run --rm -d \
96 | --name barista \
97 | --network dkrnet \
98 | de.icr.io/istio-ws-/barista:1
99 |
100 | docker run --rm -d \
101 | --name coffee-shop \
102 | --network dkrnet \
103 | -p 9080:9080 \
104 | de.icr.io/istio-ws-/coffee-shop:1
105 | ----
106 |
107 | These commands run our two applications as Docker containers.
108 | The `--rm` flag will remove the containers after they have been stopped, `-d` runs the containers in the background, `--name` specifies the logical container names, `--network` makes sure both containers will run in the correct network, and the `-p` declarations forward the local containers' ports to the localhost.
109 |
110 | The two running containers can now resolve and access each other by their logical container names, which allows our coffee-shop application to connect to the barista backend using the `barista` host name.
111 | The reason why we're doing this, is because Kubernetes will support a very similar resolution of logical names.
112 |
113 |
114 | ==== Accessing our applications
115 |
116 | Now we can access and test our running microservices for the first time!
117 | Since they communicate using HTTP we can use any REST client of our choice, for example `curl`.
118 |
119 |
120 | ==== Health check resources
121 |
122 | We could simply ask the application for the coffee orders in the system but for our cloud-native environment we might want a more basic way to check whether our application is up and running.
123 | For that we have health check resources.
124 | We could create health checks by using plain REST resources, for example implemented by JAX-RS.
125 | What's also possible, and done with minimal effort, is to use MicroProfile Health to create HTTP health checks.
126 |
127 | Therefore, we have health resources in both the barista and coffee shop projects, under the `*.boundary` package:
128 |
129 | [source,java]
130 | ----
131 | import org.eclipse.microprofile.health.*;
132 | import javax.enterprise.context.ApplicationScoped;
133 |
134 | @Readiness
135 | @ApplicationScoped
136 | public class HealthResource implements HealthCheck {
137 |
138 | @Override
139 | public HealthCheckResponse call() {
140 | return HealthCheckResponse.named("barista").withData("barista", "ok").up().build();
141 | }
142 | }
143 | ----
144 |
145 | We're able to access the coffee shop health check via the local port `9080` and the default MicroProfile health resource:
146 |
147 | ----
148 | curl localhost:9080/health -i
149 | ----
150 |
151 | This accesses the health check resource and will hopefully give us a successful HTTP response.
152 | The `-i` flag causes the HTTP response headers to be printed.
153 |
154 |
155 | ==== Ordering coffee
156 |
157 | Now, we can finally ask for the coffee orders:
158 |
159 | ----
160 | curl localhost:9080/coffee-shop/resources/orders
161 | ----
162 |
163 | This will give us the coffee orders that are in the system returned as JSON.
164 | No orders have been created, thus the array is empty.
165 |
166 | Let's change this and create a coffee order!
167 |
168 | If we have a look at the JAX-RS resource in the coffee-shop application, we can see that to create a new coffee order, we have to POST a JSON object containing the coffee _type_.
169 | Using `curl` this looks as follows:
170 |
171 | ----
172 | curl localhost:9080/coffee-shop/resources/orders -i -XPOST \
173 | -H 'Content-Type: application/json' \
174 | -d '{"type":"Espresso"}'
175 | ----
176 |
177 | `-XPOST` specifies the `POST` HTTP method, `-H` the HTTP header, so the service knows that we're sending the JSON content type, and `-d` specifies the data that we send as HTTP request body.
178 | Sending this command hopefully yields us a successful `201 Created` response, the information that our coffee order is in the system.
179 |
180 | We can double-check this by querying the resource for all coffee orders again, similar to before, which now should respond with a JSON array that contains our order.
181 |
182 | If that's the case, congratulations!
183 | You've just built, run, and manually tested cloud-native microservices running in Docker containers.
184 |
185 |
186 | === Pushing
187 |
188 | In order to make our Docker images not just locally accessible, we will push them to a container registry in the cloud.
189 | Then we can later pull them from any environment, like a managed Kubernetes cluster.
190 |
191 | We push our Docker images with the following commands:
192 |
193 | ----
194 | docker push de.icr.io/istio-ws-/coffee-shop:1
195 | docker push de.icr.io/istio-ws-/barista:1
196 | ----
197 |
198 | You will notice that the second `push` command runs much faster and outputs that almost all layers already exist in the remote repository.
199 | This is thanks to the copy-on-write file system which Docker uses internally, saving us developers an enormous amount of time and bandwidth.
200 | The same is true for re-building images.
201 | Docker recognizes which commands of the Docker build need to be re-executed, and only performs these and the following.
202 |
203 | This is the reason why especially for cloud-native applications it makes sense to craft thin deployment artifacts.
204 | The WAR files that comprise our applications only contain the business logic that is part of our application, no implementation details.
205 | The base image, i.e. the application server or its configuration doesn't change that frequently, therefore we're mostly shipping our (small) application only.
206 |
207 | Now, that our microservices are running as Docker containers already, let's see how we bring Kubernetes into the game in the link:03-kubernetes.adoc[next section].
208 |
--------------------------------------------------------------------------------
/workshop/03-kubernetes.adoc:
--------------------------------------------------------------------------------
1 | == Kubernetes
2 |
3 | In order to run our applications on a Kubernetes cluster, we will need to create a few resources.
4 | Ultimately, we define our environment declaratively, using Infrastructure-as-Code, similar to what we did using the `Dockerfile` files, but this time for the whole environment.
5 | Thus, we want to define how many replicas of our individual application images we want to run, how they are connected and configured, etc.
6 | All of that as code which should reside in our project.
7 |
8 | Let's examine the resources we need to create.
9 |
10 | === Pods
11 |
12 | A pod is the atomic workload inside Kubernetes clusters.
13 | It is an abstraction over our application instance and contains _usually_ a single container.
14 | That is, the Docker containers we had in the last part will now run inside Kubernetes pods, one pod per container instance.
15 | If we want to have multiple replicas of our applications, we create multiple pods.
16 |
17 | For your information, the following shows a snippet of a YAML pod definition.
18 | This is not something that we create yet, but make sure that you can follow along the semantics of the definitions.
19 |
20 | [source,yaml]
21 | ----
22 | # ...
23 | metadata:
24 | labels:
25 | app: coffee-shop
26 | spec:
27 | containers:
28 | - name: coffee-shop
29 | image: de.icr.io/istio-ws-/coffee-shop:1
30 | ports:
31 | - containerPort: 9080
32 | restartPolicy: Always
33 | # ...
34 | ----
35 |
36 | The snippet defines the specification of one pod that should contain a single container which is created from our coffee-shop Docker image.
37 |
38 | Kubernetes pods are mortal and once they terminate, they are usually not re-started, but recreated.
39 | To make sure that we don't have to recreate the pod resources manually, we use controllers that manage a desired number of pod replicas for us, here Kubernetes deployments.
40 |
41 | === Deployments
42 |
43 | We create a Kubernetes deployment that manages one or more replicas of our microservices for us.
44 | Have a look at the following Kubernetes deployment definition:
45 |
46 | [source,yaml]
47 | ----
48 | kind: Deployment
49 | apiVersion: apps/v1
50 | metadata:
51 | name: coffee-shop
52 | spec:
53 | selector:
54 | matchLabels:
55 | app: coffee-shop
56 | version: v1
57 | replicas: 1
58 | template:
59 | metadata:
60 | labels:
61 | app: coffee-shop
62 | spec:
63 | containers:
64 | - name: coffee-shop
65 | image: de.icr.io/istio-ws-/coffee-shop:1
66 | ports:
67 | - containerPort: 9080
68 | restartPolicy: Always
69 | ----
70 |
71 | This definition looks similar to the pod snippet before, but it encapsulates the pod as a template, from which it'll create the pod replicas.
72 | The deployment will ensure that the desired number of replicas will be met.
73 | If a pod is terminated, whether deliberately or not, the deployment will automatically create a replacement, that is a new pod.
74 |
75 | If we want to scale our application, change the deployment definition in another way, or deploy a different version (that is Docker image), we simply change the YAML definition and update it against the cluster.
76 | Kubernetes will try to make sure that the new changes will be reflected, while keeping the disruption minimal.
77 | It ships with zero-downtime deployment support out of the box.
78 |
79 |
80 | ==== Life cycle & probes
81 |
82 | A pod that contains a Java application will need a few moments to be fully up-and-running.
83 | Since Kubernetes has no knowledge of the contents of the running container it could only assume that running pods are immediately able to handle traffic.
84 | However, that is not the case and therefore we need to signal once a container is ready to do some meaningful work.
85 |
86 | For that reason, we include liveness and readiness probes into our deployment definition.
87 |
88 | A liveness probe tells Kubernetes whether the whole pod (for us the application server) is still running.
89 | If that is not the case, Kubernetes will immediately terminate and replace it.
90 | A readiness probe indicates whether the pod is ready to do some work, that is, handle traffic.
91 |
92 | There are multiple types of https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes[container probes^].
93 | We use an exec action which can execute an arbitrary binary inside the container.
94 | The `curl` commands will connect to the application servers, and the health check resources, respectively.
95 |
96 | Now, have a look at the resulting deployment definition:
97 |
98 | [source,yaml]
99 | ----
100 | kind: Deployment
101 | apiVersion: apps/v1
102 | metadata:
103 | name: coffee-shop
104 | spec:
105 | selector:
106 | matchLabels:
107 | app: coffee-shop
108 | version: v1
109 | replicas: 1
110 | template:
111 | metadata:
112 | labels:
113 | app: coffee-shop
114 | spec:
115 | containers:
116 | - name: coffee-shop
117 | image: de.icr.io/istio-ws-/coffee-shop:1
118 | ports:
119 | - containerPort: 9080
120 | livenessProbe:
121 | exec:
122 | command: ["sh", "-c", "curl -f http://localhost:9080/"]
123 | initialDelaySeconds: 20
124 | readinessProbe:
125 | exec:
126 | command: ["sh", "-c", "curl -s http://localhost:9080/health | grep -q coffee-shop"]
127 | initialDelaySeconds: 40
128 | restartPolicy: Always
129 | ----
130 |
131 | We create YAML files with this content under the `deployment/` folders of both microservice projects.
132 | One deployment will be called `coffee-shop`, like the one displayed, and the other one `barista`.
133 | Make sure that all names, labels, image, and URL declarations are correct.
134 |
135 | Now, we finally want to create these resources in our Kubernetes cluster.
136 | We simply apply the files with the `kubectl apply` command:
137 |
138 | ----
139 | kubectl apply -f coffee-shop/deployment/
140 | kubectl apply -f barista/deployment/
141 | ----
142 |
143 | The command will apply, that is create or update, all resources that reside under the corresponding directory.
144 |
145 | You can check whether the resources have been created successfully, by querying the current deployments and pods:
146 |
147 | ----
148 | kubectl get pods
149 | kubectl get deployments
150 | ----
151 |
152 | After a short startup phase, you should see two pods, one for coffee-shop and one for barista, that are ready, i.e. `READY: ... 1/1`.
153 |
154 | Now our two applications apparently are running in the cloud, but how to connect to them?
155 |
156 |
157 | === Services
158 |
159 | A Kubernetes service is a logical abstraction over "`applications`" (whatever these are) and the replicas of these.
160 | Services are single points of entry when we want to connect to our microservices.
161 | They act like load balancers and transparently distribute the requests to the individual pods.
162 |
163 | Inside clusters, services are resolvable via a cluster-internal virtual IP address and via DNS by their name.
164 | The latter enables us to simply connect to host names such as `barista`, if a service `barista` exists within the cluster.
165 |
166 | Let's have a look at the coffee-shop service definition:
167 |
168 | [source,yaml]
169 | ----
170 | kind: Service
171 | apiVersion: v1
172 | metadata:
173 | name: coffee-shop
174 | labels:
175 | app: coffee-shop
176 | spec:
177 | selector:
178 | app: coffee-shop
179 | ports:
180 | - port: 9080
181 | name: http
182 | type: NodePort
183 | ----
184 |
185 | The service resource only defines a name, some meta data labels, and where to route traffic to: all pods that match the given selector.
186 | If you have a look at our deployment definitions again, you will see that all pods define an identical `app` label.
187 | This is the connection how the services know, which pods to distribute the requests to.
188 | This service will connect to all pods with label `app: coffee-shop` via port `9080`.
189 | Furthermore, services only connect to pods which are ready.
190 |
191 | Now, we create YAML definitions for the coffee-shop and barista services, also under the `deployment/` directories.
192 | You can either create a new file alongside the deployment definition, or put all Kubernetes resources in a single YAML file, with the resources (that is, YAML objects) being separated by a line of three dashes (`---`).
193 | Again, make sure that the name, label, and selector definition match either the coffee-shop or barista application.
194 |
195 | We create these resources on the cluster as well, by issuing the same commands like before:
196 |
197 | ----
198 | kubectl apply -f coffee-shop/deployment/
199 | kubectl apply -f barista/deployment/
200 | ----
201 |
202 | This is the nice story about declarative Infrastructure-as-Code files: we specify the desired state, and let Kubernetes _apply_ the definitions against the cluster.
203 | Our directories now contain the service definitions, as well.
204 |
205 | You can now verify whether the services have been created correctly:
206 |
207 | ----
208 | kubectl get services
209 | ----
210 |
211 |
212 | === Accessing our applications
213 |
214 | Now, we will connect to our coffee-shop application from outside the cluster.
215 |
216 | If we have created a lite cluster we have to connect to our application via the IP address of the (only) node and the node port of the service.
217 | Therefore, we retrieve the public IP address of our cluster:
218 |
219 | ----
220 | ibmcloud ks workers --cluster cloud-native
221 | ID Public IP Private IP Machine Type State Status Zone Version
222 | kube-xxx 159.122.186.7 10.144.188.64 free normal Ready mil01 1.10.12_1541
223 | ----
224 |
225 | And the node port of our coffee-shop application:
226 |
227 | ----
228 | kubectl get service coffee-shop
229 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
230 | coffee-shop NodePort 172.21.23.149 9080:30995/TCP 2m
231 | ----
232 |
233 | With the example details, we can access our coffee-shop application using the URL `159.122.186.7:30995`, by combining the public IP address and the node port of the service:
234 |
235 | ----
236 | curl :/coffee-shop/resources/orders -i
237 | ----
238 |
239 | NOTE: If you have created a standard cluster, you can use a Kubernetes ingress resource.
240 | However, in this workshop, we'll focus on Istio networking and thus will demonstrate Istio gateway resources instead (part of the next section).
241 |
242 |
243 | ==== Kubernetes Config Maps
244 |
245 | We can define environment variables directly in Kubernetes deployment definitions, or configure them in so called config maps.
246 | A config map is a Kubernetes resource that stores configuration properties in the cluster.
247 | It can be mapped to files or, as in our example, environment variables.
248 |
249 | We create the following Kubernetes YAML definition:
250 |
251 | [source,yaml]
252 | ----
253 | kind: ConfigMap
254 | apiVersion: v1
255 | metadata:
256 | name: coffee-config
257 | data:
258 | location: OFFICE
259 | ----
260 |
261 | This defines the config map `coffee-config`, which contains the property `location` with the value `OFFICE`.
262 |
263 | In order to make that property available to the running pods later on, we include the value in our Kubernetes deployment definition:
264 |
265 | [source,yaml]
266 | ----
267 | # ...
268 | containers:
269 | - name: coffee-shop
270 | image: de.icr.io/istio-ws-/coffee-shop:1
271 | ports:
272 | - containerPort: 9080
273 | env:
274 | - name: location
275 | valueFrom:
276 | configMapKeyRef:
277 | name: coffee-config
278 | key: location
279 | livenessProbe:
280 | # ...
281 | ----
282 |
283 | The above example maps the config map values to environment variables in the pods.
284 | As MicroProfile Config ships with a default config source for environment variables, this property will automatically be available to our application.
285 | Thus, the injected value for the `location` will be the enum value `OFFICE`.
286 |
287 | You can have a look at the coffee order locations under the resource for single coffee orders.
288 | You retrieve the URL of a single coffee order from the response of all orders:
289 |
290 | ----
291 | curl :/coffee-shop/resources/orders
292 | curl :/coffee-shop/resources/orders/
293 | ----
294 |
295 |
296 | === 12 factors
297 |
298 | The https://12factor.net/[12 factors^] of modern software-as-a-service applications describe what aspects developers should take into account.
299 | Have a look at the described factors and contemplate, where we've already covered these aspects by using Enterprise Java with cloud-native technologies.
300 | With MicroProfile and its programming model, combined with Docker and Kubernetes, we can easily build 12-factor microservices.
301 | We'll discuss the impact of the 12 factors together.
302 |
303 | Now, we've set up a Kubernetes environment that orchestrates our microservices.
304 | Let's see how we can integrate Istio in the link:04-istio.adoc[next section].
305 |
--------------------------------------------------------------------------------