├── .gitignore
├── src
│   ├── test
│   │   ├── resources
│   │   │   └── org
│   │   │       └── apache
│   │   │           └── kafka
│   │   │               └── connect
│   │   │                   └── cli
│   │   │                       └── expected-connect-distributed.properties
│   │   └── java
│   │       └── org
│   │           └── apache
│   │               └── kafka
│   │                   └── connect
│   │                       └── cli
│   │                           └── ConnectDistributedWrapperTest.java
│   └── main
│       ├── resources
│       │   ├── log4j2.xml
│       │   └── connect-distributed.properties
│       └── java
│           └── org
│               └── apache
│                   └── kafka
│                       └── connect
│                           └── cli
│                               └── ConnectDistributedWrapper.java
├── .dockerignore
├── chart
│   ├── templates
│   │   ├── serviceaccount.yaml
│   │   ├── service.yaml
│   │   ├── tests
│   │   │   └── test-connection.yaml
│   │   ├── hpa.yaml
│   │   ├── NOTES.txt
│   │   ├── _helpers.tpl
│   │   └── deployment.yaml
│   ├── .helmignore
│   ├── Chart.yaml
│   ├── values.yaml
│   └── README.md
├── Dockerfile.confluent-hub
├── Dockerfile.confluent-hub-alpine
├── .mvn
│   └── wrapper
│       └── maven-wrapper.properties
├── .github
│   └── workflows
│       └── maven.yml
├── version-bump.py
├── docker-compose.yml
├── lipsum.txt
├── Makefile
├── docker-compose.cluster.yml
├── mvnw.cmd
├── mvnw
├── LICENSE
├── README.md
└── pom.xml
/.gitignore:
--------------------------------------------------------------------------------
1 | target/
2 |
3 | .mvn/wrapper/maven-wrapper.jar
4 |
5 | .idea/
6 | *.iml
7 |
8 | lsp/
9 | .vscode/
10 |
--------------------------------------------------------------------------------
/src/test/resources/org/apache/kafka/connect/cli/expected-connect-distributed.properties:
--------------------------------------------------------------------------------
1 | bootstrap.servers=localhost:9092
2 | group.id=junit
3 | key.converter=org.apache.kafka.connect.converters.ByteArrayConverter
4 | value.converter=org.apache.kafka.connect.converters.ByteArrayConverter
5 |
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
1 | .idea/
2 | *.iml
3 |
4 | .vscode/
5 |
6 | .mvn/
7 | mvnw*
8 | pom.xml
9 | src/
10 | target/
11 |
12 | Makefile
13 |
14 | .git*
15 | .gitignore
16 |
17 | Dockerfile.*
18 | docker-compose.*.yml
19 |
20 | LICENSE
21 | *.txt
22 | *.md
23 |
24 | *.py
25 |
26 | chart/
27 |
--------------------------------------------------------------------------------
/chart/templates/serviceaccount.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.serviceAccount.create -}}
2 | apiVersion: v1
3 | kind: ServiceAccount
4 | metadata:
5 | name: {{ include "kafka-connect.serviceAccountName" . }}
6 | labels:
7 | {{- include "kafka-connect.labels" . | nindent 4 }}
8 | {{- with .Values.serviceAccount.annotations }}
9 | annotations:
10 | {{- toYaml . | nindent 4 }}
11 | {{- end }}
12 | {{- end }}
13 |
--------------------------------------------------------------------------------
/chart/.helmignore:
--------------------------------------------------------------------------------
1 | # Patterns to ignore when building packages.
2 | # This supports shell glob matching, relative path matching, and
3 | # negation (prefixed with !). Only one pattern per line.
4 | .DS_Store
5 |
6 | # Common VCS dirs
7 | .git/
8 | .gitignore
9 |
10 | # Common backup files
11 | *.swp
12 | *.bak
13 | *.tmp
14 | *.orig
15 | *~
16 |
17 | # Various IDEs
18 | .project
19 | .idea/
20 |
21 | .vscode/
22 |
--------------------------------------------------------------------------------
/chart/templates/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: {{ include "kafka-connect.fullname" . }}
5 | labels:
6 | {{- include "kafka-connect.labels" . | nindent 4 }}
7 | spec:
8 | type: {{ .Values.service.type }}
9 | ports:
10 | - port: {{ .Values.service.port }}
11 | targetPort: http
12 | protocol: TCP
13 | name: http
14 | selector:
15 | {{- include "kafka-connect.selectorLabels" . | nindent 4 }}
16 |
--------------------------------------------------------------------------------
/chart/templates/tests/test-connection.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: "{{ include "kafka-connect.fullname" . }}-test-connection"
5 | labels:
6 | {{- include "kafka-connect.labels" . | nindent 4 }}
7 | annotations:
8 | "helm.sh/hook": test
9 | spec:
10 | containers:
11 | - name: wget
12 | image: busybox
13 | command: ['wget']
14 | args: ['{{ include "kafka-connect.fullname" . }}:{{ .Values.service.port }}']
15 | restartPolicy: Never
16 |
--------------------------------------------------------------------------------
/src/main/resources/log4j2.xml:
--------------------------------------------------------------------------------
<?xml version="1.0" encoding="UTF-8"?>
<!-- Minimal Log4j 2 configuration: route all Kafka Connect logs to stdout for container use -->
<Configuration>
    <Appenders>
        <Console name="STDOUT" target="SYSTEM_OUT">
            <PatternLayout pattern="[%d] %p %m (%c:%L)%n"/>
        </Console>
    </Appenders>
    <Loggers>
        <Logger name="org.reflections" level="ERROR"/>
        <Root level="INFO">
            <AppenderRef ref="STDOUT"/>
        </Root>
    </Loggers>
</Configuration>
--------------------------------------------------------------------------------
/Dockerfile.confluent-hub:
--------------------------------------------------------------------------------
1 | FROM cricketeerone/apache-kafka-connect:latest
2 |
# Get curl and unzip, needed for the Confluent Hub CLI
4 | RUN apt-get -y update && apt-get install -y \
5 | ca-certificates \
6 | curl \
7 | unzip \
8 | && rm -rf /var/lib/apt/lists/*
9 |
10 | # Install Confluent Hub CLI
11 | RUN mkdir -p /opt/confluent-hub-client \
12 | && curl -vv -kL "https://client.hub.confluent.io/confluent-hub-client-latest.tar.gz" \
13 | | tar -xzv -C /opt/confluent-hub-client
14 | ENV PATH="/opt/confluent-hub-client/bin:${PATH}"
15 |
16 | # Example connector installation
17 | # RUN confluent-hub install --no-prompt \
18 | # --component-dir /app/libs --worker-configs /app/resources/connect-distributed.properties -- \
19 | #
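#
# For instance, installing the (illustrative) datagen connector:
# RUN confluent-hub install --no-prompt \
#     --component-dir /app/libs --worker-configs /app/resources/connect-distributed.properties -- \
#     confluentinc/kafka-connect-datagen:latest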
--------------------------------------------------------------------------------
/Dockerfile.confluent-hub-alpine:
--------------------------------------------------------------------------------
1 | FROM cricketeerone/apache-kafka-connect:alpine
2 |
# Get curl, unzip, and bash, needed for the Confluent Hub CLI
4 | RUN apk update && apk add --no-cache \
5 | ca-certificates \
6 | curl \
7 | unzip \
8 | bash
9 |
10 | # Install Confluent Hub CLI
11 | RUN mkdir -p /opt/confluent-hub-client \
12 | && curl -vv -kL "https://client.hub.confluent.io/confluent-hub-client-latest.tar.gz" \
13 | | tar -xzv -C /opt/confluent-hub-client
14 | ENV PATH="/opt/confluent-hub-client/bin:${PATH}"
15 |
16 | # Example connector installation
17 | # RUN confluent-hub install --no-prompt \
18 | # --component-dir /app/libs --worker-configs /app/resources/connect-distributed.properties -- \
19 | #
--------------------------------------------------------------------------------
/chart/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | name: kafka-connect
3 | description: A Helm chart for Apache Kafka Connect on Kubernetes
4 | type: application
5 |
6 | # This is the chart version. This version number should be incremented each time you make changes
7 | # to the chart and its templates, including the app version.
8 | # Versions are expected to follow Semantic Versioning (https://semver.org/)
9 | version: 0.1.0
10 |
11 | # This is the version number of the application being deployed. This version number should be
12 | # incremented each time you make changes to the application. Versions are not expected to
13 | # follow Semantic Versioning. They should reflect the version the application is using.
14 | # It is recommended to use it with quotes.
15 | appVersion: "4.0.0"
16 |
--------------------------------------------------------------------------------
/.mvn/wrapper/maven-wrapper.properties:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one
2 | # or more contributor license agreements. See the NOTICE file
3 | # distributed with this work for additional information
4 | # regarding copyright ownership. The ASF licenses this file
5 | # to you under the Apache License, Version 2.0 (the
6 | # "License"); you may not use this file except in compliance
7 | # with the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing,
12 | # software distributed under the License is distributed on an
13 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | # KIND, either express or implied. See the License for the
15 | # specific language governing permissions and limitations
16 | # under the License.
17 | wrapperVersion=3.3.2
18 | distributionType=only-script
19 | distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.9.9/apache-maven-3.9.9-bin.zip
20 |
--------------------------------------------------------------------------------
/chart/templates/hpa.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.autoscaling.enabled }}
2 | apiVersion: autoscaling/v2
3 | kind: HorizontalPodAutoscaler
4 | metadata:
5 | name: {{ include "kafka-connect.fullname" . }}
6 | labels:
7 | {{- include "kafka-connect.labels" . | nindent 4 }}
8 | spec:
9 | scaleTargetRef:
10 | apiVersion: apps/v1
11 | kind: Deployment
12 | name: {{ include "kafka-connect.fullname" . }}
13 | minReplicas: {{ .Values.autoscaling.minReplicas }}
14 | maxReplicas: {{ .Values.autoscaling.maxReplicas }}
15 | metrics:
16 | {{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
17 | - type: Resource
18 | resource:
19 | name: cpu
20 | target:
21 | type: Utilization
22 | averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
23 | {{- end }}
24 | {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
25 | - type: Resource
26 | resource:
27 | name: memory
28 | target:
29 | type: Utilization
30 | averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
31 | {{- end }}
32 | {{- end }}
33 |
--------------------------------------------------------------------------------
/.github/workflows/maven.yml:
--------------------------------------------------------------------------------
1 | # This workflow will build a Java project with Maven
2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/building-and-testing-java-with-maven
3 |
4 | name: Java CI with Maven
5 |
6 | on:
7 | push:
8 | branches:
9 | - master
10 | - main
11 | - 'v[0-9].[0-9].[0-9]'
12 |
13 | jobs:
14 | deploy:
15 | if: github.repository == 'OneCricketeer/apache-kafka-connect-docker'
16 | runs-on: ubuntu-latest
17 | env:
18 | BUILDX_PLATFORMS: linux/amd64,linux/arm64
19 | steps:
20 | - uses: actions/checkout@v4
21 |
22 | - name: Set up JDK
23 | uses: actions/setup-java@v4
24 | with:
25 | java-version: '21'
26 | distribution: 'temurin'
27 | cache: maven
28 |
29 | - name: Log in to Docker Hub
30 | uses: docker/login-action@v3
31 | with:
32 | username: cricketeerone
33 | password: ${{ secrets.DOCKER_PASSWORD }}
34 |
35 | - name: Set up QEMU
36 | uses: docker/setup-qemu-action@v3
37 |
38 | - name: Set up Docker Buildx
39 | uses: docker/setup-buildx-action@v3
40 | with:
41 | platforms: ${{ env.BUILDX_PLATFORMS }}
42 |
43 | - name: Build & Push - Alpine
44 | run: make buildx-confluent-hub-alpine
45 |
46 | # overrides 'latest' tag created in previous step
47 | - name: Build & Push - Ubuntu (override latest)
48 | run: make
49 |
--------------------------------------------------------------------------------
/chart/templates/NOTES.txt:
--------------------------------------------------------------------------------
1 | 1. Get the application URL by running these commands:
2 |
3 | {{- if contains "NodePort" .Values.service.type }}
4 | export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "kafka-connect.fullname" . }})
5 | export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
6 | echo http://$NODE_IP:$NODE_PORT
7 | {{- else if contains "LoadBalancer" .Values.service.type }}
8 | NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "kafka-connect.fullname" . }}'
10 | export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "kafka-connect.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
11 | echo http://$SERVICE_IP:{{ .Values.service.port }}
12 | {{- else if contains "ClusterIP" .Values.service.type }}
13 | export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "kafka-connect.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
14 | export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
15 | echo "Visit http://127.0.0.1:8083 to use your application"
16 | kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8083:$CONTAINER_PORT
17 | {{- end }}
18 |
--------------------------------------------------------------------------------
/version-bump.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import xml.etree.ElementTree as ET
3 |
4 | def __bump(filename, old, new, preserve=None):
5 | lines = []
6 | with open(filename, 'r') as f:
7 | for line in f:
8 | if preserve and not preserve(line.rstrip()):
9 | line = line.replace(old, new)
10 | lines.append(line)
11 | with open(filename, 'w') as f:
12 | for line in lines:
13 | f.write(line)
14 |
15 | def __bump_xhtml(filename, old, new):
__bump(filename, old, new, preserve=lambda s: s.endswith('<!-- hold-version -->'))
17 |
18 | def __bump_yaml(filename, old, new):
19 | __bump(filename, old, new, preserve=lambda s: s.endswith('# hold-version'))
20 |
21 | def pom(old, new):
22 | __bump_xhtml('pom.xml', old, new)
23 |
24 | def readme(old, new):
25 | __bump_xhtml('README.md', old, new)
26 |
27 | def docker_compose(old, new):
28 | __bump_yaml('docker-compose.yml', old, new)
29 | __bump_yaml('docker-compose.cluster.yml', old, new)
30 |
31 | def helm(old, new):
32 | __bump_yaml('chart/Chart.yaml', old, new)
33 | __bump_xhtml('chart/README.md', old, new)
34 |
35 | parser = argparse.ArgumentParser(description='Version bumper')
36 |
37 | pom_tree = ET.parse('pom.xml')
38 | pom_version = pom_tree.find('{http://maven.apache.org/POM/4.0.0}version').text
39 |
40 | parser.add_argument('--old', help='Old version. Defaults to parse from pom.xml version field', default=pom_version)
41 | parser.add_argument('--new', help='New Version')
42 | args = parser.parse_args()
43 |
44 | if not args.new:
45 | parser.print_help()
46 | raise ValueError('missing new version argument')
47 |
48 | for f in [pom, readme, docker_compose, helm]:
49 | f(args.old, args.new)
50 |
--------------------------------------------------------------------------------
/chart/templates/_helpers.tpl:
--------------------------------------------------------------------------------
1 | {{/*
2 | Expand the name of the chart.
3 | */}}
4 | {{- define "kafka-connect.name" -}}
5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
6 | {{- end }}
7 |
8 | {{/*
9 | Create a default fully qualified app name.
10 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
11 | If release name contains chart name it will be used as a full name.
12 | */}}
13 | {{- define "kafka-connect.fullname" -}}
14 | {{- if .Values.fullnameOverride }}
15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
16 | {{- else }}
17 | {{- $name := default .Chart.Name .Values.nameOverride }}
18 | {{- if contains $name .Release.Name }}
19 | {{- .Release.Name | trunc 63 | trimSuffix "-" }}
20 | {{- else }}
21 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
22 | {{- end }}
23 | {{- end }}
24 | {{- end }}
25 |
26 | {{/*
27 | Create chart name and version as used by the chart label.
28 | */}}
29 | {{- define "kafka-connect.chart" -}}
30 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
31 | {{- end }}
32 |
33 | {{/*
34 | Common labels
35 | */}}
36 | {{- define "kafka-connect.labels" -}}
37 | helm.sh/chart: {{ include "kafka-connect.chart" . }}
38 | {{ include "kafka-connect.selectorLabels" . }}
39 | {{- if .Chart.AppVersion }}
40 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
41 | {{- end }}
42 | app.kubernetes.io/managed-by: {{ .Release.Service }}
43 | {{- end }}
44 |
45 | {{/*
46 | Selector labels
47 | */}}
48 | {{- define "kafka-connect.selectorLabels" -}}
49 | app.kubernetes.io/name: {{ include "kafka-connect.name" . }}
50 | app.kubernetes.io/instance: {{ .Release.Name }}
51 | {{- end }}
52 |
53 | {{/*
54 | Create the name of the service account to use
55 | */}}
56 | {{- define "kafka-connect.serviceAccountName" -}}
57 | {{- if .Values.serviceAccount.create }}
58 | {{- default (include "kafka-connect.fullname" .) .Values.serviceAccount.name }}
59 | {{- else }}
60 | {{- default "default" .Values.serviceAccount.name }}
61 | {{- end }}
62 | {{- end }}
63 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 |
3 | x-connect-image: &connect-image cricketeerone/apache-kafka-connect:4.0.0
4 |
5 | x-connect: &connect-vars
6 | CONNECT_BOOTSTRAP_SERVERS: kafka:29092
7 |
8 | CONNECT_GROUP_ID: cg_connect-jib
9 | CONNECT_CONFIG_STORAGE_TOPIC: connect-jib_config
10 | CONNECT_OFFSET_STORAGE_TOPIC: connect-jib_offsets
11 | CONNECT_STATUS_STORAGE_TOPIC: connect-jib_status
12 | # Cannot be higher than the number of brokers in the Kafka cluster
13 | CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1
14 | CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1
15 | CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1
16 | # Defaults for all connectors
17 | CONNECT_KEY_CONVERTER: org.apache.kafka.connect.converters.ByteArrayConverter
18 | CONNECT_VALUE_CONVERTER: org.apache.kafka.connect.converters.ByteArrayConverter
19 | # Where Jib places classes
20 | CONNECT_PLUGIN_PATH: /app/libs
21 |
22 | # Connect client overrides
23 | CONNECT_TASK_SHUTDOWN_GRACEFUL_TIMEOUT_MS: 30000
24 | CONNECT_OFFSET_FLUSH_INTERVAL_MS: 900000
25 | # Connect consumer overrides
26 | CONNECT_CONSUMER_MAX_POLL_RECORDS: 500
27 |
28 | services:
29 | kafka:
30 | image: bitnami/kafka:4.0.0
31 | restart: unless-stopped
32 | ports:
33 | - '9092:9092'
34 | volumes:
35 | - 'kafka_data:/bitnami/kafka'
36 | - $PWD/lipsum.txt:/data/lipsum.txt:ro # Some data to produce
37 | environment:
38 | BITNAMI_DEBUG: yes
39 | ALLOW_PLAINTEXT_LISTENER: yes
40 | # BEGIN: Kraft
41 | KAFKA_ENABLE_KRAFT: yes
42 | KAFKA_CFG_PROCESS_ROLES: controller,broker
43 | KAFKA_CFG_CONTROLLER_LISTENER_NAMES: CONTROLLER
44 | KAFKA_CFG_NODE_ID: 1
45 | KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: 1@kafka:9093
46 | # END: Kraft
47 | KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true'
48 | KAFKA_CFG_LOG_RETENTION_HOURS: 48 # 2 days of retention for demo purposes
49 | # https://rmoff.net/2018/08/02/kafka-listeners-explained/
50 | KAFKA_CFG_LISTENERS: INTERNAL://:29092,CONTROLLER://:9093,EXTERNAL://0.0.0.0:9092
51 | KAFKA_CFG_INTER_BROKER_LISTENER_NAME: INTERNAL
52 | KAFKA_CFG_ADVERTISED_LISTENERS: INTERNAL://kafka:29092,EXTERNAL://127.0.0.1:9092
53 | KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT,CONTROLLER:PLAINTEXT
54 |
55 | # Jib app
56 | connect-jib-1:
57 | image: *connect-image
58 | hostname: connect-jib-1
59 | depends_on:
60 | - kafka
61 | ports:
62 | - '8083:8083'
63 | environment:
64 | <<: *connect-vars
65 | CONNECT_REST_ADVERTISED_HOST_NAME: connect-jib-1
66 |
67 | volumes:
68 | kafka_data:
69 | driver: local
70 |
--------------------------------------------------------------------------------
/lipsum.txt:
--------------------------------------------------------------------------------
1 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque finibus purus in ipsum venenatis sodales. Aliquam facilisis, lectus a elementum convallis, odio est sagittis tellus, quis ultricies augue dui quis erat. Ut lacinia lacinia erat, sed dignissim dolor dignissim quis. Donec hendrerit ultricies pharetra. Morbi fermentum quam nunc, a pharetra turpis mollis sit amet. Integer euismod finibus urna non vulputate. Quisque at feugiat purus, semper sagittis ligula. Interdum et malesuada fames ac ante ipsum primis in faucibus. Vestibulum sollicitudin bibendum enim. Donec rhoncus semper vehicula. Pellentesque vel urna vehicula, blandit metus vitae, convallis ligula.
2 |
3 | Morbi lobortis vulputate dui, vitae aliquet lectus consectetur ac. Curabitur eget pellentesque elit, quis venenatis nibh. Duis sem tortor, laoreet quis turpis ac, eleifend cursus sem. Aenean non ante vitae magna pellentesque auctor a id arcu. Morbi sem sem, luctus finibus dignissim ac, elementum ac nunc. Ut pharetra eu turpis at consequat. Integer ut ipsum eget ligula viverra ultrices. Sed vel urna sed est condimentum mollis faucibus sit amet metus. Nunc eu venenatis neque. Sed posuere nec arcu non dictum. Curabitur non erat lobortis, facilisis tellus quis, tincidunt tellus. Mauris finibus ante nec varius ornare. Nulla a metus posuere, laoreet neque non, tristique nibh. Nulla maximus maximus euismod. Maecenas elit leo, aliquet et cursus a, varius nec eros. Donec tempor risus ac erat ullamcorper efficitur.
4 |
5 | Curabitur eget quam quam. Aliquam et dignissim nunc. Maecenas nec volutpat tellus. Nam non risus eu tortor efficitur placerat. Nullam a lobortis tellus. Ut venenatis nisi lectus, quis dictum enim consequat a. Integer at elementum quam.
6 |
7 | Morbi eu pharetra dolor. Cras pellentesque dictum porta. Donec elementum iaculis auctor. Praesent dui odio, condimentum quis metus sed, tempor accumsan turpis. Nam nibh magna, elementum vel arcu viverra, maximus mattis mi. Proin posuere ligula massa, in bibendum eros porta eget. Aliquam faucibus urna dapibus euismod condimentum. Ut vulputate, tortor non lobortis venenatis, ligula ligula tempor leo, eget laoreet nulla ligula non eros. Nulla varius porttitor nisl at tristique. Etiam semper velit faucibus, malesuada felis gravida, elementum risus. Quisque feugiat condimentum feugiat. Proin in tristique eros. Aliquam sed cursus augue, non tristique massa. Nunc lacinia nisl non erat sollicitudin suscipit. Pellentesque sollicitudin sodales dui quis porttitor.
8 |
9 | Nullam mauris sapien, vestibulum et cursus at, placerat vel ante. Aenean pharetra neque sed sapien suscipit, non sollicitudin urna sagittis. Curabitur et laoreet nulla. Proin mi quam, eleifend ut dictum pulvinar, tempus sed nulla. Proin ut odio ac enim sagittis dapibus. Sed eu orci ac nisi ultrices condimentum et ac libero. Donec id tortor sed velit tempus sollicitudin ut id eros. Duis fringilla, est quis aliquet pulvinar, lacus magna fringilla elit, quis placerat metus magna ac tortor. Ut malesuada, metus ut semper scelerisque, massa diam tincidunt dui, sed blandit eros magna sit amet elit. Integer semper lorem et eros efficitur condimentum. Praesent at sem tempor odio faucibus porttitor. In venenatis eros urna, ac varius justo gravida non. Aenean vitae risus velit. Interdum et malesuada fames ac ante ipsum primis in faucibus. Aliquam nec lobortis sapien, ac consequat sem. Aliquam varius lectus id nisl sodales, id scelerisque nisl bibendum.
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | DOCKER_REGISTRY ?=
2 | DOCKER_USER ?= cricketeerone
3 | DOCKER_IMAGE ?= apache-kafka-connect
4 | DOCKER_FQN = $(DOCKER_REGISTRY)$(DOCKER_USER)/$(DOCKER_IMAGE)
5 | VERSION = $(shell ./mvnw org.apache.maven.plugins:maven-help-plugin:3.4.0:evaluate -Dexpression=project.version -q -DforceStdout)
6 |
7 | DOCKER_TAG_CONFLUENT_HUB = confluent-hub
8 | DOCKERFILE_CONFLUENT_HUB = Dockerfile.$(DOCKER_TAG_CONFLUENT_HUB)
9 |
10 | # Override with 'linux/amd64,linux/arm64' to do multi-platform builds
11 | # Requires running 'docker buildx create --use' to do multi-platform
12 | # ref. https://www.docker.com/blog/how-to-rapidly-build-multi-architecture-images-with-buildx/
13 | BUILDX_PLATFORMS ?= linux/amd64
14 | BUILDX_DO_PUSH ?= 1
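# e.g. 'make BUILDX_PLATFORMS=linux/amd64,linux/arm64 buildx-confluent-hub-alpine' builds and pushes both architectures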
15 |
16 | # Defaults to build and push. Requires 'docker login'.
17 | # Other supported option: 'compile jib:dockerBuild'
18 | MVN_BUILD_CMD ?= compile jib:build
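# e.g. 'make build MVN_BUILD_CMD="compile jib:dockerBuild"' loads the image into the local Docker daemon instead of pushing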
19 | MAVEN = ./mvnw -B --errors --file pom.xml clean $(MVN_BUILD_CMD)
20 |
# Supports arm64; builds and pushes. Refer to the blog above for setup
22 | # Requires 'docker login'
23 | ifneq (,$(findstring arm64,$(BUILDX_PLATFORMS)))
24 | ifeq ($(BUILDX_DO_PUSH),1)
25 | BUILDX_PUSH = --push --platform=$(BUILDX_PLATFORMS)
26 | endif
27 | buildx-confluent-hub: build-multi-arch
28 | @docker buildx build -f $(DOCKERFILE_CONFLUENT_HUB) -t $(DOCKER_FQN):$(VERSION)-$(DOCKER_TAG_CONFLUENT_HUB) $(BUILDX_PUSH) .
29 | @docker buildx build -f $(DOCKERFILE_CONFLUENT_HUB) -t $(DOCKER_FQN):latest-$(DOCKER_TAG_CONFLUENT_HUB) $(BUILDX_PUSH) .
30 | buildx-confluent-hub-alpine: build-multi-arch-alpine
31 | @docker buildx build -f $(DOCKERFILE_CONFLUENT_HUB)-alpine -t $(DOCKER_FQN):$(VERSION)-alpine-$(DOCKER_TAG_CONFLUENT_HUB) $(BUILDX_PUSH) .
32 | @docker buildx build -f $(DOCKERFILE_CONFLUENT_HUB)-alpine -t $(DOCKER_FQN):alpine-$(DOCKER_TAG_CONFLUENT_HUB) $(BUILDX_PUSH) .
33 | else
34 | build-confluent-hub: build
35 | @docker build -f $(DOCKERFILE_CONFLUENT_HUB) -t $(DOCKER_FQN):$(VERSION)-$(DOCKER_TAG_CONFLUENT_HUB) .
36 | @docker tag $(DOCKER_FQN):$(VERSION)-$(DOCKER_TAG_CONFLUENT_HUB) $(DOCKER_FQN):latest-$(DOCKER_TAG_CONFLUENT_HUB)
37 | build-confluent-hub-alpine: build-alpine
38 | @docker build -f $(DOCKERFILE_CONFLUENT_HUB)-alpine -t $(DOCKER_FQN):$(VERSION)-alpine-$(DOCKER_TAG_CONFLUENT_HUB) .
39 | @docker tag $(DOCKER_FQN):$(VERSION)-alpine-$(DOCKER_TAG_CONFLUENT_HUB) $(DOCKER_FQN):alpine-$(DOCKER_TAG_CONFLUENT_HUB)
40 | endif
41 |
42 | build: # default machine architecture build
43 | @$(MAVEN)
44 | build-alpine:
45 | @$(MAVEN) -Palpine-temurin
46 | ifneq (,$(findstring arm64,$(BUILDX_PLATFORMS)))
47 | build-multi-arch: # refer pom.xml for built platforms
48 | @$(MAVEN) -Pubuntu,ubuntu-multi-arch
49 | build-multi-arch-alpine: # refer pom.xml for built platforms
50 | @$(MAVEN) -Palpine-multi-arch
51 | endif
52 |
53 | # required targets if using `mvn jib:dockerBuild`
54 | push: build-confluent-hub
55 | ifneq (jib:build,$(findstring jib:build,$(MVN_BUILD_CMD)))
56 | @docker push $(DOCKER_FQN):latest
57 | @docker push $(DOCKER_FQN):$(VERSION)
58 | endif
59 | @docker push $(DOCKER_FQN):latest-$(DOCKER_TAG_CONFLUENT_HUB)
60 | @docker push $(DOCKER_FQN):$(VERSION)-$(DOCKER_TAG_CONFLUENT_HUB)
61 | push-alpine: build-confluent-hub-alpine # separated command as jib is overriding 'latest' tag
62 | ifneq (jib:build,$(findstring jib:build,$(MVN_BUILD_CMD)))
63 | @docker push $(DOCKER_FQN):alpine
64 | @docker push $(DOCKER_FQN):$(VERSION)-alpine
65 | endif
66 | @docker push $(DOCKER_FQN):alpine-$(DOCKER_TAG_CONFLUENT_HUB)
67 | @docker push $(DOCKER_FQN):$(VERSION)-alpine-$(DOCKER_TAG_CONFLUENT_HUB)
68 |
69 | clean:
70 | @docker rmi -f $(DOCKER_FQN):latest
71 | @docker rmi -f $(DOCKER_FQN):latest-$(DOCKER_TAG_CONFLUENT_HUB)
72 | @docker rmi -f $(DOCKER_FQN):$(VERSION)
73 | @docker rmi -f $(DOCKER_FQN):$(VERSION)-$(DOCKER_TAG_CONFLUENT_HUB)
74 | @docker rmi -f $(DOCKER_FQN):alpine
75 | @docker rmi -f $(DOCKER_FQN):alpine-$(DOCKER_TAG_CONFLUENT_HUB)
76 | @docker rmi -f $(DOCKER_FQN):$(VERSION)-alpine
77 | @docker rmi -f $(DOCKER_FQN):$(VERSION)-alpine-$(DOCKER_TAG_CONFLUENT_HUB)
--------------------------------------------------------------------------------
/docker-compose.cluster.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 |
3 | x-connect-image: &connect-image cricketeerone/apache-kafka-connect:4.0.0
4 |
5 | x-connect: &connect-vars
6 | CONNECT_BOOTSTRAP_SERVERS: kafka:29092
7 |
8 | CONNECT_GROUP_ID: cg_connect-jib
9 | CONNECT_CONFIG_STORAGE_TOPIC: connect-jib_config
10 | CONNECT_OFFSET_STORAGE_TOPIC: connect-jib_offsets
11 | CONNECT_STATUS_STORAGE_TOPIC: connect-jib_status
12 | # Cannot be higher than the number of brokers in the Kafka cluster
13 | CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1
14 | CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1
15 | CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1
16 | # Defaults for all connectors
17 | CONNECT_KEY_CONVERTER: org.apache.kafka.connect.converters.ByteArrayConverter
18 | CONNECT_VALUE_CONVERTER: org.apache.kafka.connect.converters.ByteArrayConverter
19 | # Where Jib places classes
20 | CONNECT_PLUGIN_PATH: /app/libs
21 |
22 | # Connect client overrides
23 | CONNECT_TASK_SHUTDOWN_GRACEFUL_TIMEOUT_MS: 30000
24 | CONNECT_OFFSET_FLUSH_INTERVAL_MS: 900000
25 | # Connect consumer overrides
26 | CONNECT_CONSUMER_MAX_POLL_RECORDS: 500
27 |
28 | x-kraft: &kraft-vars
29 | KAFKA_KRAFT_CLUSTER_ID: WNfE3WMTRRGBs35BikbfRg # Run 'kafka-storage random-uuid'
30 | BITNAMI_DEBUG: yes
31 | ALLOW_PLAINTEXT_LISTENER: yes
32 | KAFKA_ENABLE_KRAFT: yes
33 | KAFKA_CFG_CONTROLLER_LISTENER_NAMES: CONTROLLER
34 | KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true'
35 | KAFKA_CFG_LOG_RETENTION_HOURS: 48 # 2 days of retention for demo purposes
36 |
37 | services:
38 | kafka-controller:
39 | image: &kafka-image bitnami/kafka:4.0.0
40 | restart: unless-stopped
41 | volumes:
42 | - 'kafka_controller_data:/bitnami/kafka'
43 | environment:
44 | <<: *kraft-vars
45 | KAFKA_CFG_PROCESS_ROLES: controller
46 | KAFKA_CFG_NODE_ID: 1
47 | KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,CONTROLLER:PLAINTEXT
48 | KAFKA_CFG_LISTENERS: CONTROLLER://:9093
49 | KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: &kraft-quorum 1@kafka-controller:9093
50 |
51 | kafka:
52 | image: *kafka-image
53 | restart: unless-stopped
54 | depends_on:
55 | - kafka-controller
56 | volumes:
57 | - 'kafka_data:/bitnami/kafka'
58 | - $PWD/lipsum.txt:/data/lipsum.txt:ro # Some data to produce
59 | ports:
60 | - 9092:9092
61 | environment:
62 | <<: *kraft-vars
63 | KAFKA_CFG_PROCESS_ROLES: broker
64 | KAFKA_CFG_NODE_ID: 10 # cannot conflict with controllers
65 | KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: *kraft-quorum
66 | KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT,CONTROLLER:PLAINTEXT
67 | KAFKA_CFG_INTER_BROKER_LISTENER_NAME: INTERNAL
68 | # https://rmoff.net/2018/08/02/kafka-listeners-explained/
69 | KAFKA_CFG_LISTENERS: INTERNAL://:29092,EXTERNAL://0.0.0.0:9092
70 | KAFKA_CFG_ADVERTISED_LISTENERS: INTERNAL://kafka:29092,EXTERNAL://127.0.0.1:9092
71 |
72 | # Jib app
73 | connect-jib-1:
74 | image: *connect-image
75 | hostname: connect-jib-1
76 | labels:
77 | - traefik.enable=true
78 | - "traefik.http.routers.connect-jib-1.rule=Host(`connect-jib.docker.localhost`)"
79 | - traefik.http.services.connect-jib-1.loadbalancer.server.port=8083
80 | depends_on:
81 | - kafka
82 | environment:
83 | <<: *connect-vars
84 | CONNECT_REST_ADVERTISED_HOST_NAME: connect-jib-1
85 |
86 | connect-jib-2:
87 | image: *connect-image
88 | hostname: connect-jib-2
89 | labels:
90 | - traefik.enable=true
91 | - "traefik.http.routers.connect-jib-2.rule=Host(`connect-jib.docker.localhost`)"
92 | - traefik.http.services.connect-jib-2.loadbalancer.server.port=8083
93 | depends_on:
94 | - kafka
95 | environment:
96 | <<: *connect-vars
97 | CONNECT_REST_ADVERTISED_HOST_NAME: connect-jib-2
98 |
99 | reverse-proxy:
100 | image: traefik:v2.10
101 | # Enables the web UI and tells Traefik to listen to docker
102 | command: --api.insecure=true --providers.docker --providers.docker.exposedByDefault=false
103 | ports:
104 | # The HTTP port
105 | - "80:80"
106 | # The Web UI (enabled by --api.insecure=true)
107 | - "8080:8080"
108 | volumes:
109 | # So that Traefik can listen to the Docker events
110 | - /var/run/docker.sock:/var/run/docker.sock
111 |
112 | volumes:
113 | kafka_controller_data:
114 | driver: local
115 | kafka_data:
116 | driver: local
117 |
--------------------------------------------------------------------------------
/chart/values.yaml:
--------------------------------------------------------------------------------
1 | global:
2 | imagePullSecrets: []
3 |
4 | replicaCount: 1
5 |
6 | image:
7 | repository: cricketeerone/apache-kafka-connect
8 | pullPolicy: IfNotPresent
9 | # Overrides the image tag whose default is the chart appVersion.
10 | tag: ""
11 |
12 | nameOverride: ""
13 | fullnameOverride: ""
14 |
15 | serviceAccount:
16 | # Specifies whether a service account should be created
17 | create: true
18 | # Annotations to add to the service account
19 | annotations: {}
20 | # The name of the service account to use.
21 | # If not set and create is true, a name is generated using the fullname template
22 | name: ""
23 |
24 | podAnnotations: {}
25 |
26 | podSecurityContext: {}
27 | # fsGroup: 2000
28 |
29 | ## See `kubectl explain deployment.spec.strategy` for more
30 | ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
31 | deploymentStrategy:
32 | type: RollingUpdate
33 |
34 | securityContext: {}
35 | # capabilities:
36 | # drop:
37 | # - ALL
38 | # readOnlyRootFilesystem: true
39 | # runAsNonRoot: true
40 | # runAsUser: 1000
41 |
42 | service:
43 | type: ClusterIP
44 | port: 8083
45 |
46 | resources: {}
47 | # We usually recommend not to specify default resources and to leave this as a conscious
48 | # choice for the user. This also increases chances charts run on environments with little
49 | # resources, such as Minikube. If you do want to specify resources, uncomment the following
50 | # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
51 | # limits:
52 | # cpu: 100m
53 | # memory: 128Mi
54 | # requests:
55 | # cpu: 100m
56 | # memory: 128Mi
57 |
58 | ## List of volumeMounts for connect server container
59 | ## ref: https://kubernetes.io/docs/concepts/storage/volumes/
60 | volumeMounts:
61 | # - name: credentials
62 | # mountPath: /etc/creds-volume
63 |
## List of volumes for connect server container
65 | ## ref: https://kubernetes.io/docs/concepts/storage/volumes/
66 | volumes:
67 | # - name: credentials
68 | # secret:
69 | # secretName: creds
70 |
71 | ## Kafka Connect JVM Heap Option
72 | heapOptions: "-Xms256M -Xmx2G"
73 |
74 | ## Kafka Connect properties
75 | ## ref: https://kafka.apache.org/documentation/#connectconfigs
76 |
77 | # bootstrapServers defines the Kafka bootstrap.servers to connect to
78 | bootstrapServers: ""
79 | # groupId sets the Kafka Connect cluster group.id
80 | groupId: ""
# configurationOverrides defines additional connect-distributed.properties settings
82 | configurationOverrides:
83 | "plugin.path": "/app/libs"
84 | "key.converter": "org.apache.kafka.connect.converters.ByteArrayConverter"
85 | "value.converter": "org.apache.kafka.connect.converters.ByteArrayConverter"
86 | "config.storage.replication.factor": "3"
87 | "offset.storage.replication.factor": "3"
88 | "status.storage.replication.factor": "3"
89 |
90 | jmx:
91 | enabled: false
92 | port: 5555
93 |
94 | ## Additional env variables
95 | customEnv: {}
96 |
97 | ## "valueFrom" environment variable references that will be added to deployment pods. Name is templated.
98 | ## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#envvarsource-v1-core
99 | ## Renders in container spec as:
100 | ## env:
101 | ## ...
102 | ## - name:
103 | ## valueFrom:
104 | ##
105 | envValueFrom: {}
106 | # ENV_NAME:
107 | # configMapKeyRef:
108 | # name: configmap-name
109 | # key: value_key
110 |
## The name of a secret in the same kubernetes namespace which contains values to be added to the environment
112 | ## This can be useful for auth tokens, etc. Value is templated.
113 | envFromSecret: ""
114 |
## Sensitive environment variables that will be rendered as a new secret object
116 | ## This can be useful for auth tokens, etc.
117 | ## If the secret values contains "{{", they'll need to be properly escaped so that they are not interpreted by Helm
118 | ## ref: https://helm.sh/docs/howto/charts_tips_and_tricks/#using-the-tpl-function
119 | envRenderSecret: {}
120 |
121 | ## The names of secrets in the same kubernetes namespace which contain values to be added to the environment
## Each entry should contain a name key, and may include an 'optional' key indicating whether the secret may be absent.
123 | ## Name is templated.
124 | envFromSecrets: []
125 | ## - name: secret-name
126 | ## optional: true
127 |
128 | autoscaling:
129 | enabled: false
130 | minReplicas: 1
131 | maxReplicas: 10
132 | targetCPUUtilizationPercentage: 80
133 | # targetMemoryUtilizationPercentage: 80
134 |
135 | nodeSelector: {}
136 |
137 | tolerations: []
138 |
139 | affinity: {}
140 |
--------------------------------------------------------------------------------
/src/main/java/org/apache/kafka/connect/cli/ConnectDistributedWrapper.java:
--------------------------------------------------------------------------------
1 | package org.apache.kafka.connect.cli;
2 |
3 | import org.slf4j.Logger;
4 | import org.slf4j.LoggerFactory;
5 |
6 | import java.io.File;
7 | import java.io.FileOutputStream;
8 | import java.io.IOException;
9 | import java.io.PrintWriter;
10 | import java.util.AbstractMap;
11 | import java.util.Map;
12 | import java.util.function.Predicate;
13 |
14 | /**
15 | * Wrapper class for starting connect-distributed using 'CONNECT_' properties from
16 | * {@link System#getenv()}
17 | **/
18 | public class ConnectDistributedWrapper implements Runnable {
19 |
20 | private static final Logger log = LoggerFactory.getLogger(ConnectDistributedWrapper.class);
21 |
22 | /**
23 | * Environment variables for Kafka Connect properties start with 'CONNECT_',
24 | * then are upper-cased and separated with underscores instead of periods.
25 | */
26 | static final String CONNECT_ENV_PREFIX = "CONNECT_";
27 |
28 | /**
29 | * Predicate for filtering environment variables.
30 | */
private static final Predicate<Map.Entry<String, String>> CONNECT_ENV_FILTER =
32 | e -> e.getKey().startsWith(CONNECT_ENV_PREFIX);
33 |
34 | public static void main(final String[] args) {
35 | log.debug("Starting Connect Wrapper");
36 | final ConnectDistributedWrapper wrapper = new ConnectDistributedWrapper();
37 | Runtime.getRuntime().addShutdownHook(new Thread(wrapper::stop));
38 | wrapper.run();
39 | }
40 |
41 | /**
42 | * Take an environment variable starting with 'CONNECT_' and convert it into a
43 | * {@link org.apache.kafka.connect.runtime.WorkerConfig} or
44 | * {@link org.apache.kafka.connect.runtime.ConnectorConfig} value.
45 | *
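* <p>For example, {@code connectEnvVarToProp("CONNECT_BOOTSTRAP_SERVERS")} returns {@code "bootstrap.servers"}.
*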
46 | * @param k An Environment variable key
47 | * @return A config value from {@link org.apache.kafka.connect.runtime.WorkerConfig}
48 | * or {@link org.apache.kafka.connect.runtime.ConnectorConfig}
49 | */
50 | static String connectEnvVarToProp(String k) {
51 | if (k == null || k.isEmpty()) {
52 | throw new IllegalArgumentException("Input cannot be null or empty");
53 | }
54 | final int prefixLength = CONNECT_ENV_PREFIX.length();
if (!k.startsWith(CONNECT_ENV_PREFIX) || k.equals(CONNECT_ENV_PREFIX)) {
56 | throw new IllegalArgumentException(String.format(
57 | "Input does not start with '%s' or does not define a property", CONNECT_ENV_PREFIX));
58 | }
59 | return k.toLowerCase().substring(prefixLength).replace('_', '.');
60 | }
61 |
62 | /**
63 | * Write all Environment variables starting with 'CONNECT_' into a temporary property file to be
64 | * used with {@link ConnectDistributed}.
65 | *
* @param env A Map containing key-value pairs. Any keys starting with 'CONNECT_' will end up in the output file.
67 | * @return A {@link File} instance to be used with {@link ConnectDistributed#main(String[])}
68 | * @throws IOException If the property file cannot be created.
69 | */
static File createConnectProperties(Map<String, String> env) throws IOException {
71 | if (env == null || env.isEmpty()) {
72 | throw new IllegalArgumentException("Provided argument cannot be null or empty");
73 | }
74 | final File workerPropFile = File.createTempFile("tmp-connect-distributed", ".properties");
75 | workerPropFile.deleteOnExit();
76 | try (PrintWriter pw = new PrintWriter(new FileOutputStream(workerPropFile))) {
77 | log.trace("Writing Connect worker properties '{}'", workerPropFile.getAbsolutePath());
78 | env.entrySet()
79 | .stream()
80 | .filter(CONNECT_ENV_FILTER)
81 | .map(e -> new AbstractMap.SimpleEntry<>(connectEnvVarToProp(e.getKey()), e.getValue()))
82 | .forEach(e -> {
83 | final String k = e.getKey();
84 | final String v = e.getValue();
85 | log.debug("{}={}", k, v);
86 | pw.printf("%s=%s%n", k, v);
87 | });
88 | pw.flush();
89 | log.trace("Connect worker properties written");
90 | return workerPropFile;
91 | }
92 | }
93 |
94 | @Override
95 | public void run() {
96 | try {
97 | ConnectDistributed.main(new String[]{createConnectProperties(System.getenv()).getAbsolutePath()});
98 | } catch (Exception e) {
99 | log.error("Error starting {}", ConnectDistributed.class.getSimpleName(), e);
100 | }
101 | }
102 |
103 | private void stop() {
104 | log.debug("Stopping Connect Wrapper");
105 | }
106 | }
107 |
--------------------------------------------------------------------------------
/chart/templates/deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: {{ include "kafka-connect.fullname" . }}
5 | labels:
6 | {{- include "kafka-connect.labels" . | nindent 4 }}
7 | spec:
8 | {{- if not .Values.autoscaling.enabled }}
9 | replicas: {{ .Values.replicaCount }}
10 | {{- end }}
11 | selector:
12 | matchLabels:
13 | {{- include "kafka-connect.selectorLabels" . | nindent 6 }}
14 | {{- with .Values.deploymentStrategy }}
15 | strategy:
16 | {{- toYaml . | trim | nindent 4 }}
17 | {{- end }}
18 | template:
19 | metadata:
20 | {{- with .Values.podAnnotations }}
21 | annotations:
22 | {{- toYaml . | nindent 8 }}
23 | {{- end }}
24 | labels:
25 | {{- include "kafka-connect.selectorLabels" . | nindent 8 }}
26 | spec:
27 | {{- with .Values.global.imagePullSecrets }}
28 | imagePullSecrets:
29 | {{- toYaml . | nindent 8 }}
30 | {{- end }}
31 | serviceAccountName: {{ include "kafka-connect.serviceAccountName" . }}
32 | securityContext:
33 | {{- toYaml .Values.podSecurityContext | nindent 8 }}
34 | containers:
35 | - name: {{ .Chart.Name }}
36 | securityContext:
37 | {{- toYaml .Values.securityContext | nindent 12 }}
38 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
39 | imagePullPolicy: {{ .Values.image.pullPolicy }}
40 | ports:
41 | - name: http
42 | containerPort: {{ .Values.service.port }}
43 | protocol: TCP
44 | {{- if .Values.jmx.enabled }}
45 | - containerPort: {{ .Values.jmx.port }}
46 | name: jmx
47 | {{- end }}
48 | livenessProbe:
49 | httpGet:
50 | path: /
51 | port: http
52 | readinessProbe:
53 | httpGet:
54 | path: /
55 | port: http
56 | resources:
57 | {{- toYaml .Values.resources | nindent 12 }}
58 | {{- if .Values.volumeMounts }}
59 | volumeMounts:
60 | {{- toYaml .Values.volumeMounts | nindent 12 }}
61 | {{- end }}
62 | env:
63 | - name: CONNECT_REST_ADVERTISED_HOST_NAME
64 | valueFrom:
65 | fieldRef:
66 | fieldPath: status.podIP
67 | - name: CONNECT_BOOTSTRAP_SERVERS
68 | value: {{ required "The bootstrapServers cannot be empty." .Values.bootstrapServers }}
69 | - name: CONNECT_GROUP_ID
70 | value: {{ required "The groupId cannot be empty." .Values.groupId }}
71 | - name: CONNECT_CONFIG_STORAGE_TOPIC
72 | value: {{ template "kafka-connect.fullname" . }}-config
73 | - name: CONNECT_OFFSET_STORAGE_TOPIC
74 | value: {{ template "kafka-connect.fullname" . }}-offset
75 | - name: CONNECT_STATUS_STORAGE_TOPIC
76 | value: {{ template "kafka-connect.fullname" . }}-status
77 | - name: KAFKA_HEAP_OPTS
78 | value: "{{ .Values.heapOptions }}"
79 | {{- range $key, $value := .Values.configurationOverrides }}
80 | - name: {{ printf "CONNECT_%s" $key | replace "." "_" | upper | quote }}
81 | value: {{ $value | quote }}
82 | {{- end }}
83 | {{- range $key, $value := .Values.envValueFrom }}
84 | - name: {{ $key | quote }}
85 | valueFrom:
86 | {{- tpl (toYaml $value) $ | nindent 10 }}
87 | {{- end }}
88 | {{- range $key, $value := .Values.customEnv }}
89 | - name: {{ $key | quote }}
90 | value: {{ $value | quote }}
91 | {{- end }}
92 | {{- if and .Values.jmx.enabled .Values.jmx.port }}
93 | - name: KAFKA_JMX_PORT
94 | value: "{{ .Values.jmx.port }}"
95 | {{- end }}
96 | {{- if or .Values.envFromSecret (or .Values.envRenderSecret .Values.envFromSecrets) }}
97 | envFrom:
98 | {{- if .Values.envFromSecret }}
99 | - secretRef:
100 | name: {{ tpl .Values.envFromSecret . }}
101 | {{- end }}
102 | {{- if .Values.envRenderSecret }}
103 | - secretRef:
104 | name: {{ include "kafka-connect.fullname" . }}-env
105 | {{- end }}
106 | {{- range .Values.envFromSecrets }}
107 | - secretRef:
108 | name: {{ tpl .name $ }}
109 | optional: {{ .optional | default false }}
110 | {{- end }}
111 | {{- end }}
112 | {{- with .Values.nodeSelector }}
113 | nodeSelector:
114 | {{- toYaml . | nindent 8 }}
115 | {{- end }}
116 | {{- with .Values.affinity }}
117 | affinity:
118 | {{- toYaml . | nindent 8 }}
119 | {{- end }}
120 | {{- with .Values.tolerations }}
121 | tolerations:
122 | {{- toYaml . | nindent 8 }}
123 | {{- end }}
124 |
--------------------------------------------------------------------------------
/src/main/resources/connect-distributed.properties:
--------------------------------------------------------------------------------
1 | ##
2 | # Licensed to the Apache Software Foundation (ASF) under one or more
3 | # contributor license agreements. See the NOTICE file distributed with
4 | # this work for additional information regarding copyright ownership.
5 | # The ASF licenses this file to You under the Apache License, Version 2.0
6 | # (the "License"); you may not use this file except in compliance with
7 | # the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | ##
17 |
18 | # This file contains some of the configurations for the Kafka Connect distributed worker. This file is intended
19 | # to be used with the examples, and some settings may differ from those used in a production system, especially
20 | # the `bootstrap.servers` and those specifying replication factors.
21 |
22 | # A list of host/port pairs to use for establishing the initial connection to the Kafka cluster.
23 | bootstrap.servers=kafka:9092
24 |
25 | # unique name for the cluster, used in forming the Connect cluster group. Note that this must not conflict with consumer group IDs
26 | group.id=connect-cluster
27 |
28 | # The converters specify the format of data in Kafka and how to translate it into Connect data. Every Connect user will
29 | # need to configure these based on the format they want their data in when loaded from or stored into Kafka
30 | key.converter=org.apache.kafka.connect.json.JsonConverter
31 | value.converter=org.apache.kafka.connect.json.JsonConverter
32 | # Converter-specific settings can be passed in by prefixing the Converter's setting with the converter we want to apply
33 | # it to
34 | key.converter.schemas.enable=true
35 | value.converter.schemas.enable=true
36 |
37 | # Topic to use for storing offsets. This topic should have many partitions and be replicated and compacted.
38 | # Kafka Connect will attempt to create the topic automatically when needed, but you can always manually create
39 | # the topic before starting Kafka Connect if a specific topic configuration is needed.
40 | # Most users will want to use the built-in default replication factor of 3 or in some cases even specify a larger value.
41 | # Since this means there must be at least as many brokers as the maximum replication factor used, we'd like to be able
42 | # to run this example on a single-broker cluster and so here we instead set the replication factor to 1.
43 | offset.storage.topic=connect-offsets
44 | offset.storage.replication.factor=1
45 | #offset.storage.partitions=25
46 |
47 | # Topic to use for storing connector and task configurations; note that this should be a single partition, highly replicated,
48 | # and compacted topic. Kafka Connect will attempt to create the topic automatically when needed, but you can always manually create
49 | # the topic before starting Kafka Connect if a specific topic configuration is needed.
50 | # Most users will want to use the built-in default replication factor of 3 or in some cases even specify a larger value.
51 | # Since this means there must be at least as many brokers as the maximum replication factor used, we'd like to be able
52 | # to run this example on a single-broker cluster and so here we instead set the replication factor to 1.
53 | config.storage.topic=connect-configs
54 | config.storage.replication.factor=1
55 |
56 | # Topic to use for storing statuses. This topic can have multiple partitions and should be replicated and compacted.
57 | # Kafka Connect will attempt to create the topic automatically when needed, but you can always manually create
58 | # the topic before starting Kafka Connect if a specific topic configuration is needed.
59 | # Most users will want to use the built-in default replication factor of 3 or in some cases even specify a larger value.
60 | # Since this means there must be at least as many brokers as the maximum replication factor used, we'd like to be able
61 | # to run this example on a single-broker cluster and so here we instead set the replication factor to 1.
62 | status.storage.topic=connect-status
63 | status.storage.replication.factor=1
64 | #status.storage.partitions=5
65 |
66 | # Flush much faster than normal, which is useful for testing/debugging
67 | offset.flush.interval.ms=10000
68 |
69 | # These are provided to inform the user about the presence of the REST host and port configs
70 | # Hostname & Port for the REST API to listen on. If this is set, it will bind to the interface used to listen to requests.
71 | #rest.host.name=
72 | #rest.port=8083
73 |
74 | # The Hostname & Port that will be given out to other workers to connect to i.e. URLs that are routable from other servers.
75 | #rest.advertised.host.name=
76 | #rest.advertised.port=
77 |
78 | # Set to a list of filesystem paths separated by commas (,) to enable class loading isolation for plugins
79 | # (connectors, converters, transformations). The list should consist of top level directories that include
80 | # any combination of:
81 | # a) directories immediately containing jars with plugins and their dependencies
82 | # b) uber-jars with plugins and their dependencies
83 | # c) directories immediately containing the package directory structure of classes of plugins and their dependencies
84 | # Examples:
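# plugin.path=/usr/local/share/java,/usr/local/share/kafka/plugins,/opt/connectors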
85 | plugin.path=/app/libs
--------------------------------------------------------------------------------
/chart/README.md:
--------------------------------------------------------------------------------
1 | # Kafka Connect Helm Chart
2 |
![Version: 0.1.0](https://img.shields.io/badge/Version-0.1.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 4.0.0](https://img.shields.io/badge/AppVersion-4.0.0-informational?style=flat-square)
4 |
5 | A Helm chart for Apache Kafka Connect on Kubernetes
6 |
7 | ## Prerequisites
8 | - Kubernetes 1.9.2+ (tested on 1.25)
9 | - Helm 3+
10 | - A healthy and accessible Kafka Cluster. (Tested with [Strimzi](https://strimzi.io/))
11 |
12 | ## Docker Image Source:
13 | - [DockerHub -> cricketeerone](https://hub.docker.com/r/cricketeerone/apache-kafka-connect)
14 |
15 | ## Installing the Chart
16 |
17 | ### Install with an existing Kafka cluster
18 |
19 | ```sh
20 | git clone https://github.com/OneCricketeer/apache-kafka-connect-docker.git
helm install kafka-connect apache-kafka-connect-docker/chart --set bootstrapServers="PLAINTEXT://external.kafka:9092",groupId="connect-group"
22 | ```
23 |
24 | Or supply your own values file
25 |
26 | ```sh
helm install kafka-connect apache-kafka-connect-docker/chart --values /path/to/custom-values.yaml
28 | ```
29 |
30 | ## Values
31 |
32 | Refer [`values.yaml`](./values.yaml) for defaults.
33 |
34 | ### Required Values
35 |
36 | | Key | Type | Default | Description |
37 | |-----|------|---------|-------------|
38 | | bootstrapServers | string | `""` | Kafka cluster to communicate with. In the form of `PROTOCOL://fqdn.kafka:9092` |
39 | | groupId | string | `""` | |
40 |
41 | ### Configuration
42 |
43 | In general, use `configurationOverrides` to modify the worker config. Here are the defaults.
44 |
See [Kafka Connect worker configuration](https://kafka.apache.org/documentation/#connectconfigs) for the full list of properties.
46 |
47 | | Key | Type | Default | Description |
48 | |-----|------|---------|-------------|
49 | | configurationOverrides."config.storage.replication.factor" | string | `"3"` | |
50 | | configurationOverrides."key.converter" | string | `"org.apache.kafka.connect.converters.ByteArrayConverter"` | |
51 | | configurationOverrides."offset.storage.replication.factor" | string | `"3"` | |
52 | | configurationOverrides."plugin.path" | string | `"/app/libs"` | |
53 | | configurationOverrides."status.storage.replication.factor" | string | `"3"` | |
54 | | configurationOverrides."value.converter" | string | `"org.apache.kafka.connect.converters.ByteArrayConverter"` | |
55 |
56 | Otherwise, there are a few other ways to supply configuration
57 |
58 | | Key | Type | Default | Description |
59 | |-----|------|---------|-------------|
60 | | customEnv | object | `{}` | Environment variable mapping |
61 | | envValueFrom | object | `{}` | Allows using `valueFrom`. Useful for K8s Downward API |
62 | | envFromSecret | string | `""` | Allows using `secretRef` |
63 | | envFromSecrets | list | `[]` | List option of `envFromSecret` |
64 | | envRenderSecret | object | `{}` | |
65 |
66 | ### JVM Configuration
67 |
68 | | Key | Type | Default | Description |
69 | |-----|------|---------|-------------|
70 | | heapOptions | string | `"-Xms256M -Xmx2G"` | JVM Heap Options |
71 |
72 | Use `KAFKA_JMX_OPTS` or `KAFKA_OPTS` in `customEnv` to set arbitrary values.
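
For example, a minimal sketch (flag values are illustrative, not chart defaults) that opens an unauthenticated JMX listener for local debugging:

```yaml
customEnv:
  KAFKA_JMX_OPTS: >-
    -Dcom.sun.management.jmxremote
    -Dcom.sun.management.jmxremote.authenticate=false
    -Dcom.sun.management.jmxremote.ssl=false
```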
73 |
74 | ### Utilization
75 |
76 | | Key | Type | Default | Description |
77 | |-----|------|---------|-------------|
78 | | replicaCount | int | `1` | |
79 | | resources | object | `{}` | |
80 |
81 | ### Registry Mirrors
82 |
85 | | Key | Type | Default | Description |
86 | |-----|------|---------|-------------|
87 | | global.imagePullSecrets | list | `[]` | |
88 |
89 | ### Deployment
90 |
91 | | Key | Type | Default | Description |
92 | |-----|------|---------|-------------|
93 | | image.repository | string | `"cricketeerone/apache-kafka-connect"` | |
94 | | image.tag | string | `""` | Defaults to the Chart `appVersion` |
95 | | image.pullPolicy | string | `"IfNotPresent"` | |
96 |
97 | ### Monitoring
98 |
99 | | Key | Type | Default | Description |
100 | |-----|------|---------|-------------|
101 | | jmx.enabled | bool | `false` | Exposes Kafka Connect JMX port |
102 | | jmx.port | int | `5555` | The port to expose |
103 |
See [JVM configuration](#jvm-configuration) for passing JMX options.
105 |
106 | ### Metadata
107 |
108 | | Key | Type | Default | Description |
109 | |-----|------|---------|-------------|
110 | | fullnameOverride | string | `""` | |
111 | | nameOverride | string | `""` | |
112 | | podAnnotations | object | `{}` | |
113 |
114 | ### Autoscaling
115 |
116 | | Key | Type | Default | Description |
117 | |-----|------|---------|-------------|
118 | | autoscaling.enabled | bool | `false` | |
119 | | autoscaling.maxReplicas | int | `10` | |
120 | | autoscaling.minReplicas | int | `1` | |
121 | | autoscaling.targetCPUUtilizationPercentage | int | `80` | |
122 |
123 | ### Deployment Strategy
124 |
125 | | Key | Type | Default | Description |
126 | |-----|------|---------|-------------|
127 | | deploymentStrategy.type | string | `"RollingUpdate"` | |
128 |
129 | ### Security
130 |
131 | | Key | Type | Default | Description |
132 | |-----|------|---------|-------------|
133 | | podSecurityContext | object | `{}` | |
134 | | securityContext | object | `{}` | |
135 |
136 | ### Networking
137 |
138 | | Key | Type | Default | Description |
139 | |-----|------|---------|-------------|
140 | | service.port | int | `8083` | |
141 | | service.type | string | `"ClusterIP"` | |
142 |
143 | ### RBAC
144 |
145 | | Key | Type | Default | Description |
146 | |-----|------|---------|-------------|
147 | | serviceAccount.annotations | object | `{}` | |
148 | | serviceAccount.create | bool | `true` | |
149 | | serviceAccount.name | string | `""` | |
150 |
151 | ### Placement
152 |
153 | | Key | Type | Default | Description |
154 | |-----|------|---------|-------------|
155 | | tolerations | list | `[]` | |
156 | | nodeSelector | object | `{}` | |
157 | | affinity | object | `{}` | |
158 |
159 | ### Storage
160 |
161 | This container is meant to be ephemeral. Only use this feature to mount config files or extensions.
162 |
163 | | Key | Type | Default | Description |
164 | |-----|------|---------|-------------|
165 | | volumeMounts | string | `nil` | |
166 | | volumes | string | `nil` | |
167 |
168 | ----------------------------------------------
169 | Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0)
170 |
--------------------------------------------------------------------------------
/src/test/java/org/apache/kafka/connect/cli/ConnectDistributedWrapperTest.java:
--------------------------------------------------------------------------------
1 | package org.apache.kafka.connect.cli;
2 |
3 | import org.apache.kafka.common.utils.Utils;
4 | import org.apache.kafka.connect.converters.ByteArrayConverter;
5 | import org.apache.kafka.connect.runtime.WorkerConfig;
6 | import org.apache.kafka.connect.runtime.distributed.DistributedConfig;
7 | import org.apache.kafka.connect.runtime.rest.RestServerConfig;
8 | import org.assertj.core.api.WithAssertions;
9 | import org.junit.jupiter.api.Test;
10 | import org.junit.jupiter.params.ParameterizedTest;
11 | import org.junit.jupiter.params.provider.MethodSource;
12 |
13 | import java.io.File;
14 | import java.io.IOException;
15 | import java.net.URI;
16 | import java.net.URISyntaxException;
17 | import java.util.*;
18 | import java.util.stream.Collectors;
19 | import java.util.stream.Stream;
20 |
21 | class ConnectDistributedWrapperTest implements WithAssertions {
22 |
23 | @SuppressWarnings({"unused", "ConstantConditions"})
24 | @Test
25 | void connectEnvVarToProp_nullOrEmpty_throws() {
26 | final String ex = "Input cannot be null or empty";
27 | assertThatThrownBy(() -> {
28 | String prop = ConnectDistributedWrapper.connectEnvVarToProp(null);
29 | }).isInstanceOf(IllegalArgumentException.class).hasMessage(ex);
30 |
31 | assertThatThrownBy(() -> {
32 | String prop = ConnectDistributedWrapper.connectEnvVarToProp("");
33 | }).isInstanceOf(IllegalArgumentException.class).hasMessage(ex);
34 | }
35 |
36 | @SuppressWarnings("unused")
37 | @Test
38 | void connectEnvVarToProp_nonCONNECTShort_throws() {
39 | final String ex = "Input does not start with '" + ConnectDistributedWrapper.CONNECT_ENV_PREFIX + "'";
40 | String input = "kafka";
41 | assertThatThrownBy(() -> {
42 | String prop = ConnectDistributedWrapper.connectEnvVarToProp(input);
43 | }).isInstanceOf(IllegalArgumentException.class).hasMessageStartingWith(ex);
44 |
45 | String input2 = ConnectDistributedWrapper.CONNECT_ENV_PREFIX;
46 | String ex2 = ex + " or does not define a property";
47 | assertThatThrownBy(() -> {
48 | String prop = ConnectDistributedWrapper.connectEnvVarToProp(input2);
49 | }).isInstanceOf(IllegalArgumentException.class).hasMessage(ex2);
50 | }
51 |
52 | private String propToConnectEnv(String prop) {
53 | return ConnectDistributedWrapper.CONNECT_ENV_PREFIX
54 | .concat(prop
55 | .replace('.', '_')
56 | .toUpperCase()
57 | );
58 | }
59 |
60 | @ParameterizedTest
61 | @MethodSource("workerConfigProvider")
62 | void connectEnvVarToProp_connectConfigs(String prop) {
63 | String input = propToConnectEnv(prop);
64 | assertThat(ConnectDistributedWrapper.connectEnvVarToProp(input))
65 | .isEqualTo(prop);
66 | }
67 |
68 | static Stream<String> workerConfigProvider() {
69 | return Stream.of(
70 | // Kafka connection details
71 | WorkerConfig.BOOTSTRAP_SERVERS_CONFIG,
72 | DistributedConfig.GROUP_ID_CONFIG,
73 | // REST server
74 | RestServerConfig.REST_ADVERTISED_LISTENER_CONFIG,
75 | RestServerConfig.LISTENERS_CONFIG,
76 | // Plugins
77 | WorkerConfig.PLUGIN_PATH_CONFIG,
78 | // Converters
79 | WorkerConfig.KEY_CONVERTER_CLASS_CONFIG,
80 | WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG,
81 | WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG,
82 | // Internal topics
83 | DistributedConfig.STATUS_STORAGE_TOPIC_CONFIG,
84 | DistributedConfig.STATUS_STORAGE_REPLICATION_FACTOR_CONFIG,
85 | DistributedConfig.CONFIG_TOPIC_CONFIG,
86 | DistributedConfig.CONFIG_STORAGE_REPLICATION_FACTOR_CONFIG,
87 | DistributedConfig.OFFSET_STORAGE_TOPIC_CONFIG,
88 | DistributedConfig.OFFSET_STORAGE_REPLICATION_FACTOR_CONFIG
89 | );
90 | }
91 |
92 | @SuppressWarnings({"ConstantConditions", "unused"})
93 | @Test
94 | void createConnectProperties_throws() {
95 | final String ex = "Provided argument cannot be null or empty";
96 | assertThatThrownBy(() -> {
97 | final File propFile = ConnectDistributedWrapper.createConnectProperties(null);
98 | }).isInstanceOf(IllegalArgumentException.class).hasMessage(ex);
99 | }
100 |
101 | @Test
102 | void createConnectProperties_createsFile() throws IOException, URISyntaxException {
103 | Map<String, String> propMap = Stream.of(
104 | new AbstractMap.SimpleImmutableEntry<>(
105 | DistributedConfig.BOOTSTRAP_SERVERS_CONFIG, DistributedConfig.BOOTSTRAP_SERVERS_DEFAULT),
106 | new AbstractMap.SimpleImmutableEntry<>(
107 | DistributedConfig.GROUP_ID_CONFIG, "junit"),
108 | new AbstractMap.SimpleImmutableEntry<>(
109 | DistributedConfig.KEY_CONVERTER_CLASS_CONFIG, ByteArrayConverter.class.getName()),
110 | new AbstractMap.SimpleImmutableEntry<>(
111 | DistributedConfig.VALUE_CONVERTER_CLASS_CONFIG, ByteArrayConverter.class.getName())
112 | ).map(e -> new AbstractMap.SimpleImmutableEntry<>(propToConnectEnv(e.getKey()), e.getValue()))
113 | .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue,
114 | (prev, next) -> next, HashMap::new));
115 |
116 | final File propFile = ConnectDistributedWrapper.createConnectProperties(propMap);
117 | assertThat(propFile.exists()).isTrue();
118 | final String fname = "connect-distributed";
119 | assertThat(propFile.getName())
120 | .startsWith("tmp-" + fname)
121 | .endsWith(".properties");
122 | final Properties properties = Utils.loadProps(propFile.getAbsolutePath());
123 |
124 | final URI expectedPropertiesURI = Objects.requireNonNull(getClass().getResource(
125 | "expected-" + fname + ".properties")).toURI();
126 | final File expectedPropertiesFile = new File(expectedPropertiesURI);
127 | final Properties expectedProperties = Utils.loadProps(expectedPropertiesFile.getAbsolutePath());
128 |
129 | assertThat(properties).containsExactlyEntriesOf(expectedProperties);
130 | }
131 |
132 | }
--------------------------------------------------------------------------------
/mvnw.cmd:
--------------------------------------------------------------------------------
1 | <# : batch portion
2 | @REM ----------------------------------------------------------------------------
3 | @REM Licensed to the Apache Software Foundation (ASF) under one
4 | @REM or more contributor license agreements. See the NOTICE file
5 | @REM distributed with this work for additional information
6 | @REM regarding copyright ownership. The ASF licenses this file
7 | @REM to you under the Apache License, Version 2.0 (the
8 | @REM "License"); you may not use this file except in compliance
9 | @REM with the License. You may obtain a copy of the License at
10 | @REM
11 | @REM http://www.apache.org/licenses/LICENSE-2.0
12 | @REM
13 | @REM Unless required by applicable law or agreed to in writing,
14 | @REM software distributed under the License is distributed on an
15 | @REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
16 | @REM KIND, either express or implied. See the License for the
17 | @REM specific language governing permissions and limitations
18 | @REM under the License.
19 | @REM ----------------------------------------------------------------------------
20 |
21 | @REM ----------------------------------------------------------------------------
22 | @REM Apache Maven Wrapper startup batch script, version 3.3.2
23 | @REM
24 | @REM Optional ENV vars
25 | @REM MVNW_REPOURL - repo url base for downloading maven distribution
26 | @REM MVNW_USERNAME/MVNW_PASSWORD - user and password for downloading maven
27 | @REM MVNW_VERBOSE - true: enable verbose log; others: silence the output
28 | @REM ----------------------------------------------------------------------------
29 |
30 | @IF "%__MVNW_ARG0_NAME__%"=="" (SET __MVNW_ARG0_NAME__=%~nx0)
31 | @SET __MVNW_CMD__=
32 | @SET __MVNW_ERROR__=
33 | @SET __MVNW_PSMODULEP_SAVE=%PSModulePath%
34 | @SET PSModulePath=
35 | @FOR /F "usebackq tokens=1* delims==" %%A IN (`powershell -noprofile "& {$scriptDir='%~dp0'; $script='%__MVNW_ARG0_NAME__%'; icm -ScriptBlock ([Scriptblock]::Create((Get-Content -Raw '%~f0'))) -NoNewScope}"`) DO @(
36 | IF "%%A"=="MVN_CMD" (set __MVNW_CMD__=%%B) ELSE IF "%%B"=="" (echo %%A) ELSE (echo %%A=%%B)
37 | )
38 | @SET PSModulePath=%__MVNW_PSMODULEP_SAVE%
39 | @SET __MVNW_PSMODULEP_SAVE=
40 | @SET __MVNW_ARG0_NAME__=
41 | @SET MVNW_USERNAME=
42 | @SET MVNW_PASSWORD=
43 | @IF NOT "%__MVNW_CMD__%"=="" (%__MVNW_CMD__% %*)
44 | @echo Cannot start maven from wrapper >&2 && exit /b 1
45 | @GOTO :EOF
46 | : end batch / begin powershell #>
47 |
48 | $ErrorActionPreference = "Stop"
49 | if ($env:MVNW_VERBOSE -eq "true") {
50 | $VerbosePreference = "Continue"
51 | }
52 |
53 | # calculate distributionUrl, requires .mvn/wrapper/maven-wrapper.properties
54 | $distributionUrl = (Get-Content -Raw "$scriptDir/.mvn/wrapper/maven-wrapper.properties" | ConvertFrom-StringData).distributionUrl
55 | if (!$distributionUrl) {
56 | Write-Error "cannot read distributionUrl property in $scriptDir/.mvn/wrapper/maven-wrapper.properties"
57 | }
58 |
59 | switch -wildcard -casesensitive ( $($distributionUrl -replace '^.*/','') ) {
60 | "maven-mvnd-*" {
61 | $USE_MVND = $true
62 | $distributionUrl = $distributionUrl -replace '-bin\.[^.]*$',"-windows-amd64.zip"
63 | $MVN_CMD = "mvnd.cmd"
64 | break
65 | }
66 | default {
67 | $USE_MVND = $false
68 | $MVN_CMD = $script -replace '^mvnw','mvn'
69 | break
70 | }
71 | }
72 |
73 | # apply MVNW_REPOURL and calculate MAVEN_HOME
74 | # maven home pattern: ~/.m2/wrapper/dists/{apache-maven-<version>,maven-mvnd-<version>-<platform>}/<hash>
75 | if ($env:MVNW_REPOURL) {
76 |   $MVNW_REPO_PATTERN = if ($USE_MVND) { "/maven/mvnd/" } else { "/org/apache/maven/" }
77 | $distributionUrl = "$env:MVNW_REPOURL$MVNW_REPO_PATTERN$($distributionUrl -replace '^.*'+$MVNW_REPO_PATTERN,'')"
78 | }
79 | $distributionUrlName = $distributionUrl -replace '^.*/',''
80 | $distributionUrlNameMain = $distributionUrlName -replace '\.[^.]*$','' -replace '-bin$',''
81 | $MAVEN_HOME_PARENT = "$HOME/.m2/wrapper/dists/$distributionUrlNameMain"
82 | if ($env:MAVEN_USER_HOME) {
83 | $MAVEN_HOME_PARENT = "$env:MAVEN_USER_HOME/wrapper/dists/$distributionUrlNameMain"
84 | }
85 | $MAVEN_HOME_NAME = ([System.Security.Cryptography.MD5]::Create().ComputeHash([byte[]][char[]]$distributionUrl) | ForEach-Object {$_.ToString("x2")}) -join ''
86 | $MAVEN_HOME = "$MAVEN_HOME_PARENT/$MAVEN_HOME_NAME"
87 |
88 | if (Test-Path -Path "$MAVEN_HOME" -PathType Container) {
89 | Write-Verbose "found existing MAVEN_HOME at $MAVEN_HOME"
90 | Write-Output "MVN_CMD=$MAVEN_HOME/bin/$MVN_CMD"
91 | exit $?
92 | }
93 |
94 | if (! $distributionUrlNameMain -or ($distributionUrlName -eq $distributionUrlNameMain)) {
95 | Write-Error "distributionUrl is not valid, must end with *-bin.zip, but found $distributionUrl"
96 | }
97 |
98 | # prepare tmp dir
99 | $TMP_DOWNLOAD_DIR_HOLDER = New-TemporaryFile
100 | $TMP_DOWNLOAD_DIR = New-Item -Itemtype Directory -Path "$TMP_DOWNLOAD_DIR_HOLDER.dir"
101 | $TMP_DOWNLOAD_DIR_HOLDER.Delete() | Out-Null
102 | trap {
103 | if ($TMP_DOWNLOAD_DIR.Exists) {
104 | try { Remove-Item $TMP_DOWNLOAD_DIR -Recurse -Force | Out-Null }
105 | catch { Write-Warning "Cannot remove $TMP_DOWNLOAD_DIR" }
106 | }
107 | }
108 |
109 | New-Item -Itemtype Directory -Path "$MAVEN_HOME_PARENT" -Force | Out-Null
110 |
111 | # Download and Install Apache Maven
112 | Write-Verbose "Couldn't find MAVEN_HOME, downloading and installing it ..."
113 | Write-Verbose "Downloading from: $distributionUrl"
114 | Write-Verbose "Downloading to: $TMP_DOWNLOAD_DIR/$distributionUrlName"
115 |
116 | $webclient = New-Object System.Net.WebClient
117 | if ($env:MVNW_USERNAME -and $env:MVNW_PASSWORD) {
118 | $webclient.Credentials = New-Object System.Net.NetworkCredential($env:MVNW_USERNAME, $env:MVNW_PASSWORD)
119 | }
120 | [Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12
121 | $webclient.DownloadFile($distributionUrl, "$TMP_DOWNLOAD_DIR/$distributionUrlName") | Out-Null
122 |
123 | # If specified, validate the SHA-256 sum of the Maven distribution zip file
124 | $distributionSha256Sum = (Get-Content -Raw "$scriptDir/.mvn/wrapper/maven-wrapper.properties" | ConvertFrom-StringData).distributionSha256Sum
125 | if ($distributionSha256Sum) {
126 | if ($USE_MVND) {
127 | Write-Error "Checksum validation is not supported for maven-mvnd. `nPlease disable validation by removing 'distributionSha256Sum' from your maven-wrapper.properties."
128 | }
129 | Import-Module $PSHOME\Modules\Microsoft.PowerShell.Utility -Function Get-FileHash
130 | if ((Get-FileHash "$TMP_DOWNLOAD_DIR/$distributionUrlName" -Algorithm SHA256).Hash.ToLower() -ne $distributionSha256Sum) {
131 | Write-Error "Error: Failed to validate Maven distribution SHA-256, your Maven distribution might be compromised. If you updated your Maven version, you need to update the specified distributionSha256Sum property."
132 | }
133 | }
134 |
135 | # unzip and move
136 | Expand-Archive "$TMP_DOWNLOAD_DIR/$distributionUrlName" -DestinationPath "$TMP_DOWNLOAD_DIR" | Out-Null
137 | Rename-Item -Path "$TMP_DOWNLOAD_DIR/$distributionUrlNameMain" -NewName $MAVEN_HOME_NAME | Out-Null
138 | try {
139 | Move-Item -Path "$TMP_DOWNLOAD_DIR/$MAVEN_HOME_NAME" -Destination $MAVEN_HOME_PARENT | Out-Null
140 | } catch {
141 | if (! (Test-Path -Path "$MAVEN_HOME" -PathType Container)) {
142 | Write-Error "fail to move MAVEN_HOME"
143 | }
144 | } finally {
145 | try { Remove-Item $TMP_DOWNLOAD_DIR -Recurse -Force | Out-Null }
146 | catch { Write-Warning "Cannot remove $TMP_DOWNLOAD_DIR" }
147 | }
148 |
149 | Write-Output "MVN_CMD=$MAVEN_HOME/bin/$MVN_CMD"
150 |
--------------------------------------------------------------------------------
/mvnw:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # ----------------------------------------------------------------------------
3 | # Licensed to the Apache Software Foundation (ASF) under one
4 | # or more contributor license agreements. See the NOTICE file
5 | # distributed with this work for additional information
6 | # regarding copyright ownership. The ASF licenses this file
7 | # to you under the Apache License, Version 2.0 (the
8 | # "License"); you may not use this file except in compliance
9 | # with the License. You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing,
14 | # software distributed under the License is distributed on an
15 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
16 | # KIND, either express or implied. See the License for the
17 | # specific language governing permissions and limitations
18 | # under the License.
19 | # ----------------------------------------------------------------------------
20 |
21 | # ----------------------------------------------------------------------------
22 | # Apache Maven Wrapper startup batch script, version 3.3.2
23 | #
24 | # Optional ENV vars
25 | # -----------------
26 | # JAVA_HOME - location of a JDK home dir, required when download maven via java source
27 | # MVNW_REPOURL - repo url base for downloading maven distribution
28 | # MVNW_USERNAME/MVNW_PASSWORD - user and password for downloading maven
29 | # MVNW_VERBOSE - true: enable verbose log; debug: trace the mvnw script; others: silence the output
30 | # ----------------------------------------------------------------------------
31 |
32 | set -euf
33 | [ "${MVNW_VERBOSE-}" != debug ] || set -x
34 |
35 | # OS specific support.
36 | native_path() { printf %s\\n "$1"; }
37 | case "$(uname)" in
38 | CYGWIN* | MINGW*)
39 | [ -z "${JAVA_HOME-}" ] || JAVA_HOME="$(cygpath --unix "$JAVA_HOME")"
40 | native_path() { cygpath --path --windows "$1"; }
41 | ;;
42 | esac
43 |
44 | # set JAVACMD and JAVACCMD
45 | set_java_home() {
46 | # For Cygwin and MinGW, ensure paths are in Unix format before anything is touched
47 | if [ -n "${JAVA_HOME-}" ]; then
48 | if [ -x "$JAVA_HOME/jre/sh/java" ]; then
49 | # IBM's JDK on AIX uses strange locations for the executables
50 | JAVACMD="$JAVA_HOME/jre/sh/java"
51 | JAVACCMD="$JAVA_HOME/jre/sh/javac"
52 | else
53 | JAVACMD="$JAVA_HOME/bin/java"
54 | JAVACCMD="$JAVA_HOME/bin/javac"
55 |
56 | if [ ! -x "$JAVACMD" ] || [ ! -x "$JAVACCMD" ]; then
57 | echo "The JAVA_HOME environment variable is not defined correctly, so mvnw cannot run." >&2
58 | echo "JAVA_HOME is set to \"$JAVA_HOME\", but \"\$JAVA_HOME/bin/java\" or \"\$JAVA_HOME/bin/javac\" does not exist." >&2
59 | return 1
60 | fi
61 | fi
62 | else
63 | JAVACMD="$(
64 | 'set' +e
65 | 'unset' -f command 2>/dev/null
66 | 'command' -v java
67 | )" || :
68 | JAVACCMD="$(
69 | 'set' +e
70 | 'unset' -f command 2>/dev/null
71 | 'command' -v javac
72 | )" || :
73 |
74 | if [ ! -x "${JAVACMD-}" ] || [ ! -x "${JAVACCMD-}" ]; then
75 | echo "The java/javac command does not exist in PATH nor is JAVA_HOME set, so mvnw cannot run." >&2
76 | return 1
77 | fi
78 | fi
79 | }
80 |
81 | # hash string like Java String::hashCode
82 | hash_string() {
83 | str="${1:-}" h=0
84 | while [ -n "$str" ]; do
85 | char="${str%"${str#?}"}"
86 | h=$(((h * 31 + $(LC_CTYPE=C printf %d "'$char")) % 4294967296))
87 | str="${str#?}"
88 | done
89 | printf %x\\n $h
90 | }
91 |
92 | verbose() { :; }
93 | [ "${MVNW_VERBOSE-}" != true ] || verbose() { printf %s\\n "${1-}"; }
94 |
95 | die() {
96 | printf %s\\n "$1" >&2
97 | exit 1
98 | }
99 |
100 | trim() {
101 | # MWRAPPER-139:
102 | # Trims trailing and leading whitespace, carriage returns, tabs, and linefeeds.
103 | # Needed for removing poorly interpreted newline sequences when running in more
104 | # exotic environments such as mingw bash on Windows.
105 | printf "%s" "${1}" | tr -d '[:space:]'
106 | }
107 |
108 | # parse distributionUrl and optional distributionSha256Sum, requires .mvn/wrapper/maven-wrapper.properties
109 | while IFS="=" read -r key value; do
110 | case "${key-}" in
111 | distributionUrl) distributionUrl=$(trim "${value-}") ;;
112 | distributionSha256Sum) distributionSha256Sum=$(trim "${value-}") ;;
113 | esac
114 | done <"${0%/*}/.mvn/wrapper/maven-wrapper.properties"
115 | [ -n "${distributionUrl-}" ] || die "cannot read distributionUrl property in ${0%/*}/.mvn/wrapper/maven-wrapper.properties"
116 |
117 | case "${distributionUrl##*/}" in
118 | maven-mvnd-*bin.*)
119 | MVN_CMD=mvnd.sh _MVNW_REPO_PATTERN=/maven/mvnd/
120 | case "${PROCESSOR_ARCHITECTURE-}${PROCESSOR_ARCHITEW6432-}:$(uname -a)" in
121 | *AMD64:CYGWIN* | *AMD64:MINGW*) distributionPlatform=windows-amd64 ;;
122 | :Darwin*x86_64) distributionPlatform=darwin-amd64 ;;
123 | :Darwin*arm64) distributionPlatform=darwin-aarch64 ;;
124 | :Linux*x86_64*) distributionPlatform=linux-amd64 ;;
125 | *)
126 | echo "Cannot detect native platform for mvnd on $(uname)-$(uname -m), use pure java version" >&2
127 | distributionPlatform=linux-amd64
128 | ;;
129 | esac
130 | distributionUrl="${distributionUrl%-bin.*}-$distributionPlatform.zip"
131 | ;;
132 | maven-mvnd-*) MVN_CMD=mvnd.sh _MVNW_REPO_PATTERN=/maven/mvnd/ ;;
133 | *) MVN_CMD="mvn${0##*/mvnw}" _MVNW_REPO_PATTERN=/org/apache/maven/ ;;
134 | esac
135 |
136 | # apply MVNW_REPOURL and calculate MAVEN_HOME
137 | # maven home pattern: ~/.m2/wrapper/dists/{apache-maven-<version>,maven-mvnd-<version>-<platform>}/<hash>
138 | [ -z "${MVNW_REPOURL-}" ] || distributionUrl="$MVNW_REPOURL$_MVNW_REPO_PATTERN${distributionUrl#*"$_MVNW_REPO_PATTERN"}"
139 | distributionUrlName="${distributionUrl##*/}"
140 | distributionUrlNameMain="${distributionUrlName%.*}"
141 | distributionUrlNameMain="${distributionUrlNameMain%-bin}"
142 | MAVEN_USER_HOME="${MAVEN_USER_HOME:-${HOME}/.m2}"
143 | MAVEN_HOME="${MAVEN_USER_HOME}/wrapper/dists/${distributionUrlNameMain-}/$(hash_string "$distributionUrl")"
144 |
145 | exec_maven() {
146 | unset MVNW_VERBOSE MVNW_USERNAME MVNW_PASSWORD MVNW_REPOURL || :
147 | exec "$MAVEN_HOME/bin/$MVN_CMD" "$@" || die "cannot exec $MAVEN_HOME/bin/$MVN_CMD"
148 | }
149 |
150 | if [ -d "$MAVEN_HOME" ]; then
151 | verbose "found existing MAVEN_HOME at $MAVEN_HOME"
152 | exec_maven "$@"
153 | fi
154 |
155 | case "${distributionUrl-}" in
156 | *?-bin.zip | *?maven-mvnd-?*-?*.zip) ;;
157 | *) die "distributionUrl is not valid, must match *-bin.zip or maven-mvnd-*.zip, but found '${distributionUrl-}'" ;;
158 | esac
159 |
160 | # prepare tmp dir
161 | if TMP_DOWNLOAD_DIR="$(mktemp -d)" && [ -d "$TMP_DOWNLOAD_DIR" ]; then
162 | clean() { rm -rf -- "$TMP_DOWNLOAD_DIR"; }
163 | trap clean HUP INT TERM EXIT
164 | else
165 | die "cannot create temp dir"
166 | fi
167 |
168 | mkdir -p -- "${MAVEN_HOME%/*}"
169 |
170 | # Download and Install Apache Maven
171 | verbose "Couldn't find MAVEN_HOME, downloading and installing it ..."
172 | verbose "Downloading from: $distributionUrl"
173 | verbose "Downloading to: $TMP_DOWNLOAD_DIR/$distributionUrlName"
174 |
175 | # select .zip or .tar.gz
176 | if ! command -v unzip >/dev/null; then
177 | distributionUrl="${distributionUrl%.zip}.tar.gz"
178 | distributionUrlName="${distributionUrl##*/}"
179 | fi
180 |
181 | # verbose opt
182 | __MVNW_QUIET_WGET=--quiet __MVNW_QUIET_CURL=--silent __MVNW_QUIET_UNZIP=-q __MVNW_QUIET_TAR=''
183 | [ "${MVNW_VERBOSE-}" != true ] || __MVNW_QUIET_WGET='' __MVNW_QUIET_CURL='' __MVNW_QUIET_UNZIP='' __MVNW_QUIET_TAR=v
184 |
185 | # normalize http auth
186 | case "${MVNW_PASSWORD:+has-password}" in
187 | '') MVNW_USERNAME='' MVNW_PASSWORD='' ;;
188 | has-password) [ -n "${MVNW_USERNAME-}" ] || MVNW_USERNAME='' MVNW_PASSWORD='' ;;
189 | esac
190 |
191 | if [ -z "${MVNW_USERNAME-}" ] && command -v wget >/dev/null; then
192 | verbose "Found wget ... using wget"
193 | wget ${__MVNW_QUIET_WGET:+"$__MVNW_QUIET_WGET"} "$distributionUrl" -O "$TMP_DOWNLOAD_DIR/$distributionUrlName" || die "wget: Failed to fetch $distributionUrl"
194 | elif [ -z "${MVNW_USERNAME-}" ] && command -v curl >/dev/null; then
195 | verbose "Found curl ... using curl"
196 | curl ${__MVNW_QUIET_CURL:+"$__MVNW_QUIET_CURL"} -f -L -o "$TMP_DOWNLOAD_DIR/$distributionUrlName" "$distributionUrl" || die "curl: Failed to fetch $distributionUrl"
197 | elif set_java_home; then
198 | verbose "Falling back to use Java to download"
199 | javaSource="$TMP_DOWNLOAD_DIR/Downloader.java"
200 | targetZip="$TMP_DOWNLOAD_DIR/$distributionUrlName"
201 | cat >"$javaSource" <<-END
202 | public class Downloader extends java.net.Authenticator
203 | {
204 | protected java.net.PasswordAuthentication getPasswordAuthentication()
205 | {
206 | return new java.net.PasswordAuthentication( System.getenv( "MVNW_USERNAME" ), System.getenv( "MVNW_PASSWORD" ).toCharArray() );
207 | }
208 | public static void main( String[] args ) throws Exception
209 | {
210 | setDefault( new Downloader() );
211 | java.nio.file.Files.copy( java.net.URI.create( args[0] ).toURL().openStream(), java.nio.file.Paths.get( args[1] ).toAbsolutePath().normalize() );
212 | }
213 | }
214 | END
215 | # For Cygwin/MinGW, switch paths to Windows format before running javac and java
216 | verbose " - Compiling Downloader.java ..."
217 | "$(native_path "$JAVACCMD")" "$(native_path "$javaSource")" || die "Failed to compile Downloader.java"
218 | verbose " - Running Downloader.java ..."
219 | "$(native_path "$JAVACMD")" -cp "$(native_path "$TMP_DOWNLOAD_DIR")" Downloader "$distributionUrl" "$(native_path "$targetZip")"
220 | fi
221 |
222 | # If specified, validate the SHA-256 sum of the Maven distribution zip file
223 | if [ -n "${distributionSha256Sum-}" ]; then
224 | distributionSha256Result=false
225 | if [ "$MVN_CMD" = mvnd.sh ]; then
226 | echo "Checksum validation is not supported for maven-mvnd." >&2
227 | echo "Please disable validation by removing 'distributionSha256Sum' from your maven-wrapper.properties." >&2
228 | exit 1
229 | elif command -v sha256sum >/dev/null; then
230 | if echo "$distributionSha256Sum $TMP_DOWNLOAD_DIR/$distributionUrlName" | sha256sum -c >/dev/null 2>&1; then
231 | distributionSha256Result=true
232 | fi
233 | elif command -v shasum >/dev/null; then
234 | if echo "$distributionSha256Sum $TMP_DOWNLOAD_DIR/$distributionUrlName" | shasum -a 256 -c >/dev/null 2>&1; then
235 | distributionSha256Result=true
236 | fi
237 | else
238 | echo "Checksum validation was requested but neither 'sha256sum' or 'shasum' are available." >&2
239 | echo "Please install either command, or disable validation by removing 'distributionSha256Sum' from your maven-wrapper.properties." >&2
240 | exit 1
241 | fi
242 | if [ $distributionSha256Result = false ]; then
243 | echo "Error: Failed to validate Maven distribution SHA-256, your Maven distribution might be compromised." >&2
244 | echo "If you updated your Maven version, you need to update the specified distributionSha256Sum property." >&2
245 | exit 1
246 | fi
247 | fi
248 |
249 | # unzip and move
250 | if command -v unzip >/dev/null; then
251 | unzip ${__MVNW_QUIET_UNZIP:+"$__MVNW_QUIET_UNZIP"} "$TMP_DOWNLOAD_DIR/$distributionUrlName" -d "$TMP_DOWNLOAD_DIR" || die "failed to unzip"
252 | else
253 | tar xzf${__MVNW_QUIET_TAR:+"$__MVNW_QUIET_TAR"} "$TMP_DOWNLOAD_DIR/$distributionUrlName" -C "$TMP_DOWNLOAD_DIR" || die "failed to untar"
254 | fi
255 | printf %s\\n "$distributionUrl" >"$TMP_DOWNLOAD_DIR/$distributionUrlNameMain/mvnw.url"
256 | mv -- "$TMP_DOWNLOAD_DIR/$distributionUrlNameMain" "$MAVEN_HOME" || [ -d "$MAVEN_HOME" ] || die "fail to move MAVEN_HOME"
257 |
258 | clean || :
259 | exec_maven "$@"
260 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Containerized [Apache Kafka Connect](http://kafka.apache.org/documentation/#connect)
2 |
3 |
4 | [](https://hub.docker.com/r/cricketeerone/apache-kafka-connect/tags)
5 | [](https://hub.docker.com/r/cricketeerone/apache-kafka-connect/tags)
6 | [](https://hub.docker.com/r/cricketeerone/apache-kafka-connect)
7 |
8 | [](https://github.com/OneCricketeer/apache-kafka-connect-docker/blob/master/LICENSE)
9 |
10 | Using [GoogleContainerTools/Jib](https://github.com/GoogleContainerTools/jib) to package Apache Kafka Connect Distributed Server.
11 |
12 | ### FAQ
13 |
14 | #### Why this image over others?
15 |
16 | This image is almost 10x **_smaller_** than popular Kafka Connect images. It only includes the Connect Runtime, no extra bloat!
17 |
18 | #### When will version _X_ be available?
19 |
20 | The builds are automated. The version releases are not. If you find a version missing, feel free to submit a corresponding PR.
21 |
22 | ---
23 |
24 | Docker Pull! 🐳
25 |
26 | ```sh
27 | docker pull cricketeerone/apache-kafka-connect
28 | ```
29 |
30 | The above image is enough for MirrorMaker2. There is also an image that includes `confluent-hub` for adding a majority of third-party connectors! See section [Extending with new Connectors](#extending-with-new-connectors) for full usage.
31 |
32 | ```sh
33 | docker pull cricketeerone/apache-kafka-connect:latest-confluent-hub
34 | ```
35 |
36 | Alpine variants are also available. Check [Docker Hub](https://hub.docker.com/r/cricketeerone/apache-kafka-connect/tags) for all tags and versions.
37 |
38 | **Table of Contents**
39 | - [Image Details](#image-details)
40 | - [Build it locally](#build-it-locally)
41 | - [Tutorial](#tutorial)
42 | - [Without Docker](#without-docker)
43 | - [Starting Kafka in Docker](#start-kafka-cluster-in-docker)
44 | - Extra
45 | - [Scaling Up](#scaling-up)
46 | - [Scaling Out](#scaling-out)
47 | - [Extending with new Connectors](#extending-with-new-connectors)
48 | - [HTTP Authentication](#http-authentication)
49 |
50 | ## Image Details
51 |
52 | Much like the `confluentinc/cp-kafka-connect` images, this container uses environment variables starting with `CONNECT_`, followed by the Kafka Connect worker property to configure, uppercased, with dots replaced by underscores.
53 |
54 | For example, these are the bare-minimum variables necessary to get a Connect Distributed server running,
55 | assuming it is connected to a Kafka cluster with at least three brokers (the default replication factor for the three internal Connect topics).
56 | For testing against fewer than three brokers, additional variables can be added to lower the replication factor
57 | of those topics, as described below.
58 |
59 | ```txt
60 | CONNECT_BOOTSTRAP_SERVERS
61 | CONNECT_GROUP_ID
62 | CONNECT_KEY_CONVERTER
63 | CONNECT_VALUE_CONVERTER
64 | CONNECT_CONFIG_STORAGE_TOPIC
65 | CONNECT_OFFSET_STORAGE_TOPIC
66 | CONNECT_STATUS_STORAGE_TOPIC
67 | ```
68 |
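For instance, a minimal `docker run` sketch using these variables (the broker address, group id, and topic names here are placeholders):

```sh
docker run -d --name kafka-connect -p 8083:8083 \
  -e CONNECT_BOOTSTRAP_SERVERS=kafka:9092 \
  -e CONNECT_GROUP_ID=connect-cluster \
  -e CONNECT_KEY_CONVERTER=org.apache.kafka.connect.converters.ByteArrayConverter \
  -e CONNECT_VALUE_CONVERTER=org.apache.kafka.connect.converters.ByteArrayConverter \
  -e CONNECT_CONFIG_STORAGE_TOPIC=connect-configs \
  -e CONNECT_OFFSET_STORAGE_TOPIC=connect-offsets \
  -e CONNECT_STATUS_STORAGE_TOPIC=connect-status \
  cricketeerone/apache-kafka-connect
```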
69 | See [`docker-compose.yml`](docker-compose.yml) for a full example of these variables' usage with the container while connected to a Kafka broker.
70 |
71 | ## Build it locally
72 |
73 | Looking to build your own image? **tl;dr** - Clone repo, and use `./mvnw clean compile jib:dockerBuild` or `MVN_BUILD_CMD='compile jib:dockerBuild' make` and you're done!
74 |
75 | **Multi-platform builds (buildx)**
76 |
77 | By default, with the above commands, an image will be built for a `linux/amd64` Ubuntu-based container.
78 | The following builds and pushes multi-platform images to your personal Docker Hub account via Docker Buildx.
79 |
80 | ```sh
81 | BUILDX_PLATFORMS=linux/arm64,linux/amd64 DOCKER_USER=$(whoami) make
82 | ```
83 |
84 | As of May 2023, Alpine variants of Eclipse Temurin Java 17 images do not support `arm64`.
85 |
86 | ## Push to a private registry
87 |
88 | To push to a private Docker Registry, you'll need to `docker login` to that address. The following commands will push the `apache-kafka-connect` image to a Docker Registry under your local username.
89 | Feel free to change `DOCKER_USER` to a custom repo name in the Registry.
90 |
91 | ```sh
92 | $ docker login --username=$(whoami)
93 |
94 | $ DOCKER_REGISTRY=<registry-address> DOCKER_USER=$(whoami) \
95 | make
96 | ```
97 |
98 | ## Tutorial
99 |
100 | The following tutorial uses Jib to package the `ConnectDistributed` class for running Kafka Connect distributed-mode workers.
101 | The instructions use the [Bitnami](https://github.com/bitnami/bitnami-docker-kafka) Kafka images; however, any other Kafka Docker images should work.
102 |
103 | This tutorial will roughly follow the same steps as the [tutorial for Connect on Kafka's site](https://kafka.apache.org/documentation/#quickstart_kafkaconnect),
104 | except using the Distributed Connect server instead.
105 |
106 | ### Without Docker
107 |
108 | If not using Docker, Kafka (and ZooKeeper, if not using KRaft) can be started locally using their respective start scripts.
109 | If you do this, though, the bootstrap servers variable will need to be adjusted accordingly.
110 |
111 | The following steps can be used to run this application locally outside of Docker.
112 |
113 | ```bash
114 | # Assumes Kafka default port
115 | export CONNECT_BOOTSTRAP_SERVERS=127.0.0.1:9092
116 |
117 | export CONNECT_GROUP_ID=cg_connect-jib
118 | export CONNECT_CONFIG_STORAGE_TOPIC=connect-jib_config
119 | export CONNECT_OFFSET_STORAGE_TOPIC=connect-jib_offsets
120 | export CONNECT_STATUS_STORAGE_TOPIC=connect-jib_status
121 | # Cannot be higher than the number of brokers in the Kafka cluster
122 | export CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR=1
123 | export CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR=1
124 | export CONNECT_STATUS_STORAGE_REPLICATION_FACTOR=1
125 |
126 | # We're going to use ByteArrayConverter by default, and let individual connectors configure themselves
127 | export CONNECT_KEY_CONVERTER=org.apache.kafka.connect.converters.ByteArrayConverter
128 | export CONNECT_VALUE_CONVERTER=org.apache.kafka.connect.converters.ByteArrayConverter
129 |
130 | # Runs ConnectDistributed via Maven
131 | ./mvnw clean exec:java
132 | ```
133 |
134 | ### Start Kafka Cluster in Docker
135 |
136 | > ***Note***: Sometimes the Kafka container kills itself in the steps below, and the consumer commands may therefore need to be re-executed. The Connect worker should reconnect on its own.
137 |
138 | For this exercise, we will be using three separate terminal windows, so go ahead and open those.
139 |
140 | First, we start with getting our cluster running in the foreground. This starts Kafka listening on `9092` on the host, and `29092` within the Docker network.
141 |
142 | > *Terminal 1*
143 |
144 | ```bash
145 | docker compose up kafka
146 | ```
147 |
148 | ### Create Kafka Topics
149 |
150 | We need to create the topic that data will be produced into.
151 |
152 | > *Terminal 2*
153 |
154 | ```bash
155 | docker compose exec kafka \
156 | bash -c "kafka-topics.sh --create --bootstrap-server kafka:29092 --topic input --partitions=1 --replication-factor=1"
157 | ```
158 |
159 | Verify topics exist
160 |
161 | ```bash
162 | docker compose exec kafka \
163 | bash -c "kafka-topics.sh --list --bootstrap-server kafka:29092"
164 | ```
165 |
166 | Should include `input` topic in the list.
167 |
168 | ### Produce Lorem Ipsum into input topic
169 |
170 | ```bash
171 | docker compose exec kafka \
172 | bash -c "cat /data/lipsum.txt | kafka-console-producer.sh --topic input --broker-list kafka:29092"
173 | ```
174 |
175 | Verify that data is there (note: hard-coding `max-messages` to the number of lines of expected text)
176 |
177 | ```bash
178 | docker compose exec kafka \
179 | bash -c "kafka-console-consumer.sh --topic input --bootstrap-server kafka:29092 --from-beginning --max-messages=9"
180 | ```
181 |
182 | Should see last line `Processed a total of 9 messages`.
183 |
184 | ### Start Kafka Connect
185 |
186 | Now, we can build the Kafka Connect image and start it.
187 |
188 | ```bash
189 | ./mvnw clean install
190 |
191 | docker compose up connect-jib-1
192 | ```
193 |
194 | Wait for the log line `Kafka Connect Started`, then post the FileSink connector. When not provided a `file`, the connector tasks will write data to the stdout of the container (visible in the terminal where `connect-jib-1` is running).
195 |
196 | > *Terminal 3*
197 |
198 | Use Kafka Connect REST API to start this process
199 |
200 | ```bash
201 | curl -XPUT http://localhost:8083/connectors/console-sink/config -H 'Content-Type: application/json' -d '{
202 | "connector.class": "FileStreamSink",
203 | "tasks.max": 1,
204 | "topics": "input",
205 | "transforms": "MakeMap,AddPartition",
206 | "transforms.MakeMap.type": "org.apache.kafka.connect.transforms.HoistField$Value",
207 | "transforms.MakeMap.field" : "line",
208 | "transforms.AddPartition.type": "org.apache.kafka.connect.transforms.InsertField$Value",
209 | "transforms.AddPartition.partition.field" : "partition!",
210 | "key.converter": "org.apache.kafka.connect.storage.StringConverter",
211 | "value.converter": "org.apache.kafka.connect.storage.StringConverter"
212 | }'
213 | ```
214 |
215 | This will read from the beginning of the `input` topic that had data sent into it, and begin processing it.
216 |
217 | In the output of _Terminal 2_, you should see something similar to the following.
218 |
219 | ```text
220 | connect-jib_1 | Struct{line=Morbi eu pharetra dolor. ....,partition=0}
221 | connect-jib_1 | Struct{line=,partition=0}
222 | connect-jib_1 | Struct{line=Nullam mauris sapien, vestibulum ....,partition=0}
223 | ```
224 |
225 | This is the `toString()` representation of Kafka Connect's internal `Struct` class. Since we added the `HoistField$Value` transform,
226 | each record becomes a `Struct` with a `line` field holding the value of the Kafka message (one line of the `lipsum.txt` file produced earlier),
227 | as well as a `partition` field set to the consumed record's partition. The topic was created with only one partition.
228 |
229 | To repeat that process, we delete the connector and reset the consumer group.
230 |
231 | ```bash
232 | curl -XDELETE http://localhost:8083/connectors/console-sink
233 |
234 | docker compose exec kafka \
235 | bash -c "kafka-consumer-groups.sh --bootstrap-server kafka:29092 --group connect-console-sink --reset-offsets --all-topics --to-earliest --execute"
236 | ```
237 |
238 | Re-run the console-producer and `curl -XPUT ...` commands above; this time, more than 9 total messages will be printed.
239 |
240 | ## Extra
241 |
242 | ### Scaling up
243 |
244 | Redo the tutorial with a new topic having more than one partition. Produce more input data to it, then increase the connector's `tasks.max`, as sketched below.
245 | Notice that the `partition` field in the output may change (you may need to produce data multiple times to randomize the record batches).
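For example, the same REST call from the tutorial can update the connector in place with more tasks (a sketch; the transforms from the tutorial are omitted here for brevity):

```bash
curl -XPUT http://localhost:8083/connectors/console-sink/config -H 'Content-Type: application/json' -d '{
    "connector.class": "FileStreamSink",
    "tasks.max": 2,
    "topics": "input",
    "key.converter": "org.apache.kafka.connect.storage.StringConverter",
    "value.converter": "org.apache.kafka.connect.storage.StringConverter"
}'
```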
246 |
247 | ### Scaling out
248 |
249 | Scaling out the workers requires adding another container with a unique `CONNECT_REST_ADVERTISED_HOST_NAME` variable, e.g.
250 |
251 | ```yml
252 | connect-jib-2:
253 | image: *connect-image
254 | hostname: connect-jib-2
255 | depends_on:
256 | - kafka
257 | environment:
258 | <<: *connect-vars
259 | CONNECT_REST_ADVERTISED_HOST_NAME: connect-jib-2
260 | ```
261 |
262 | A reverse proxy should be added in front of all instances. See an example using Traefik in [`docker-compose.cluster.yml`](./docker-compose.cluster.yml).
263 | It can be started via `docker compose -f docker-compose.cluster.yml up` and tested with `curl -H Host:connect-jib.docker.localhost http://127.0.0.1/`.
264 |
265 | ## Extending with new Connectors
266 |
267 | > ***Disclaimer*** It is best to think of this image as a base upon which you can add your own connectors. Below is the output of the default connector plugins, as provided by the Apache Kafka project.
268 |
269 | Connector plugins should preferably be placed into `/app/libs`, thus requiring an environment variable of `CONNECT_PLUGIN_PATH="/app/libs"`. Kafka Connect plugins are often distributed as Tarball/ZIP/JAR files that need to be extracted or added to this path, e.g. via a volume mount or a `curl` download, as sketched below.
270 |
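For example, a minimal Dockerfile sketch that bakes a locally extracted plugin into the image (the `my-connector/` directory is a hypothetical placeholder for a plugin you have already downloaded and unpacked):

```Dockerfile
FROM cricketeerone/apache-kafka-connect:latest

# Hypothetical plugin directory, extracted on the host beforehand
COPY my-connector/ /app/libs/my-connector/

# Make the worker scan the plugin path on startup
ENV CONNECT_PLUGIN_PATH="/app/libs"
```

Alternatively, mount the extracted plugin directory as a volume at `/app/libs` at runtime.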
271 | When using the `confluent-hub` image tags, you can extend those images like so
272 |
273 | ```Dockerfile
274 | FROM cricketeerone/apache-kafka-connect:latest-confluent-hub
275 |
276 | # Example connector installation from Confluent Hub
277 | RUN confluent-hub install --no-prompt \
278 | --component-dir /app/libs --worker-configs /app/resources/connect-distributed.properties -- \
279 |     <owner>/<component>:<version>
280 | ```
281 |
282 | Where `<owner>/<component>:<version>` is copied from one of the available sources on [Confluent Hub](https://www.confluent.io/hub/).
283 | There is no guarantee in compatibility with the Kafka Connect base version and any version of a plugin that you install.
284 |
285 | To reiterate, `confluent-hub` is **not** part of the base image versions; those **only include** the connector classes provided by Apache Kafka.
286 | These are limited to the File Sink/Source and MirrorMaker 2.0 connectors. In general, you'll probably want to add your own connectors, as above, rather than use this image by itself.
287 |
288 | As of the 3.6.0 release, the `confluent-hub` tags include the `unzip` shell command for extracting other third-party connectors.
289 |
290 | For a full example of adding plugins, and using the [Confluent Schema Registry](https://docs.confluent.io/platform/current/schema-registry/index.html),
291 | please [refer to the `schema-registry` branch](https://github.com/OneCricketeer/apache-kafka-connect-docker/blob/schema-registry/Dockerfile.schema-registry).
292 |
293 | #### Default Plugins
294 |
295 | ```bash
296 | $ curl localhost:8083/connector-plugins | jq
297 | [
298 | {
299 | "class": "org.apache.kafka.connect.file.FileStreamSinkConnector",
300 | "type": "sink",
301 | "version": "4.0.0"
302 | },
303 | {
304 | "class": "org.apache.kafka.connect.file.FileStreamSourceConnector",
305 | "type": "source",
306 | "version": "4.0.0"
307 | },
308 | {
309 | "class": "org.apache.kafka.connect.mirror.MirrorCheckpointConnector",
310 | "type": "source",
311 | "version": "4.0.0"
312 | },
313 | {
314 | "class": "org.apache.kafka.connect.mirror.MirrorHeartbeatConnector",
315 | "type": "source",
316 | "version": "4.0.0"
317 | },
318 | {
319 | "class": "org.apache.kafka.connect.mirror.MirrorSourceConnector",
320 | "type": "source",
321 | "version": "4.0.0"
322 | }
323 | ]
324 | ```
325 |
326 | The File Source/Sink connectors are **not** to be used in production,
327 | and are only really meant as a "simple, standalone example," [according to the docs](https://kafka.apache.org/documentation/#connect_developing) (emphasis added).
328 |
329 | > A _simple **example**_ is included with the source code for Kafka in the `file` package. This connector is **_meant for use in standalone mode_**
330 | >
331 | > ...
332 | >
333 | > files have trivially structured data -- each line is just a string. Almost **_all practical connectors_** will need schemas with more complex data formats.
334 |
335 | That being said, the MirrorSource connector would be a more real-world example.
336 |
337 | ## HTTP Authentication
338 |
339 | [Confluent documentation covers this for Basic Auth](https://docs.confluent.io/platform/current/security/basic-auth.html#kconnect-rest-api).
340 |
341 | Create files
342 |
343 | ```shell
344 | $ cat /tmp/connect-jaas.conf
345 | KafkaConnect {
346 | org.apache.kafka.connect.rest.basic.auth.extension.PropertyFileLoginModule required
347 | file="/tmp/connect.password";
348 | };
349 | $ cat /tmp/connect.password # add as many lines as needed
350 | admin: OneCricketeer
351 | ```
352 |
353 | Add environment variables and mounts (`JAVA_TOOL_OPTIONS` comes from Eclipse Temurin base image)
354 |
355 | ```yaml
356 | environment:
357 | ...
358 | # Auth
359 | CONNECT_REST_EXTENSION_CLASSES: org.apache.kafka.connect.rest.basic.auth.extension.BasicAuthSecurityRestExtension
360 | JAVA_TOOL_OPTIONS: "-Djava.security.auth.login.config=/app/connect-jaas.conf"
361 | volumes:
362 | # Auth
363 | - /tmp/connect-jaas.conf:/app/connect-jaas.conf:ro
364 | - /tmp/connect.password:/tmp/connect.password:ro
365 | ```
366 |
367 | `docker compose up` and test it
368 |
369 | ```shell
370 | $ curl -w'\n' http://localhost:8083
371 | User cannot access the resource.
372 | $ curl -w'\n' -uadmin:OneCricketeer http://localhost:8083
373 | {"version":"4.0.0","commit":"60e845626d8a465a","kafka_cluster_id":"nA5eYC5WSrSHjaKgw1BpHg"}
374 | ```
375 |
376 | ## Maven Details
377 |
378 | The `exec:java` goal can be used to run Kafka Connect outside of Docker.
379 |
380 | To rebuild the container, for example, run `./mvnw clean install` or `make`.
381 |
382 | ## Cleanup environment
383 |
384 | ```bash
385 | docker compose rm -sf
386 | # Clean up mounted docker volumes
387 | docker volume ls | grep $(basename `pwd`) | awk '{print $2}' | xargs docker volume rm
388 | # Clean up networks
389 | docker network ls | grep $(basename `pwd`) | awk '{print $2}' | xargs docker network rm
390 | ```
391 |
392 | ## More information
393 |
394 | Learn [more about Jib](https://github.com/GoogleContainerTools/jib).
395 |
396 | Learn [more about Apache Kafka & Kafka Connect](http://kafka.apache.org/documentation).
397 |
--------------------------------------------------------------------------------
/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
3 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
4 |   <modelVersion>4.0.0</modelVersion>
5 | 
6 |   <groupId>cricket.jmoore</groupId>
7 |   <artifactId>kafka-connect-docker</artifactId>
8 |   <version>4.0.0</version>
9 |   <packaging>jar</packaging>
10 | 
11 |   <properties>
12 |     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
13 | 
14 |     <maven.compiler.release>17</maven.compiler.release>
15 | 
16 |     <kafka.version>${project.version}</kafka.version>
17 | 
18 |     <jackson.version>2.16.2</jackson.version>
19 |     <jetty.version>12.0.15</jetty.version>
20 |     <jersey.version>3.1.9</jersey.version>
21 |     <slf4j.version>1.7.36</slf4j.version>
22 |     <log4j2.version>2.24.3</log4j2.version>
23 | 
24 |     <junit.version>5.12.2</junit.version>
25 |     <assertj.version>3.27.3</assertj.version>
26 | 
27 |     <mainClass>org.apache.kafka.connect.cli.ConnectDistributedWrapper</mainClass>
28 |     <jib-maven-plugin.version>3.4.5</jib-maven-plugin.version>
29 |     <baseDockerImage>eclipse-temurin:${maven.compiler.release}-jre</baseDockerImage>
30 |     <jib.from.image>${baseDockerImage}</jib.from.image>
31 |     <jib.to.image>cricketeerone/apache-kafka-connect</jib.to.image>
32 |   </properties>
33 | 
34 |   <dependencyManagement>
35 |     <dependencies>
36 |       <dependency>
37 |         <groupId>org.junit</groupId>
38 |         <artifactId>junit-bom</artifactId>
39 |         <version>${junit.version}</version>
40 |         <type>pom</type>
41 |         <scope>import</scope>
42 |       </dependency>
43 | 
44 |       <dependency>
45 |         <groupId>com.fasterxml.jackson</groupId>
46 |         <artifactId>jackson-bom</artifactId>
47 |         <version>${jackson.version}</version>
48 |         <type>pom</type>
49 |         <scope>import</scope>
50 |       </dependency>
51 |       <dependency>
52 |         <groupId>com.fasterxml.jackson.core</groupId>
53 |         <artifactId>jackson-databind</artifactId>
54 |         <version>${jackson.version}</version>
55 |       </dependency>
56 | 
57 |       <dependency>
58 |         <groupId>org.eclipse.jetty</groupId>
59 |         <artifactId>jetty-bom</artifactId>
60 |         <version>${jetty.version}</version>
61 |         <type>pom</type>
62 |         <scope>import</scope>
63 |       </dependency>
64 | 
65 |       <dependency>
66 |         <groupId>org.glassfish.jersey</groupId>
67 |         <artifactId>jersey-bom</artifactId>
68 |         <version>${jersey.version}</version>
69 |         <type>pom</type>
70 |         <scope>import</scope>
71 |       </dependency>
72 | 
73 |       <dependency>
74 |         <groupId>org.slf4j</groupId>
75 |         <artifactId>slf4j-api</artifactId>
76 |         <version>${slf4j.version}</version>
77 |       </dependency>
78 |       <dependency>
79 |         <groupId>org.slf4j</groupId>
80 |         <artifactId>log4j-over-slf4j</artifactId>
81 |         <version>${slf4j.version}</version>
82 |       </dependency>
83 |       <dependency>
84 |         <groupId>org.apache.logging.log4j</groupId>
85 |         <artifactId>log4j-api</artifactId>
86 |         <version>${log4j2.version}</version>
87 |       </dependency>
88 |       <dependency>
89 |         <groupId>org.apache.logging.log4j</groupId>
90 |         <artifactId>log4j-core</artifactId>
91 |         <version>${log4j2.version}</version>
92 |       </dependency>
93 |       <dependency>
94 |         <groupId>org.apache.logging.log4j</groupId>
95 |         <artifactId>log4j-slf4j-impl</artifactId>
96 |         <version>${log4j2.version}</version>
97 |       </dependency>
98 | 
99 |       <dependency>
100 |         <groupId>org.apache.kafka</groupId>
101 |         <artifactId>kafka-clients</artifactId>
102 |         <version>${kafka.version}</version>
103 |       </dependency>
104 | 
105 |       <dependency>
106 |         <groupId>org.apache.kafka</groupId>
107 |         <artifactId>connect-api</artifactId>
108 |         <version>${kafka.version}</version>
109 |       </dependency>
110 |       <dependency>
111 |         <groupId>org.apache.kafka</groupId>
112 |         <artifactId>connect-runtime</artifactId>
113 |         <version>${kafka.version}</version>
114 |       </dependency>
115 |       <dependency>
116 |         <groupId>org.apache.kafka</groupId>
117 |         <artifactId>connect-transforms</artifactId>
118 |         <version>${kafka.version}</version>
119 |       </dependency>
120 |       <dependency>
121 |         <groupId>org.apache.kafka</groupId>
122 |         <artifactId>connect-json</artifactId>
123 |         <version>${kafka.version}</version>
124 |       </dependency>
125 |       <dependency>
126 |         <groupId>org.apache.kafka</groupId>
127 |         <artifactId>connect-basic-auth-extension</artifactId>
128 |         <version>${kafka.version}</version>
129 |       </dependency>
130 | 
131 |       <dependency>
132 |         <groupId>org.apache.kafka</groupId>
133 |         <artifactId>connect-file</artifactId>
134 |         <version>${kafka.version}</version>
135 |       </dependency>
136 |       <dependency>
137 |         <groupId>org.apache.kafka</groupId>
138 |         <artifactId>connect-mirror</artifactId>
139 |         <version>${kafka.version}</version>
140 |       </dependency>
141 |       <dependency>
142 |         <groupId>org.apache.kafka</groupId>
143 |         <artifactId>connect-mirror-client</artifactId>
144 |         <version>${kafka.version}</version>
145 |       </dependency>
146 | 
147 |       <dependency>
148 |         <groupId>org.assertj</groupId>
149 |         <artifactId>assertj-core</artifactId>
150 |         <version>${assertj.version}</version>
151 |       </dependency>
152 |     </dependencies>
153 |   </dependencyManagement>
154 | 
155 |   <dependencies>
156 |     <dependency>
157 |       <groupId>org.slf4j</groupId>
158 |       <artifactId>slf4j-api</artifactId>
159 |     </dependency>
160 |     <dependency>
161 |       <groupId>org.slf4j</groupId>
162 |       <artifactId>log4j-over-slf4j</artifactId>
163 |     </dependency>
164 |     <dependency>
165 |       <groupId>org.apache.logging.log4j</groupId>
166 |       <artifactId>log4j-api</artifactId>
167 |     </dependency>
168 |     <dependency>
169 |       <groupId>org.apache.logging.log4j</groupId>
170 |       <artifactId>log4j-core</artifactId>
171 |     </dependency>
172 |     <dependency>
173 |       <groupId>org.apache.logging.log4j</groupId>
174 |       <artifactId>log4j-slf4j-impl</artifactId>
175 |     </dependency>
176 | 
177 |     <dependency>
178 |       <groupId>org.apache.kafka</groupId>
179 |       <artifactId>kafka-clients</artifactId>
180 |     </dependency>
181 | 
182 |     <dependency>
183 |       <groupId>org.apache.kafka</groupId>
184 |       <artifactId>connect-api</artifactId>
185 |     </dependency>
186 |     <dependency>
187 |       <groupId>org.apache.kafka</groupId>
188 |       <artifactId>connect-runtime</artifactId>
189 |     </dependency>
190 |     <dependency>
191 |       <groupId>org.apache.kafka</groupId>
192 |       <artifactId>kafka-tools</artifactId>
193 |       <version>${kafka.version}</version>
194 |     </dependency>
195 | 
196 |     <dependency>
197 |       <groupId>org.apache.kafka</groupId>
198 |       <artifactId>connect-transforms</artifactId>
199 |     </dependency>
200 |     <dependency>
201 |       <groupId>org.apache.kafka</groupId>
202 |       <artifactId>connect-json</artifactId>
203 |     </dependency>
204 |     <dependency>
205 |       <groupId>org.apache.kafka</groupId>
206 |       <artifactId>connect-basic-auth-extension</artifactId>
207 |     </dependency>
208 | 
209 |     <dependency>
210 |       <groupId>org.apache.kafka</groupId>
211 |       <artifactId>connect-file</artifactId>
212 |     </dependency>
213 |     <dependency>
214 |       <groupId>org.apache.kafka</groupId>
215 |       <artifactId>connect-mirror</artifactId>
216 |       <exclusions>
217 |         <exclusion>
218 |           <groupId>net.sourceforge.argparse4j</groupId>
219 |           <artifactId>argparse4j</artifactId>
220 |         </exclusion>
221 |       </exclusions>
222 |     </dependency>
223 |     <dependency>
224 |       <groupId>org.apache.kafka</groupId>
225 |       <artifactId>connect-mirror-client</artifactId>
226 |     </dependency>
227 | 
228 |     <dependency>
229 |       <groupId>org.junit.jupiter</groupId>
230 |       <artifactId>junit-jupiter-api</artifactId>
231 |       <scope>test</scope>
232 |     </dependency>
233 |     <dependency>
234 |       <groupId>org.junit.jupiter</groupId>
235 |       <artifactId>junit-jupiter-params</artifactId>
236 |       <scope>test</scope>
237 |     </dependency>
238 |     <dependency>
239 |       <groupId>org.assertj</groupId>
240 |       <artifactId>assertj-core</artifactId>
241 |       <scope>test</scope>
242 |     </dependency>
243 |   </dependencies>
244 | 
245 |   <build>
246 |     <pluginManagement>
247 |       <plugins>
248 |         <plugin>
249 |           <groupId>org.apache.maven.plugins</groupId>
250 |           <artifactId>maven-resources-plugin</artifactId>
251 |           <version>3.3.1</version>
252 |         </plugin>
253 | 
254 |         <plugin>
255 |           <groupId>com.google.cloud.tools</groupId>
256 |           <artifactId>jib-maven-plugin</artifactId>
257 |           <version>${jib-maven-plugin.version}</version>
258 |           <configuration>
259 |             <from>
260 |               <image>${jib.from.image}</image>
261 |             </from>
262 |             <to>
263 |               <image>${jib.to.image}</image>
264 |             </to>
265 |             <container>
266 |               <mainClass>${mainClass}</mainClass>
267 |               <jvmFlags>
268 |                 <jvmFlag>-server</jvmFlag>
269 |                 <jvmFlag>-XX:+UseG1GC</jvmFlag>
270 |                 <jvmFlag>-XX:MaxGCPauseMillis=20</jvmFlag>
271 |                 <jvmFlag>-XX:InitiatingHeapOccupancyPercent=35</jvmFlag>
272 |                 <jvmFlag>-XX:+ExplicitGCInvokesConcurrent</jvmFlag>
273 |                 <jvmFlag>-XX:MaxInlineLevel=15</jvmFlag>
274 |                 <jvmFlag>-Djava.awt.headless=true</jvmFlag>
275 |               </jvmFlags>
276 |               <creationTime>USE_CURRENT_TIMESTAMP</creationTime>
277 |             </container>
278 |           </configuration>
279 |           <executions>
280 |             <execution>
281 |               <id>docker-load</id>
282 |               <phase>install</phase>
283 |               <goals>
284 |                 <goal>dockerBuild</goal>
285 |               </goals>
286 |             </execution>
287 |           </executions>
288 |         </plugin>
289 | 
290 |         <plugin>
291 |           <groupId>org.codehaus.mojo</groupId>
292 |           <artifactId>exec-maven-plugin</artifactId>
293 |           <version>3.5.0</version>
294 |           <configuration>
295 |             <mainClass>${mainClass}</mainClass>
296 |           </configuration>
297 |         </plugin>
298 |         <plugin>
299 |           <groupId>org.apache.maven.plugins</groupId>
300 |           <artifactId>maven-surefire-plugin</artifactId>
301 |           <version>3.5.3</version>
302 |         </plugin>
303 |         <plugin>
304 |           <groupId>org.apache.maven.plugins</groupId>
305 |           <artifactId>maven-jar-plugin</artifactId>
306 |           <version>3.4.2</version>
307 |         </plugin>
308 |         <plugin>
309 |           <groupId>org.apache.maven.plugins</groupId>
310 |           <artifactId>maven-install-plugin</artifactId>
311 |           <version>3.1.4</version>
312 |         </plugin>
313 |       </plugins>
314 |     </pluginManagement>
315 | 
316 |     <plugins>
317 |       <plugin>
318 |         <groupId>org.codehaus.mojo</groupId>
319 |         <artifactId>exec-maven-plugin</artifactId>
320 |       </plugin>
321 |       <plugin>
322 |         <groupId>org.apache.maven.plugins</groupId>
323 |         <artifactId>maven-surefire-plugin</artifactId>
324 |       </plugin>
325 |       <plugin>
326 |         <groupId>org.apache.maven.plugins</groupId>
327 |         <artifactId>maven-jar-plugin</artifactId>
328 |         <executions>
329 |           <execution>
330 |             <id>default-jar</id>
331 |             <phase>none</phase>
332 |           </execution>
333 |         </executions>
334 |       </plugin>
335 |       <plugin>
336 |         <groupId>org.apache.maven.plugins</groupId>
337 |         <artifactId>maven-install-plugin</artifactId>
338 |         <executions>
339 |           <execution>
340 |             <id>default-install</id>
341 |             <phase>none</phase>
342 |           </execution>
343 |         </executions>
344 |       </plugin>
345 |       <plugin>
346 |         <groupId>com.google.cloud.tools</groupId>
347 |         <artifactId>jib-maven-plugin</artifactId>
348 |       </plugin>
349 |     </plugins>
350 |   </build>
351 | 
352 |   <profiles>
353 |     <profile>
354 |       <id>ubuntu</id>
355 |       <activation>
356 |         <activeByDefault>true</activeByDefault>
357 |       </activation>
358 |       <build>
359 |         <plugins>
360 |           <plugin>
361 |             <groupId>com.google.cloud.tools</groupId>
362 |             <artifactId>jib-maven-plugin</artifactId>
363 |             <configuration>
364 |               <to>
365 |                 <tags>
366 |                   <tag>latest</tag>
367 |                   <tag>${project.version}</tag>
368 |                 </tags>
369 |               </to>
370 |             </configuration>
371 |           </plugin>
372 |         </plugins>
373 |       </build>
374 |     </profile>
375 | 
376 |     <profile>
377 |       <id>alpine-temurin</id>
378 |       <properties>
379 |         <jib.from.image>${baseDockerImage}-alpine</jib.from.image>
380 |       </properties>
381 |       <build>
382 |         <plugins>
383 |           <plugin>
384 |             <groupId>com.google.cloud.tools</groupId>
385 |             <artifactId>jib-maven-plugin</artifactId>
386 |             <configuration>
387 |               <to>
388 |                 <tags>
389 |                   <tag>alpine</tag>
390 |                   <tag>${project.version}-alpine</tag>
391 |                 </tags>
392 |               </to>
393 |             </configuration>
394 |           </plugin>
395 |         </plugins>
396 |       </build>
397 |     </profile>
398 | 
399 |     <profile>
400 |       <id>ubuntu-multi-arch</id>
401 |       <build>
402 |         <plugins>
403 |           <plugin>
404 |             <groupId>com.google.cloud.tools</groupId>
405 |             <artifactId>jib-maven-plugin</artifactId>
406 |             <configuration>
407 |               <from>
408 |                 <platforms>
409 |                   <platform>
410 |                     <os>linux</os>
411 |                     <architecture>amd64</architecture>
412 |                   </platform>
413 |                   <platform>
414 |                     <os>linux</os>
415 |                     <architecture>arm64</architecture>
416 |                   </platform>
417 |                 </platforms>
418 |               </from>
419 |             </configuration>
420 |           </plugin>
421 |         </plugins>
422 |       </build>
423 |     </profile>
424 | 
425 |     <profile>
426 |       <id>alpine-multi-arch</id>
427 |       <properties>
428 |         <jib.from.image>amazoncorretto:${maven.compiler.release}-alpine</jib.from.image>
429 |       </properties>
430 |       <build>
431 |         <plugins>
432 |           <plugin>
433 |             <groupId>com.google.cloud.tools</groupId>
434 |             <artifactId>jib-maven-plugin</artifactId>
435 |             <configuration>
436 |               <from>
437 |                 <platforms>
438 |                   <platform>
439 |                     <os>linux</os>
440 |                     <architecture>amd64</architecture>
441 |                   </platform>
442 |                   <platform>
443 |                     <os>linux</os>
444 |                     <architecture>arm64</architecture>
445 |                   </platform>
446 |                 </platforms>
447 |               </from>
448 |               <to>
449 |                 <tags>
450 |                   <tag>alpine</tag>
451 |                   <tag>${project.version}-alpine</tag>
452 |                 </tags>
453 |               </to>
454 |             </configuration>
455 |           </plugin>
456 |         </plugins>
457 |       </build>
458 |     </profile>
459 |   </profiles>
460 | </project>
--------------------------------------------------------------------------------