├── .gitignore
├── README.md
├── appendix_B
├── play.yml
└── 예제
│ ├── 예제 B-1
│ └── 예제 B-2
├── appendix_C
├── cluster_zk_kafka
│ └── docker-compose.yml
└── single_zk_kafka
│ └── docker-compose.yml
├── chapter10
├── 10_commands.txt
├── python-avro-consumer.py
├── python-avro-consumer2.py
├── python-avro-consumer_v1.py
├── python-avro-consumer_v2.py
├── python-avro-producer.py
├── python-avro-producer2.py
├── python-avro-producer_v1.py
├── python-avro-producer_v2.py
└── 예제
│ ├── 예제 10-1
│ ├── 예제 10-10
│ ├── 예제 10-11
│ ├── 예제 10-12
│ ├── 예제 10-13
│ ├── 예제 10-14
│ ├── 예제 10-2
│ ├── 예제 10-3
│ ├── 예제 10-4
│ ├── 예제 10-5
│ ├── 예제 10-6
│ ├── 예제 10-7
│ ├── 예제 10-8
│ └── 예제 10-9
├── chapter11
├── 11_commands.txt
└── 예제
│ ├── 예제 11-1
│ ├── 예제 11-2
│ └── 예제 11-3
├── chapter12
├── 12_commands.txt
├── ansible_playbook
│ ├── delete-topics.sh
│ ├── es.yml
│ ├── expoter.yml
│ ├── group_vars
│ │ ├── all.yml
│ │ ├── kafkahosts.yml
│ │ └── zkhosts.yml
│ ├── hosts
│ ├── kafka3.yml
│ ├── kafka4.yml
│ ├── local-kafka.yml
│ ├── monitoring.yml
│ ├── roles
│ │ ├── common
│ │ │ └── tasks
│ │ │ │ └── main.yml
│ │ ├── docker
│ │ │ └── tasks
│ │ │ │ └── main.yml
│ │ ├── exporterall
│ │ │ ├── files
│ │ │ │ ├── jmx-exporter.service
│ │ │ │ ├── jmx_prometheus_httpserver-0.13.1-SNAPSHOT-jar-with-dependencies.jar
│ │ │ │ ├── jmx_prometheus_httpserver.yml
│ │ │ │ ├── kafka-exporter-stop.sh
│ │ │ │ └── node-exporter.service
│ │ │ ├── tasks
│ │ │ │ └── main.yml
│ │ │ └── templates
│ │ │ │ └── kafka-exporter.service.j2
│ │ ├── kafka
│ │ │ ├── files
│ │ │ │ ├── confluentinc-kafka-connect-avro-converter-5.5.3.zip
│ │ │ │ ├── connect-distributed.properties
│ │ │ │ ├── jmx
│ │ │ │ ├── kafka-connect.service
│ │ │ │ └── kafka-server.service
│ │ │ ├── handlers
│ │ │ │ └── main.yml
│ │ │ ├── tasks
│ │ │ │ └── main.yml
│ │ │ └── templates
│ │ │ │ └── server.properties.j2
│ │ ├── monitoring
│ │ │ ├── files
│ │ │ │ └── prometheus.yml
│ │ │ └── tasks
│ │ │ │ └── main.yml
│ │ ├── schemaregistry
│ │ │ ├── files
│ │ │ │ ├── schema-registry.properties
│ │ │ │ └── schema-registry.service
│ │ │ └── tasks
│ │ │ │ └── main.yml
│ │ └── zookeeper
│ │ │ ├── files
│ │ │ └── zookeeper-server.service
│ │ │ ├── handlers
│ │ │ └── main.yml
│ │ │ ├── tasks
│ │ │ └── main.yml
│ │ │ └── templates
│ │ │ └── zoo.cfg.j2
│ ├── schema-registry.yml
│ ├── site.yml
│ ├── stop-etc.yml
│ ├── stop-kafka.yml
│ └── zookeeper.yml
├── python
│ ├── consumer-1_kafka-1_v1.py
│ ├── consumer-1_kafka-1_v2.py
│ ├── consumer_kafka-2_producer_es_v1.py
│ ├── consumer_kafka-2_producer_es_v2.py
│ ├── producer-1_kafka-1_v1.py
│ ├── producer-1_kafka-1_v2.py
│ └── producer-1_kafka-1_v3.py
└── 예제
│ ├── 예제 12-1
│ ├── 예제 12-2
│ ├── 예제 12-3
│ ├── 예제 12-4
│ ├── 예제 12-5
│ └── 예제 12-6
├── chapter2
├── 2_commands.txt
├── ansible_playbook
│ ├── group_vars
│ │ ├── all.yml
│ │ ├── kafkahosts.yml
│ │ └── zkhosts.yml
│ ├── hosts
│ ├── kafka-exporter.yml
│ ├── kafka-scaleout.yml
│ ├── kafka.yml
│ ├── kafka1.yml
│ ├── kafka2.1.yml
│ ├── kafka2.yml
│ ├── kerberos.yml
│ ├── roles
│ │ ├── common
│ │ │ ├── tasks
│ │ │ │ └── main.yml
│ │ │ └── templates
│ │ │ │ └── krb5.conf.j2
│ │ ├── kafka
│ │ │ ├── files
│ │ │ │ ├── connect-distributed.properties
│ │ │ │ ├── jmx
│ │ │ │ ├── kafka-connect.service
│ │ │ │ └── kafka-server.service
│ │ │ ├── handlers
│ │ │ │ └── main.yml
│ │ │ ├── tasks
│ │ │ │ └── main.yml
│ │ │ └── templates
│ │ │ │ └── server.properties.j2
│ │ ├── kafkaexporter
│ │ │ ├── files
│ │ │ │ └── kafka-exporter-stop.sh
│ │ │ ├── tasks
│ │ │ │ └── main.yml
│ │ │ └── templates
│ │ │ │ └── kafka-exporter.service.j2
│ │ ├── kerberos
│ │ │ ├── tasks
│ │ │ │ └── main.yml
│ │ │ └── templates
│ │ │ │ ├── kdc.conf.j2
│ │ │ │ └── krb5.conf.j2
│ │ └── zookeeper
│ │ │ ├── files
│ │ │ └── zookeeper-server.service
│ │ │ ├── handlers
│ │ │ └── main.yml
│ │ │ ├── tasks
│ │ │ └── main.yml
│ │ │ └── templates
│ │ │ └── zoo.cfg.j2
│ ├── site.yml
│ └── zookeeper.yml
└── 예제
│ └── 예제 2-1.txt
├── chapter3
├── 3_commands.txt
├── pom.xml
├── src
│ └── main
│ │ └── java
│ │ ├── ConsumerAsync.java
│ │ ├── ConsumerAuto.java
│ │ ├── ConsumerSync.java
│ │ ├── PeterProducerCallback.java
│ │ ├── ProducerAsync.java
│ │ ├── ProducerFireForgot.java
│ │ └── ProducerSync.java
└── 예제
│ ├── 예제 3-1.txt
│ ├── 예제 3-2.txt
│ ├── 예제 3-3.txt
│ ├── 예제 3-4.txt
│ ├── 예제 3-5.txt
│ ├── 예제 3-6.txt
│ └── 예제 3-7.txt
├── chapter4
└── 4_commands.txt
├── chapter5
├── 5_commands.txt
├── ExactlyOnceProducer.jar
├── pom.xml
├── src
│ └── main
│ │ └── java
│ │ └── ExactlyOnceProducer.java
└── 예제
│ ├── 예제 5-1
│ ├── 예제 5-2
│ ├── 예제 5-3
│ └── 예제 5-4
├── chapter6
├── 6_commands.txt
├── ExactlyOnceConsumer.jar
├── consumer_standard.py
├── consumer_static.py
├── pom.xml
├── producer.py
├── src
│ └── main
│ │ └── java
│ │ └── ExactlyOnceConsumer.java
└── 예제
│ ├── 예제 6-1
│ ├── 예제 6-2
│ ├── 예제 6-3
│ └── 예제 6-4
├── chapter7
├── 7_commands.txt
├── jmx-exporter.service
├── jmx_prometheus_httpserver-0.13.1-SNAPSHOT-jar-with-dependencies.jar
├── jmx_prometheus_httpserver.yml
├── kafka_metrics.json
├── node-exporter.service
├── prometheus.yml
└── 예제
│ ├── 예제 7-1
│ ├── 예제 7-2
│ ├── 예제 7-3
│ ├── 예제 7-4
│ └── 예제 7-5
├── chapter8
├── 8_commands.txt
└── 예제
│ ├── 예제 8-1
│ ├── 예제 8-2
│ ├── 예제 8-3
│ └── 예제 8-4
└── chapter9
├── 9_commands.txt
└── 예제
├── 예제 9-1
├── 예제 9-2
├── 예제 9-3
├── 예제 9-4
├── 예제 9-5
├── 예제 9-6
├── 예제 9-7
└── 예제 9-8
/.gitignore:
--------------------------------------------------------------------------------
1 | # General
2 | .DS_Store
3 | .AppleDouble
4 | .LSOverride
5 |
6 | # Byte-compiled / optimized / DLL files
7 | __pycache__/
8 | *.py[cod]
9 | *$py.class
10 |
11 | # C extensions
12 | *.so
13 |
14 | # Distribution / packaging
15 | .Python
16 | build/
17 | develop-eggs/
18 | dist/
19 | downloads/
20 | eggs/
21 | .eggs/
22 | lib/
23 | lib64/
24 | parts/
25 | sdist/
26 | var/
27 | wheels/
28 | share/python-wheels/
29 | *.egg-info/
30 | .installed.cfg
31 | *.egg
32 | MANIFEST
33 |
34 | # PyInstaller
35 | # Usually these files are written by a python script from a template
36 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
37 | *.manifest
38 | *.spec
39 |
40 | # Installer logs
41 | pip-log.txt
42 | pip-delete-this-directory.txt
43 |
44 | # Unit test / coverage reports
45 | htmlcov/
46 | .tox/
47 | .nox/
48 | .coverage
49 | .coverage.*
50 | .cache
51 | nosetests.xml
52 | coverage.xml
53 | *.cover
54 | *.py,cover
55 | .hypothesis/
56 | .pytest_cache/
57 | cover/
58 |
59 | # Translations
60 | *.mo
61 | *.pot
62 |
63 | # Django stuff:
64 | *.log
65 | local_settings.py
66 | db.sqlite3
67 | db.sqlite3-journal
68 |
69 | # Flask stuff:
70 | instance/
71 | .webassets-cache
72 |
73 | # Scrapy stuff:
74 | .scrapy
75 |
76 | # Sphinx documentation
77 | docs/_build/
78 |
79 | # PyBuilder
80 | .pybuilder/
81 | target/
82 |
83 | # Jupyter Notebook
84 | .ipynb_checkpoints
85 |
86 | # IPython
87 | profile_default/
88 | ipython_config.py
89 |
90 | # pyenv
91 | # For a library or package, you might want to ignore these files since the code is
92 | # intended to run in multiple environments; otherwise, check them in:
93 | # .python-version
94 |
95 | # pipenv
96 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
97 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
98 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
99 | # install all needed dependencies.
100 | #Pipfile.lock
101 |
102 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
103 | __pypackages__/
104 |
105 | # Celery stuff
106 | celerybeat-schedule
107 | celerybeat.pid
108 |
109 | # SageMath parsed files
110 | *.sage.py
111 |
112 | # Environments
113 | .env
114 | .venv
115 | env/
116 | venv/
117 | ENV/
118 | env.bak/
119 | venv.bak/
120 |
121 | # Spyder project settings
122 | .spyderproject
123 | .spyproject
124 |
125 | # Rope project settings
126 | .ropeproject
127 |
128 | # mkdocs documentation
129 | /site
130 |
131 | # mypy
132 | .mypy_cache/
133 | .dmypy.json
134 | dmypy.json
135 |
136 | # Pyre type checker
137 | .pyre/
138 |
139 | # pytype static type analyzer
140 | .pytype/
141 |
142 | # Cython debug symbols
143 | cython_debug/
144 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | # 실전 카프카 개발부터 운영까지 (Practical Kafka: From Development to Operations)
4 | ## Everything about the internals, development, operations, and security of Apache Kafka, the backbone of data platforms
5 | ### Written by 고승범
6 | 512 pages | 33,000 KRW | Published October 29, 2021 | 180x235x25 | ISBN 9791189909345
7 |
8 | Where to buy | [[Kyobo Book Centre]](http://www.kyobobook.co.kr/product/detailViewKor.laf?ejkGb=KOR&barcode=9791189909345&fbclid=IwAR0ArKj65Bqo7k6YrI8KtQGxOB_UIyHiOdKXWLDbumzfQ4Tjb8pWVPg3M1Q) [[YES24]](http://www.yes24.com/Product/Goods/104410708) [[Aladin]](https://www.aladin.co.kr/shop/wproduct.aspx?ItemId=281606911) [[Interpark]](http://book.interpark.com/product/BookDisplay.do?_method=detail&sc.shopNo=0000400000&sc.prdNo=354228707) + Kyobo Book Centre stores nationwide
9 |
10 | #### Errata: https://www.onlybook.co.kr/entry/kafka2-errata
11 |
12 | ### Recommended by Jun Rao, co-creator of Apache Kafka!
13 |
14 |
15 |
16 | 고승범, author of <카프카, 데이터 플랫폼의 최강자> and holder of **Korea's first and only Confluent Certified Trainer for Apache Kafka and Confluent Certified Administrator for Apache Kafka credentials**, has poured into this book all of the **hands-on experience and know-how** he built up while operating Kafka on **some of Korea's largest data platforms, at SKT, Kakao, and elsewhere**.
17 |
18 | ### Want to process large volumes of data quickly and accurately, without loss? The answer is Kafka!
19 | From Kafka's internal structure and behavior, made quick and easy to grasp through plenty of diagrams, through the example code that forms the basis of Kafka clients and the core know-how needed in real operations, to the security and monitoring techniques for running Kafka safely 365 days a year and the Schema Registry and Kafka Connect features that maximize operational convenience and efficiency: the most complete and detailed guide to everything Kafka!
20 |
21 | ### | What this book covers |
22 | * Kafka's internal structure and operating principles, explained clearly with plenty of diagrams
23 | * Kafka client example code in Java and Python
24 | * Building and operating Kafka in AWS and on-premises environments
25 | * Kafka upgrade and maintenance approaches that minimize pain
26 | * How to build security on top of Apache Kafka
27 | * Various uses of Schema Registry and Kafka Connect
28 | * Producer/consumer internals and rebalancing behavior, for using Kafka properly
29 | * Kafka architecture case studies from enterprise environments
30 | * Q&A sections distilling a working expert's experience and tips
31 |
32 | ### | Who this book is for |
33 | * Beginners who want to learn Kafka
34 | * Operators who want to put Kafka to work in production
35 | * Developers who want to understand Kafka's internals to get the most out of it
36 | * Developers who want to understand and use Kafka and its ecosystem
37 | * Architects thinking about data standardization and real-time processing
38 | * Architects who want to collect, process, and analyze data efficiently
39 |
40 | 
41 |
--------------------------------------------------------------------------------
/appendix_B/play.yml:
--------------------------------------------------------------------------------
1 | - name: ansible playbook
2 | hosts:
3 | - testservers
4 | remote_user: ec2-user
5 | connection: ssh
6 | tasks:
7 | - name: Example from an Ansible Playbook(copy)
8 | copy:
9 | src: /home/ec2-user/ex.txt
10 | dest: /home/ec2-user/ex.txt
11 | mode: '0644'
12 |
13 | - name: Example from an Ansible Playbook(shell)
14 | shell:
15 | cmd: mv ex.txt ex1.txt
--------------------------------------------------------------------------------
/appendix_B/예제/예제 B-1:
--------------------------------------------------------------------------------
1 | [testservers]
2 | test01.foo.bar
3 | test02.foo.bar
--------------------------------------------------------------------------------
/appendix_B/예제/예제 B-2:
--------------------------------------------------------------------------------
1 | - name: ansible playbook
2 | hosts:
3 | - testservers
4 | remote_user: ec2-user
5 | connection: ssh
6 | tasks:
7 | - name: Example from an Ansible Playbook(copy)
8 | copy:
9 | src: /home/ec2-user/ex.txt
10 | dest: /home/ec2-user/ex.txt
11 | mode: '0644'
12 |
13 | - name: Example from an Ansible Playbook(shell)
14 | shell:
15 | cmd: mv ex.txt ex1.txt
--------------------------------------------------------------------------------
/appendix_C/cluster_zk_kafka/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "3.5"
2 | services:
3 | zk1:
4 | image: confluentinc/cp-zookeeper:5.5.1
5 | restart: always
6 | hostname: zk1
7 | container_name: zk1
8 | ports:
9 | - "12181:12181"
10 | environment:
11 | - ZOOKEEPER_SERVER_ID=1
12 | - ZOOKEEPER_CLIENT_PORT=12181
13 | - ZOOKEEPER_TICK_TIME=2000
14 | - ZOOKEEPER_INIT_LIMIT=5
15 | - ZOOKEEPER_SYNC_LIMIT=2
16 | - ZOOKEEPER_SERVERS=zk1:2888:3888;zk2:2888:3888;zk3:2888:3888
17 | zk2:
18 | image: confluentinc/cp-zookeeper:5.5.1
19 | restart: always
20 | hostname: zk2
21 | container_name: zk2
22 | ports:
23 | - "22181:22181"
24 | environment:
25 | - ZOOKEEPER_SERVER_ID=2
26 | - ZOOKEEPER_CLIENT_PORT=22181
27 | - ZOOKEEPER_TICK_TIME=2000
28 | - ZOOKEEPER_INIT_LIMIT=5
29 | - ZOOKEEPER_SYNC_LIMIT=2
30 | - ZOOKEEPER_SERVERS=zk1:2888:3888;zk2:2888:3888;zk3:2888:3888
31 | zk3:
32 | image: confluentinc/cp-zookeeper:5.5.1
33 | restart: always
34 | hostname: zk3
35 | container_name: zk3
36 | ports:
37 | - "32181:32181"
38 | environment:
39 | - ZOOKEEPER_SERVER_ID=3
40 | - ZOOKEEPER_CLIENT_PORT=32181
41 | - ZOOKEEPER_TICK_TIME=2000
42 | - ZOOKEEPER_INIT_LIMIT=5
43 | - ZOOKEEPER_SYNC_LIMIT=2
44 | - ZOOKEEPER_SERVERS=zk1:2888:3888;zk2:2888:3888;zk3:2888:3888
45 |
46 | kafka1:
47 | image: confluentinc/cp-kafka:5.5.1
48 | restart: always
49 | hostname: kafka1
50 | container_name: kafka1
51 | ports:
52 | - "9091:9091"
53 | - "9991:9991"
54 | environment:
55 | KAFKA_BROKER_ID: 1
56 | KAFKA_ZOOKEEPER_CONNECT: zk1:12181,zk2:22181,zk3:32181
57 | KAFKA_LISTENERS: INTERNAL://kafka1:9091
58 | KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka1:9091
59 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT
60 | KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL
61 | KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
62 | KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 2
63 | KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 3
64 | KAFKA_JMX_PORT: 9991
65 | kafka2:
66 | image: confluentinc/cp-kafka:5.5.1
67 | restart: always
68 | hostname: kafka2
69 | container_name: kafka2
70 | ports:
71 | - "9092:9092"
72 | - "9992:9992"
73 | environment:
74 | KAFKA_BROKER_ID: 2
75 | KAFKA_ZOOKEEPER_CONNECT: zk1:12181,zk2:22181,zk3:32181
76 | KAFKA_LISTENERS: INTERNAL://kafka2:9092
77 | KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka2:9092
78 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT
79 | KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL
80 | KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
81 | KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 2
82 | KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 3
83 | KAFKA_JMX_PORT: 9992
84 | kafka3:
85 | image: confluentinc/cp-kafka:5.5.1
86 | restart: always
87 | hostname: kafka3
88 | container_name: kafka3
89 | ports:
90 | - "9093:9093"
91 | - "9993:9993"
92 | environment:
93 | KAFKA_BROKER_ID: 3
94 | KAFKA_ZOOKEEPER_CONNECT: zk1:12181,zk2:22181,zk3:32181
95 | KAFKA_LISTENERS: INTERNAL://kafka3:9093
96 | KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka3:9093
97 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT
98 | KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL
99 | KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
100 | KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 2
101 | KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 3
102 | KAFKA_JMX_PORT: 9993
103 |
104 | kafka_manager:
105 | image: hlebalbau/kafka-manager:stable
106 | container_name: cmak
107 | ports:
108 | - "9000:9000"
109 | environment:
110 | ZK_HOSTS: "zk1:12181,zk2:22181,zk3:32181"
111 | APPLICATION_SECRET: "random-secret"
112 | command: -Dpidfile.path=/dev/null
113 |
--------------------------------------------------------------------------------
/appendix_C/single_zk_kafka/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "3.5"
2 | services:
3 | zk:
4 | image: confluentinc/cp-zookeeper:5.5.1
5 | restart: always
6 | hostname: zk
7 | container_name: zk
8 | ports:
9 | - "2181:2181"
10 | environment:
11 | - ZOOKEEPER_SERVER_ID=1
12 | - ZOOKEEPER_CLIENT_PORT=2181
13 | - ZOOKEEPER_TICK_TIME=2000
14 | - ZOOKEEPER_INIT_LIMIT=5
15 | - ZOOKEEPER_SYNC_LIMIT=2
16 | - ZOOKEEPER_SERVERS=zk:2888:3888
17 |
18 | kafka:
19 | image: confluentinc/cp-kafka:5.5.1
20 | restart: always
21 | hostname: kafka
22 | container_name: kafka
23 | ports:
24 | - "9092:9092"
25 | - "9999:9999"
26 | environment:
27 | KAFKA_BROKER_ID: 1
28 | KAFKA_ZOOKEEPER_CONNECT: zk:2181
29 | KAFKA_LISTENERS: INTERNAL://kafka:9092
30 | KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka:9092
31 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT
32 | KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL
33 | KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
34 | KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
35 | KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
36 | KAFKA_JMX_PORT: 9999
37 |
38 | kafka_manager:
39 | image: hlebalbau/kafka-manager:stable
40 | container_name: cmak
41 | ports:
42 | - "9000:9000"
43 | environment:
44 | ZK_HOSTS: "zk:2181"
45 | APPLICATION_SECRET: "random-secret"
46 | command: -Dpidfile.path=/dev/null
--------------------------------------------------------------------------------
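A quick way to confirm the single-node stack above is up is to produce and consume one message with confluent-kafka, the Python client used in chapter 10. This is only a sketch: it assumes the hostname kafka resolves to the Docker host (for example via an /etc/hosts entry), since the broker only advertises INTERNAL://kafka:9092, and the test topic name is arbitrary.

from confluent_kafka import Consumer, Producer

BOOTSTRAP = 'kafka:9092'  # assumes "kafka" resolves to the Docker host

p = Producer({'bootstrap.servers': BOOTSTRAP})
p.produce('compose-smoke-test', value=b'hello')  # arbitrary test topic
p.flush()

c = Consumer({'bootstrap.servers': BOOTSTRAP,
              'group.id': 'compose-smoke-test',
              'auto.offset.reset': 'earliest'})
c.subscribe(['compose-smoke-test'])
msg = c.poll(10)
if msg is not None and not msg.error():
    print(msg.value())  # b'hello'
c.close()
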
/chapter10/10_commands.txt:
--------------------------------------------------------------------------------
1 | P333
2 | cd kafka2/
3 | cd chapter2/ansible_playbook
4 | ansible-playbook -i hosts kafka.yml
5 |
6 | P334
7 | cd kafka2/chapter2/ansible_playbook
8 | ansible-playbook -i hosts site.yml
9 | sudo wget http://packages.confluent.io/archive/6.1/confluent-community-6.1.0.tar.gz -O /opt/confluent-community-6.1.0.tar.gz
10 | sudo tar zxf /opt/confluent-community-6.1.0.tar.gz -C /usr/local/
11 | sudo ln -s /usr/local/confluent-6.1.0 /usr/local/confluent
12 |
13 | P335
14 | vi /usr/local/confluent/etc/schema-registry/schema-registry.properties
15 |
16 | P336
17 | sudo vi /etc/systemd/system/schema-registry.service
18 | sudo systemctl daemon-reload
19 | sudo systemctl start schema-registry
20 | curl -X GET http://peter-kafka03.foo.bar:8081/config
21 |
22 | P340
23 | sudo yum -y install python3
24 | python3 -m venv venv10
25 | source venv10/bin/activate
26 | pip install confluent-kafka[avro]
27 | pip install urllib3==1.26.6
28 |
29 | P342
30 | python python-avro-producer.py
31 |
32 | P344
33 | python python-avro-consumer.py
34 | curl http://peter-kafka03.foo.bar:8081/schemas | python -m json.tool
35 |
36 | P347
37 | python python-avro-producer2.py
38 |
39 | P348
40 | python python-avro-consumer2.py
41 |
42 | P349
43 | python python-avro-producer2.py
44 | python python-avro-consumer2.py
45 |
46 | P350
47 | python python-avro-producer.py
48 | python python-avro-consumer2.py
49 |
50 | P351
51 | curl http://peter-kafka03.foo.bar:8081/subjects/peter-avro2-value/versions | python -m json.tool
52 |
53 | P356
54 | sudo systemctl restart schema-registry
55 | curl -X GET http://peter-kafka03.foo.bar:8081/config
56 |
57 | P358
58 | python python-avro-producer_v1.py
59 |
60 | P360
61 | python python-avro-consumer_v1.py
62 |
63 | P362
64 | python python-avro-producer_v2.py
65 |
66 | P365
67 | python python-avro-producer_v2.py
68 | python python-avro-consumer_v2.py
69 | python python-avro-producer_v1.py
70 |
71 | P366
72 | sudo systemctl stop schema-registry
73 | sudo systemctl stop kafka-server
--------------------------------------------------------------------------------
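The curl checks above (P336, P344, P351, P356) hit the Schema Registry REST API directly; the same checks can be scripted from Python. A minimal sketch, assuming the requests package is available in the venv created on P340:

import requests

REGISTRY = 'http://peter-kafka03.foo.bar:8081'

print(requests.get(REGISTRY + '/config').json())                               # global compatibility level
print(requests.get(REGISTRY + '/schemas').json())                              # all registered schemas
print(requests.get(REGISTRY + '/subjects/peter-avro2-value/versions').json())  # versions of one subject
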
/chapter10/python-avro-consumer.py:
--------------------------------------------------------------------------------
1 | from confluent_kafka import avro
2 | from confluent_kafka.avro import AvroConsumer
3 | from confluent_kafka.avro.serializer import SerializerError
4 |
5 | value_schema_str = """
6 | {"namespace": "student.avro",
7 | "type": "record",
8 | "doc": "This is an example of Avro.",
9 | "name": "Student",
10 | "fields": [
11 | {"name": "name", "type": ["null", "string"], "default": null, "doc": "Name of the student"},
12 | {"name": "class", "type": "int", "default": 1, "doc": "Class of the student"}
13 | ]
14 | }
15 | """
16 |
17 | value_schema = avro.loads(value_schema_str)
18 |
19 | c = AvroConsumer({
20 | 'bootstrap.servers': 'peter-kafka01.foo.bar,peter-kafka02.foo.bar,peter-kafka03.foo.bar',
21 | 'group.id': 'python-groupid01',
22 | 'auto.offset.reset': 'earliest',
23 | 'schema.registry.url': 'http://peter-kafka03.foo.bar:8081'},reader_value_schema=value_schema)
24 |
25 | c.subscribe(['peter-avro2'])
26 |
27 | while True:
28 | try:
29 | msg = c.poll(10)
30 |
31 | except SerializerError as e:
32 | print("Message deserialization failed for {}: {}".format(msg, e))
33 | break
34 |
35 | if msg is None:
36 | continue
37 |
38 | if msg.error():
39 | print("AvroConsumer error: {}".format(msg.error()))
40 | continue
41 |
42 | print(msg.value())
43 |
44 | c.close()
--------------------------------------------------------------------------------
/chapter10/python-avro-consumer2.py:
--------------------------------------------------------------------------------
1 | from confluent_kafka import avro
2 | from confluent_kafka.avro import AvroConsumer
3 | from confluent_kafka.avro.serializer import SerializerError
4 |
5 | value_schema_str = """
6 | {"namespace": "student.avro",
7 | "type": "record",
8 | "doc": "This is an example of Avro.",
9 | "name": "Student",
10 | "fields": [
11 | {"name": "first_name", "type": ["null", "string"], "default": null, "doc": "First name of the student"},
12 | {"name": "last_name", "type": ["null", "string"], "default": null, "doc": "Last name of the student"},
13 | {"name": "class", "type": "int", "default": 1, "doc": "Class of the student"}
14 | ]
15 | }
16 | """
17 |
18 | value_schema = avro.loads(value_schema_str)
19 |
20 | c = AvroConsumer({
21 | 'bootstrap.servers': 'peter-kafka01.foo.bar,peter-kafka02.foo.bar,peter-kafka03.foo.bar',
22 | 'group.id': 'python-groupid02',
23 | 'auto.offset.reset': 'earliest',
24 | 'schema.registry.url': 'http://peter-kafka03.foo.bar:8081'},reader_value_schema=value_schema)
25 |
26 | c.subscribe(['peter-avro2'])
27 |
28 | while True:
29 | try:
30 | msg = c.poll(10)
31 |
32 | except SerializerError as e:
33 | print("Message deserialization failed for {}: {}".format(msg, e))
34 | break
35 |
36 | if msg is None:
37 | continue
38 |
39 | if msg.error():
40 | print("AvroConsumer error: {}".format(msg.error()))
41 | continue
42 |
43 | print(msg.value())
44 |
45 | c.close()
--------------------------------------------------------------------------------
/chapter10/python-avro-consumer_v1.py:
--------------------------------------------------------------------------------
1 | from confluent_kafka import avro
2 | from confluent_kafka.avro import AvroConsumer
3 | from confluent_kafka.avro.serializer import SerializerError
4 |
5 | value_schema_str = """
6 | {"namespace": "student.avro",
7 | "type": "record",
8 | "doc": "This is an example of Avro.",
9 | "name": "Student",
10 | "fields": [
11 | {"name": "name", "type": ["null", "string"], "default": null, "doc": "Name of the student"},
12 | {"name": "class", "type": "int", "default": 1, "doc": "Class of the student"}
13 | ]
14 | }
15 | """
16 |
17 | value_schema = avro.loads(value_schema_str)
18 |
19 | c = AvroConsumer({
20 | 'bootstrap.servers': 'peter-kafka01.foo.bar,peter-kafka02.foo.bar,peter-kafka03.foo.bar',
21 | 'group.id': 'python-groupid01',
22 | 'auto.offset.reset': 'earliest',
23 | 'schema.registry.url': 'http://peter-kafka03.foo.bar:8081'},reader_value_schema=value_schema)
24 |
25 | c.subscribe(['peter-avro3'])
26 |
27 | while True:
28 | try:
29 | msg = c.poll(10)
30 |
31 | except SerializerError as e:
32 | print("Message deserialization failed for {}: {}".format(msg, e))
33 | break
34 |
35 | if msg is None:
36 | continue
37 |
38 | if msg.error():
39 | print("AvroConsumer error: {}".format(msg.error()))
40 | continue
41 |
42 | print(msg.value())
43 |
44 | c.close()
--------------------------------------------------------------------------------
/chapter10/python-avro-consumer_v2.py:
--------------------------------------------------------------------------------
1 | from confluent_kafka import avro
2 | from confluent_kafka.avro import AvroConsumer
3 | from confluent_kafka.avro.serializer import SerializerError
4 |
5 | value_schema_str = """
6 | {"namespace": "student.avro",
7 | "type": "record",
8 | "doc": "This is an example of Avro.",
9 | "name": "Student",
10 | "fields": [
11 | {"name": "name", "type": ["null", "string"], "default": null, "doc": "Name of the student"},
12 | {"name": "class", "type": "int", "default": 1, "doc": "Class of the student"},
13 | {"name": "age", "type": "int", "default": 1, "doc": "Age of the student"}
14 | ]
15 | }
16 | """
17 |
18 | value_schema = avro.loads(value_schema_str)
19 |
20 | c = AvroConsumer({
21 | 'bootstrap.servers': 'peter-kafka01.foo.bar,peter-kafka02.foo.bar,peter-kafka03.foo.bar',
22 | 'group.id': 'python-groupid01',
23 | 'auto.offset.reset': 'earliest',
24 | 'schema.registry.url': 'http://peter-kafka03.foo.bar:8081'},reader_value_schema=value_schema)
25 |
26 | c.subscribe(['peter-avro3'])
27 |
28 | while True:
29 | try:
30 | msg = c.poll(10)
31 |
32 | except SerializerError as e:
33 | print("Message deserialization failed for {}: {}".format(msg, e))
34 | break
35 |
36 | if msg is None:
37 | continue
38 |
39 | if msg.error():
40 | print("AvroConsumer error: {}".format(msg.error()))
41 | continue
42 |
43 | print(msg.value())
44 |
45 | c.close()
--------------------------------------------------------------------------------
/chapter10/python-avro-producer.py:
--------------------------------------------------------------------------------
1 | from confluent_kafka import avro
2 | from confluent_kafka.avro import AvroProducer
3 |
4 | value_schema_str = """
5 | {"namespace": "student.avro",
6 | "type": "record",
7 | "doc": "This is an example of Avro.",
8 | "name": "Student",
9 | "fields": [
10 | {"name": "name", "type": ["null", "string"], "default": null, "doc": "Name of the student"},
11 | {"name": "class", "type": "int", "default": 1, "doc": "Class of the student"}
12 | ]
13 | }
14 | """
15 |
16 | value_schema = avro.loads(value_schema_str)
17 | value = {"name": "Peter", "class": 1} # message to send
18 |
19 | def delivery_report(err, msg):
20 | """ Called once for each message produced to indicate delivery result.
21 | Triggered by poll() or flush(). """
22 | if err is not None:
23 | print('Message delivery failed: {}'.format(err))
24 | else:
25 | print('Message delivered to {} [{}]'.format(msg.topic(), msg.partition()))
26 |
27 | avroProducer = AvroProducer({
28 | 'bootstrap.servers': 'peter-kafka01.foo.bar,peter-kafka02.foo.bar,peter-kafka03.foo.bar',
29 | 'on_delivery': delivery_report,
30 | 'schema.registry.url': 'http://peter-kafka03.foo.bar:8081'
31 | }, default_value_schema=value_schema)
32 |
33 | avroProducer.produce(topic='peter-avro2', value=value)
34 | avroProducer.flush()
--------------------------------------------------------------------------------
/chapter10/python-avro-producer2.py:
--------------------------------------------------------------------------------
1 | from confluent_kafka import avro
2 | from confluent_kafka.avro import AvroProducer
3 |
4 | value_schema_str = """
5 | {"namespace": "student.avro",
6 | "type": "record",
7 | "doc": "This is an example of Avro.",
8 | "name": "Student",
9 | "fields": [
10 | {"name": "first_name", "type": ["null", "string"], "default": null, "doc": "First name of the student"},
11 | {"name": "last_name", "type": ["null", "string"], "default": null, "doc": "Last name of the student"},
12 | {"name": "class", "type": "int", "default": 1, "doc": "Class of the student"}
13 | ]
14 | }
15 | """
16 |
17 | value_schema = avro.loads(value_schema_str)
18 | value = {"first_name": "Peter", "last_name": "Parker", "class": 1} # message to send
19 |
20 | def delivery_report(err, msg):
21 | """ Called once for each message produced to indicate delivery result.
22 | Triggered by poll() or flush(). """
23 | if err is not None:
24 | print('Message delivery failed: {}'.format(err))
25 | else:
26 | print('Message delivered to {} [{}]'.format(msg.topic(), msg.partition()))
27 |
28 | avroProducer = AvroProducer({
29 | 'bootstrap.servers': 'peter-kafka01.foo.bar,peter-kafka02.foo.bar,peter-kafka03.foo.bar',
30 | 'on_delivery': delivery_report,
31 | 'schema.registry.url': 'http://peter-kafka03.foo.bar:8081'
32 | }, default_value_schema=value_schema)
33 |
34 | avroProducer.produce(topic='peter-avro2', value=value)
35 | avroProducer.flush()
--------------------------------------------------------------------------------
/chapter10/python-avro-producer_v1.py:
--------------------------------------------------------------------------------
1 | from confluent_kafka import avro
2 | from confluent_kafka.avro import AvroProducer
3 |
4 | value_schema_str = """
5 | {"namespace": "student.avro",
6 | "type": "record",
7 | "doc": "This is an example of Avro.",
8 | "name": "Student",
9 | "fields": [
10 | {"name": "name", "type": ["null", "string"], "default": null, "doc": "Name of the student"},
11 | {"name": "class", "type": "int", "default": 1, "doc": "Class of the student"}
12 | ]
13 | }
14 | """
15 |
16 | value_schema = avro.loads(value_schema_str)
17 | value = {"name": "Peter", "class": 1} # message to send
18 |
19 | def delivery_report(err, msg):
20 | """ Called once for each message produced to indicate delivery result.
21 | Triggered by poll() or flush(). """
22 | if err is not None:
23 | print('Message delivery failed: {}'.format(err))
24 | else:
25 | print('Message delivered to {} [{}]'.format(msg.topic(), msg.partition()))
26 |
27 | avroProducer = AvroProducer({
28 | 'bootstrap.servers': 'peter-kafka01.foo.bar,peter-kafka02.foo.bar,peter-kafka03.foo.bar',
29 | 'on_delivery': delivery_report,
30 | 'schema.registry.url': 'http://peter-kafka03.foo.bar:8081'
31 | }, default_value_schema=value_schema)
32 |
33 | avroProducer.produce(topic='peter-avro3', value=value)
34 | avroProducer.flush()
--------------------------------------------------------------------------------
/chapter10/python-avro-producer_v2.py:
--------------------------------------------------------------------------------
1 | from confluent_kafka import avro
2 | from confluent_kafka.avro import AvroProducer
3 |
4 | value_schema_str = """
5 | {"namespace": "student.avro",
6 | "type": "record",
7 | "doc": "This is an example of Avro.",
8 | "name": "Student",
9 | "fields": [
10 | {"name": "name", "type": ["null", "string"], "default": null, "doc": "Name of the student"},
11 | {"name": "class", "type": "int", "default": 1, "doc": "Class of the student"},
12 | {"name": "age", "type": "int", "default": 1, "doc": "Age of the student"}
13 | ]
14 | }
15 | """
16 |
17 | value_schema = avro.loads(value_schema_str)
18 | value = {"name": "Peter", "class": 1, "age": 2} # message to send
19 |
20 | def delivery_report(err, msg):
21 | """ Called once for each message produced to indicate delivery result.
22 | Triggered by poll() or flush(). """
23 | if err is not None:
24 | print('Message delivery failed: {}'.format(err))
25 | else:
26 | print('Message delivered to {} [{}]'.format(msg.topic(), msg.partition()))
27 |
28 | avroProducer = AvroProducer({
29 | 'bootstrap.servers': 'peter-kafka01.foo.bar,peter-kafka02.foo.bar,peter-kafka03.foo.bar',
30 | 'on_delivery': delivery_report,
31 | 'schema.registry.url': 'http://peter-kafka03.foo.bar:8081'
32 | }, default_value_schema=value_schema)
33 |
34 | avroProducer.produce(topic='peter-avro3', value=value)
35 | avroProducer.flush()
--------------------------------------------------------------------------------
/chapter10/예제/예제 10-1:
--------------------------------------------------------------------------------
1 | {"namespace": "student.avro",
2 | "type": "record",
3 | "doc": "This is an example of Avro.",
4 | "name": "Student",
5 | "fields": [
6 | {"name": "name", "type": "string", "doc": "Name of the student"},
7 | {"name": "class", "type": "int", "doc": "Class of the student"}
8 | ]
9 | }
--------------------------------------------------------------------------------
/chapter10/예제/예제 10-10:
--------------------------------------------------------------------------------
1 | from confluent_kafka import avro
2 | from confluent_kafka.avro import AvroProducer
3 |
4 | value_schema_str = """
5 | {"namespace": "student.avro",
6 | "type": "record",
7 | "doc": "This is an example of Avro.",
8 | "name": "Student",
9 | "fields": [
10 | {"name": "name", "type": ["null", "string"], "default": null, "doc": "Name of the student"},
11 | {"name": "class", "type": "int", "default": 1, "doc": "Class of the student"}
12 | ]
13 | }
14 | """
15 |
16 | value_schema = avro.loads(value_schema_str)
17 | value = {"name": "Peter", "class": 1} # message to send
18 |
19 | def delivery_report(err, msg):
20 | """ Called once for each message produced to indicate delivery result.
21 | Triggered by poll() or flush(). """
22 | if err is not None:
23 | print('Message delivery failed: {}'.format(err))
24 | else:
25 | print('Message delivered to {} [{}]'.format(msg.topic(), msg.partition()))
26 |
27 | avroProducer = AvroProducer({
28 | 'bootstrap.servers': 'peter-kafka01.foo.bar,peter-kafka02.foo.bar,peter-kafka03.foo.bar',
29 | 'on_delivery': delivery_report,
30 | 'schema.registry.url': 'http://peter-kafka03.foo.bar:8081'
31 | }, default_value_schema=value_schema)
32 |
33 | avroProducer.produce(topic='peter-avro3', value=value)
34 | avroProducer.flush()
--------------------------------------------------------------------------------
/chapter10/예제/예제 10-11:
--------------------------------------------------------------------------------
1 | from confluent_kafka import avro
2 | from confluent_kafka.avro import AvroConsumer
3 | from confluent_kafka.avro.serializer import SerializerError
4 |
5 | value_schema_str = """
6 | {"namespace": "student.avro",
7 | "type": "record",
8 | "doc": "This is an example of Avro.",
9 | "name": "Student",
10 | "fields": [
11 | {"name": "name", "type": ["null", "string"], "default": null, "doc": "Name of the student"},
12 | {"name": "class", "type": "int", "default": 1, "doc": "Class of the student"}
13 | ]
14 | }
15 | """
16 |
17 | value_schema = avro.loads(value_schema_str)
18 |
19 | c = AvroConsumer({
20 | 'bootstrap.servers': 'peter-kafka01.foo.bar,peter-kafka02.foo.bar,peter-kafka03.foo.bar',
21 | 'group.id': 'python-groupid01',
22 | 'auto.offset.reset': 'earliest',
23 | 'schema.registry.url': 'http://peter-kafka03.foo.bar:8081'},reader_value_schema=value_schema)
24 |
25 | c.subscribe(['peter-avro3'])
26 |
27 | while True:
28 | try:
29 | msg = c.poll(10)
30 |
31 | except SerializerError as e:
32 | print("Message deserialization failed for {}: {}".format(msg, e))
33 | break
34 |
35 | if msg is None:
36 | continue
37 |
38 | if msg.error():
39 | print("AvroConsumer error: {}".format(msg.error()))
40 | continue
41 |
42 | print(msg.value())
43 |
44 | c.close()
--------------------------------------------------------------------------------
/chapter10/예제/예제 10-12:
--------------------------------------------------------------------------------
1 | {"namespace": "student.avro",
2 | "type": "record",
3 | "doc": "This is an example of Avro.",
4 | "name": "Student",
5 | "fields": [
6 | {"name": "name", "type": ["null", "string"], "default": null, "doc": "Name of the student"},
7 | {"name": "class", "type": "int", "default": 1, "doc": "Class of the student"},
8 | {"name": "age", "type": "int", "default": 1, "doc": "Age of the student"}
9 | ]
10 | }
--------------------------------------------------------------------------------
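Before producing with the evolved schema above (the added age field), it can be tested against the latest registered version using the registry's compatibility endpoint. A sketch with the requests package; the subject name peter-avro3-value is an assumption based on the peter-avro3 topic used by the _v1/_v2 scripts:

import json
import requests

REGISTRY = 'http://peter-kafka03.foo.bar:8081'
SUBJECT = 'peter-avro3-value'  # assumed subject name: value schema of topic peter-avro3

new_schema = {
    "namespace": "student.avro", "type": "record", "name": "Student",
    "doc": "This is an example of Avro.",
    "fields": [
        {"name": "name", "type": ["null", "string"], "default": None, "doc": "Name of the student"},
        {"name": "class", "type": "int", "default": 1, "doc": "Class of the student"},
        {"name": "age", "type": "int", "default": 1, "doc": "Age of the student"}
    ]
}

# Ask the registry whether the new schema is compatible with the latest registered version.
r = requests.post(
    '{}/compatibility/subjects/{}/versions/latest'.format(REGISTRY, SUBJECT),
    headers={'Content-Type': 'application/vnd.schemaregistry.v1+json'},
    data=json.dumps({'schema': json.dumps(new_schema)}))
print(r.json())  # e.g. {"is_compatible": true}
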
/chapter10/예제/예제 10-13:
--------------------------------------------------------------------------------
1 | from confluent_kafka import avro
2 | from confluent_kafka.avro import AvroProducer
3 |
4 | value_schema_str = """
5 | {"namespace": "student.avro",
6 | "type": "record",
7 | "doc": "This is an example of Avro.",
8 | "name": "Student",
9 | "fields": [
10 | {"name": "name", "type": ["null", "string"], "default": null, "doc": "Name of the student"},
11 | {"name": "class", "type": "int", "default": 1, "doc": "Class of the student"},
12 | {"name": "age", "type": "int", "default": 1, "doc": "Age of the student"}
13 | ]
14 | }
15 | """
16 |
17 | value_schema = avro.loads(value_schema_str)
18 | value = {"name": "Peter", "class": 1, "age": 2} # message to send
19 |
20 | def delivery_report(err, msg):
21 | """ Called once for each message produced to indicate delivery result.
22 | Triggered by poll() or flush(). """
23 | if err is not None:
24 | print('Message delivery failed: {}'.format(err))
25 | else:
26 | print('Message delivered to {} [{}]'.format(msg.topic(), msg.partition()))
27 |
28 | avroProducer = AvroProducer({
29 | 'bootstrap.servers': 'peter-kafka01.foo.bar,peter-kafka02.foo.bar,peter-kafka03.foo.bar',
30 | 'on_delivery': delivery_report,
31 | 'schema.registry.url': 'http://peter-kafka03.foo.bar:8081'
32 | }, default_value_schema=value_schema)
33 |
34 | avroProducer.produce(topic='peter-avro3', value=value)
35 | avroProducer.flush()
--------------------------------------------------------------------------------
/chapter10/예제/예제 10-14:
--------------------------------------------------------------------------------
1 | from confluent_kafka import avro
2 | from confluent_kafka.avro import AvroConsumer
3 | from confluent_kafka.avro.serializer import SerializerError
4 |
5 | value_schema_str = """
6 | {"namespace": "student.avro",
7 | "type": "record",
8 | "doc": "This is an example of Avro.",
9 | "name": "Student",
10 | "fields": [
11 | {"name": "name", "type": ["null", "string"], "default": null, "doc": "Name of the student"},
12 | {"name": "class", "type": "int", "default": 1, "doc": "Class of the student"},
13 | {"name": "age", "type": "int", "default": 1, "doc": "Age of the student"}
14 | ]
15 | }
16 | """
17 |
18 | value_schema = avro.loads(value_schema_str)
19 |
20 | c = AvroConsumer({
21 | 'bootstrap.servers': 'peter-kafka01.foo.bar,peter-kafka02.foo.bar,peter-kafka03.foo.bar',
22 | 'group.id': 'python-groupid01',
23 | 'auto.offset.reset': 'earliest',
24 | 'schema.registry.url': 'http://peter-kafka03.foo.bar:8081'},reader_value_schema=value_schema)
25 |
26 | c.subscribe(['peter-avro3'])
27 |
28 | while True:
29 | try:
30 | msg = c.poll(10)
31 |
32 | except SerializerError as e:
33 | print("Message deserialization failed for {}: {}".format(msg, e))
34 | break
35 |
36 | if msg is None:
37 | continue
38 |
39 | if msg.error():
40 | print("AvroConsumer error: {}".format(msg.error()))
41 | continue
42 |
43 | print(msg.value())
44 |
45 | c.close()
--------------------------------------------------------------------------------
/chapter10/예제/예제 10-2:
--------------------------------------------------------------------------------
1 | listeners=http://0.0.0.0:8081
2 | kafkastore.bootstrap.servers=PLAINTEXT://peter-kafka01.foo.bar:9092,peter-kafka02.foo.bar:9092,peter-kafka03.foo.bar:9092
3 | kafkastore.topic=_schemas
4 | schema.compatibility.level=full
--------------------------------------------------------------------------------
/chapter10/예제/예제 10-3:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=schema registry
3 | After=network.target
4 |
5 | [Service]
6 | Type=simple
7 | ExecStart=/usr/local/confluent/bin/schema-registry-start /usr/local/confluent/etc/schema-registry/schema-registry.properties
8 | Restart=always
9 |
10 | [Install]
11 | WantedBy=multi-user.target
--------------------------------------------------------------------------------
/chapter10/예제/예제 10-4:
--------------------------------------------------------------------------------
1 | #https://github.com/confluentinc/confluent-kafka-python
2 | from confluent_kafka import avro
3 | from confluent_kafka.avro import AvroProducer
4 |
5 | value_schema_str = """
6 | {"namespace": "student.avro",
7 | "type": "record",
8 | "doc": "This is an example of Avro.",
9 | "name": "Student",
10 | "fields": [
11 | {"name": "name", "type": ["null", "string"], "default": null, "doc": "Name of the student"},
12 | {"name": "class", "type": "int", "default": 1, "doc": "Class of the student"}
13 | ]
14 | }
15 | """
16 |
17 | value_schema = avro.loads(value_schema_str)
18 | value = {"name": "Peter", "class": 1} # message to send
19 |
20 | def delivery_report(err, msg):
21 | """ Called once for each message produced to indicate delivery result.
22 | Triggered by poll() or flush(). """
23 | if err is not None:
24 | print('Message delivery failed: {}'.format(err))
25 | else:
26 | print('Message delivered to {} [{}]'.format(msg.topic(), msg.partition()))
27 |
28 | avroProducer = AvroProducer({
29 | 'bootstrap.servers': 'peter-kafka01.foo.bar,peter-kafka02.foo.bar,peter-kafka03.foo.bar',
30 | 'on_delivery': delivery_report,
31 | 'schema.registry.url': 'http://peter-kafka03.foo.bar:8081'
32 | }, default_value_schema=value_schema)
33 |
34 | avroProducer.produce(topic='peter-avro2', value=value)
35 | avroProducer.flush()
--------------------------------------------------------------------------------
/chapter10/예제/예제 10-5:
--------------------------------------------------------------------------------
1 | #https://github.com/confluentinc/confluent-kafka-python
2 | from confluent_kafka import avro
3 | from confluent_kafka.avro import AvroConsumer
4 | from confluent_kafka.avro.serializer import SerializerError
5 |
6 | value_schema_str = """
7 | {"namespace": "student.avro",
8 | "type": "record",
9 | "doc": "This is an example of Avro.",
10 | "name": "Student",
11 | "fields": [
12 | {"name": "name", "type": ["null", "string"], "default": null, "doc": "Name of the student"},
13 | {"name": "class", "type": "int", "default": 1, "doc": "Class of the student"}
14 | ]
15 | }
16 | """
17 |
18 | value_schema = avro.loads(value_schema_str)
19 |
20 | c = AvroConsumer({
21 | 'bootstrap.servers': 'peter-kafka01.foo.bar,peter-kafka02.foo.bar,peter-kafka03.foo.bar',
22 | 'group.id': 'python-groupid01',
23 | 'auto.offset.reset': 'earliest',
24 | 'schema.registry.url': 'http://peter-kafka03.foo.bar:8081'},reader_value_schema=value_schema)
25 |
26 | c.subscribe(['peter-avro2'])
27 |
28 | while True:
29 | try:
30 | msg = c.poll(10)
31 |
32 | except SerializerError as e:
33 | print("Message deserialization failed for {}: {}".format(msg, e))
34 | break
35 |
36 | if msg is None:
37 | continue
38 |
39 | if msg.error():
40 | print("AvroConsumer error: {}".format(msg.error()))
41 | continue
42 |
43 | print(msg.value())
44 |
45 | c.close()
--------------------------------------------------------------------------------
/chapter10/예제/예제 10-6:
--------------------------------------------------------------------------------
1 | #https://github.com/confluentinc/confluent-kafka-python
2 | from confluent_kafka import avro
3 | from confluent_kafka.avro import AvroProducer
4 |
5 | value_schema_str = """
6 | {"namespace": "student.avro",
7 | "type": "record",
8 | "doc": "This is an example of Avro.",
9 | "name": "Student",
10 | "fields": [
11 | {"name": "first_name", "type": ["null", "string"], "default": null, "doc": "First name of the student"},
12 | {"name": "last_name", "type": ["null", "string"], "default": null, "doc": "Last name of the student"},
13 | {"name": "class", "type": "int", "default": 1, "doc": "Class of the student"}
14 | ]
15 | }
16 | """
17 |
18 | value_schema = avro.loads(value_schema_str)
19 | value = {"first_name": "Peter", "last_name": "Parker", "class": 1} # message to send
20 |
21 | def delivery_report(err, msg):
22 | """ Called once for each message produced to indicate delivery result.
23 | Triggered by poll() or flush(). """
24 | if err is not None:
25 | print('Message delivery failed: {}'.format(err))
26 | else:
27 | print('Message delivered to {} [{}]'.format(msg.topic(), msg.partition()))
28 |
29 | avroProducer = AvroProducer({
30 | 'bootstrap.servers': 'peter-kafka01.foo.bar,peter-kafka02.foo.bar,peter-kafka03.foo.bar',
31 | 'on_delivery': delivery_report,
32 | 'schema.registry.url': 'http://peter-kafka03.foo.bar:8081'
33 | }, default_value_schema=value_schema)
34 |
35 | avroProducer.produce(topic='peter-avro2', value=value)
36 | avroProducer.flush()
--------------------------------------------------------------------------------
/chapter10/예제/예제 10-7:
--------------------------------------------------------------------------------
1 | #https://github.com/confluentinc/confluent-kafka-python
2 | from confluent_kafka import avro
3 | from confluent_kafka.avro import AvroConsumer
4 | from confluent_kafka.avro.serializer import SerializerError
5 |
6 | value_schema_str = """
7 | {"namespace": "student.avro",
8 | "type": "record",
9 | "doc": "This is an example of Avro.",
10 | "name": "Student",
11 | "fields": [
12 | {"name": "first_name", "type": ["null", "string"], "default": null, "doc": "First name of the student"},
13 | {"name": "last_name", "type": ["null", "string"], "default": null, "doc": "Last name of the student"},
14 | {"name": "class", "type": "int", "default": 1, "doc": "Class of the student"}
15 | ]
16 | }
17 | """
18 |
19 | value_schema = avro.loads(value_schema_str)
20 |
21 | c = AvroConsumer({
22 | 'bootstrap.servers': 'peter-kafka01.foo.bar,peter-kafka02.foo.bar,peter-kafka03.foo.bar',
23 | 'group.id': 'python-groupid02',
24 | 'auto.offset.reset': 'earliest',
25 | 'schema.registry.url': 'http://peter-kafka03.foo.bar:8081'},reader_value_schema=value_schema)
26 |
27 | c.subscribe(['peter-avro2'])
28 |
29 | while True:
30 | try:
31 | msg = c.poll(10)
32 |
33 | except SerializerError as e:
34 | print("Message deserialization failed for {}: {}".format(msg, e))
35 | break
36 |
37 | if msg is None:
38 | continue
39 |
40 | if msg.error():
41 | print("AvroConsumer error: {}".format(msg.error()))
42 | continue
43 |
44 | print(msg.value())
45 |
46 | c.close()
--------------------------------------------------------------------------------
/chapter10/예제/예제 10-8:
--------------------------------------------------------------------------------
1 | listeners=http://0.0.0.0:8081
2 | kafkastore.bootstrap.servers=PLAINTEXT://peter-kafka01.foo.bar:9092,peter-kafka02.foo.bar:9092,peter-kafka03.foo.bar:9092
3 | kafkastore.topic=_schemas
4 | schema.compatibility.level=forward
--------------------------------------------------------------------------------
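The book applies the forward setting above by editing schema-registry.properties and restarting the service (P356). As an aside, the same level can also be changed at runtime through the registry's /config REST endpoint; a sketch with the requests package:

import requests

REGISTRY = 'http://peter-kafka03.foo.bar:8081'

# REST equivalent of the schema.compatibility.level=forward line above.
r = requests.put(REGISTRY + '/config',
                 headers={'Content-Type': 'application/vnd.schemaregistry.v1+json'},
                 data='{"compatibility": "FORWARD"}')
print(r.json())                                   # {"compatibility": "FORWARD"}
print(requests.get(REGISTRY + '/config').json())  # verify, as on P356
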
/chapter10/예제/예제 10-9:
--------------------------------------------------------------------------------
1 | {"namespace": "student.avro",
2 | "type": "record",
3 | "doc": "This is an example of Avro.",
4 | "name": "Student",
5 | "fields": [
6 | {"name": "name", "type": ["null", "string"], "default": null, "doc": "Name of the student"},
7 | {"name": "class", "type": "int", "default": 1, "doc": "Class of the student"}
8 | ]
9 | }
--------------------------------------------------------------------------------
/chapter11/11_commands.txt:
--------------------------------------------------------------------------------
1 | P371
2 | cd kafka2/
3 | cd chapter2/ansible_playbook
4 | ansible-playbook -i hosts kafka1.yml
5 |
6 | P372
7 | echo "hello-1" > test.txt
8 | echo "hello-2" >> test.txt
9 | echo "hello-3" >> test.txt
10 | cat test.txt
11 |
12 | P373
13 | sudo vi /usr/local/kafka/config/connect-file-source.properties
14 | sudo vi /usr/local/kafka/config/connect-standalone.properties
15 |
16 | P375
17 | sudo /usr/local/kafka/bin/connect-standalone.sh -daemon /usr/local/kafka/config/connect-standalone.properties /usr/local/kafka/config/connect-file-source.properties
18 | curl http://localhost:8083/connectors/local-file-source | python -m json.tool
19 |
20 | P376
21 | curl --header "Content-Type: application/json" --header "Accept: application/json" --request PUT --data '{ "name":"local-file-sink", "connector.class":"FileStreamSink", "tasks.max":"1", "file":"/home/ec2-user/test.sink.txt", "topics":"connect-test"}' http://localhost:8083/connectors/local-file-sink/config
22 | curl http://localhost:8083/connectors/local-file-sink | python -m json.tool
23 |
24 | P377
25 | cat test.sink.txt
26 | sudo pkill -f connect
27 |
28 | P381
29 | sudo cat /usr/local/kafka/config/connect-distributed.properties
30 |
31 | P382
32 | sudo systemctl start kafka-connect
33 | sudo systemctl status kafka-connect
34 |
35 | P387
36 | cd kafka2/
37 | cd chapter2/ansible_playbook
38 | ansible-playbook -i hosts kafka2.yml
39 | /usr/local/kafka/bin/kafka-topics.sh --bootstrap-server peter-zk01.foo.bar:9092 --create --topic peter-mirror01 --partitions 1 --replication-factor 3
40 | /usr/local/kafka/bin/kafka-console-producer.sh --bootstrap-server peter-zk01.foo.bar:9092 --topic peter-mirror01
41 | curl --header "Content-Type: application/json" --header "Accept: application/json" --request PUT --data '{"name": "peter-mirrormaker2","connector.class": "org.apache.kafka.connect.mirror.MirrorSourceConnector","tasks.max": "1","source.cluster.alias": "src","target.cluster.alias": "dst","source.cluster.bootstrap.servers": "peter-zk01.foo.bar:9092,peter-zk02.foo.bar:9092,peter-zk03.foo.bar:9092","target.cluster.bootstrap.servers": "peter-kafka01.foo.bar:9092,peter-kafka02.foo.bar:9092,peter-kafka03.foo.bar:9092","replication.factor": "3","topics": ".*" }' http://peter-kafka01.foo.bar:8083/connectors/peter-mirrormaker2/config
42 |
43 | P388
44 | curl http://peter-kafka01.foo.bar:8083/connectors/peter-mirrormaker2/status | python -m json.tool
45 |
46 | P389
47 | /usr/local/kafka/bin/kafka-topics.sh --bootstrap-server peter-kafka01.foo.bar:9092 --list
48 | /usr/local/kafka/bin/kafka-console-consumer.sh --bootstrap-server peter-kafka01.foo.bar:9092 --topic src.peter-mirror01 --from-beginning
49 | sudo systemctl stop kafka-connect
50 | curl http://peter-kafka02.foo.bar:8083/connectors/peter-mirrormaker2/status | python -m json.tool
51 |
52 | P390
53 | curl http://peter-kafka02.foo.bar:8083/connectors/peter-mirrormaker2/status | python -m json.tool
54 |
55 | P391
56 | sudo systemctl stop kafka-connect
57 | sudo systemctl stop kafka-server
--------------------------------------------------------------------------------
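The connector registration and status checks above (P375, P376, P388) all go through the Kafka Connect REST API; the same calls can be made from Python. A sketch with the requests package, reusing the P376 payload:

import requests

CONNECT = 'http://localhost:8083'  # the Connect worker's REST port

# Create or update the file sink connector (same payload as the P376 curl command).
sink_config = {
    "name": "local-file-sink",
    "connector.class": "FileStreamSink",
    "tasks.max": "1",
    "file": "/home/ec2-user/test.sink.txt",
    "topics": "connect-test",
}
requests.put(CONNECT + '/connectors/local-file-sink/config', json=sink_config)

# Inspect the connector definition and its running status.
print(requests.get(CONNECT + '/connectors/local-file-sink').json())
print(requests.get(CONNECT + '/connectors/local-file-sink/status').json())
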
/chapter11/예제/예제 11-1:
--------------------------------------------------------------------------------
1 | name=local-file-source
2 | connector.class=FileStreamSource
3 | tasks.max=1
4 | file=/home/ec2-user/test.txt
5 | topic=connect-test
--------------------------------------------------------------------------------
/chapter11/예제/예제 11-2:
--------------------------------------------------------------------------------
1 | bootstrap.servers=localhost:9092
2 | key.converter=org.apache.kafka.connect.json.JsonConverter
3 | value.converter=org.apache.kafka.connect.json.JsonConverter
4 | key.converter.schemas.enable=false
5 | value.converter.schemas.enable=false
6 | offset.storage.file.filename=/tmp/connect.offsets
7 | offset.flush.interval.ms=10000
--------------------------------------------------------------------------------
/chapter11/예제/예제 11-3:
--------------------------------------------------------------------------------
1 | bootstrap.servers=peter-kafka01.foo.bar:9092,peter-kafka02.foo.bar:9092,peter-kafka03.foo.bar:9092
2 | group.id=peter-connect-cluster
3 | key.converter=org.apache.kafka.connect.converters.ByteArrayConverter
4 | value.converter=org.apache.kafka.connect.converters.ByteArrayConverter
5 | key.converter.schemas.enable=false
6 | value.converter.schemas.enable=false
7 | offset.storage.topic=connect-offsets
8 | offset.storage.replication.factor=3
9 | offset.storage.partitions=25
10 | config.storage.topic=connect-configs
11 | config.storage.replication.factor=3
12 | config.storage.partitions=1
13 | status.storage.topic=connect-status
14 | status.storage.replication.factor=3
15 | status.storage.partitions=5
16 | offset.flush.interval.ms=10000
--------------------------------------------------------------------------------
/chapter12/12_commands.txt:
--------------------------------------------------------------------------------
1 | P400
2 | cd kafka2/
3 | cd chapter12/ansible_playbook
4 | ansible-playbook -i hosts site.yml
5 | ansible-playbook -i hosts expoter.yml
6 | ansible-playbook -i hosts monitoring.yml
7 |
8 | P404
9 | curl http://peter-zk01.foo.bar:8083/connectors
10 | curl --header "Content-Type: application/json" --header "Accept: application/json" --request PUT --data '{"name": "peter-mirrormaker2","connector.class": "org.apache.kafka.connect.mirror.MirrorSourceConnector","tasks.max": "1","source.cluster.alias": "src","target.cluster.alias": "dst","source.cluster.bootstrap.servers": "peter-kafka01.foo.bar:9092,peter-kafka02.foo.bar:9092,peter-kafka03.foo.bar:9092","target.cluster.bootstrap.servers": "peter-zk01.foo.bar:9092,peter-zk02.foo.bar:9092,peter-zk03.foo.bar:9092","replication.factor": "3","topics": "peter-avro01-kafka1" }' http://peter-zk01.foo.bar:8083/connectors/peter-mirrormaker2/config
11 |
12 | P405
13 | curl http://peter-zk01.foo.bar:8083/connectors/peter-mirrormaker2/status | python -m json.tool
14 |
15 | P409
16 | git clone https://github.com/onlybooks/kafka2.git
17 | sudo yum -y install python3
18 | python3 -m venv venv12
19 | source venv12/bin/activate
20 | pip install confluent-kafka[avro]
21 | pip install names
22 | pip install elasticsearch
23 |
24 | P410
25 | python kafka2/chapter12/python/producer-1_kafka-1_v1.py
26 |
27 | P412
28 | python kafka2/chapter12/python/consumer-1_kafka-1_v1.py
29 |
30 | P413
31 | curl -X GET 'http://peter-kafka02.foo.bar:9200/_cat/health?v'
32 | curl -X GET 'http://peter-kafka02.foo.bar:9200/_cat/indices?v'
33 |
34 | P416
35 | python kafka2/chapter12/python/consumer_kafka-2_producer_es_v1.py
36 |
37 | P417
38 | curl -X GET 'http://peter-kafka02.foo.bar:9200/_cat/indices?v'
39 |
40 | P426
41 | python kafka2/chapter12/python/consumer-1_kafka-1_v2.py
42 |
43 | P428
44 | python kafka2/chapter12/python/producer-1_kafka-1_v2.py
45 |
46 | P430
47 | python kafka2/chapter12/python/consumer_kafka-2_producer_es_v2.py
48 |
49 | P431
50 | curl http://peter-kafka03.foo.bar:8081/schemas | python -m json.tool
51 |
52 | P432
53 | curl -X GET 'http://peter-kafka02.foo.bar:9200/_cat/indices?v'
54 |
55 | P433
56 | python kafka2/chapter12/python/producer-1_kafka-1_v2.py
--------------------------------------------------------------------------------
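The consumer_kafka-2_producer_es scripts run above are not reproduced in this listing. Purely as a rough illustration of that step, an Avro consumer that indexes each decoded record into Elasticsearch, here is a sketch; the bootstrap servers, group id, topic, and index name are placeholders, and the elasticsearch 7.x Python client installed on P409 is assumed:

from confluent_kafka.avro import AvroConsumer
from elasticsearch import Elasticsearch

# All connection details below are placeholders; the real scripts may differ.
es = Elasticsearch(['http://peter-kafka02.foo.bar:9200'])

c = AvroConsumer({
    'bootstrap.servers': 'peter-kafka01.foo.bar,peter-kafka02.foo.bar,peter-kafka03.foo.bar',
    'group.id': 'python-groupid-es',
    'auto.offset.reset': 'earliest',
    'schema.registry.url': 'http://peter-kafka03.foo.bar:8081'})
c.subscribe(['peter-avro01-kafka1'])  # placeholder topic name

while True:
    msg = c.poll(10)
    if msg is None:
        continue
    if msg.error():
        print("AvroConsumer error: {}".format(msg.error()))
        continue
    # Index the decoded Avro record as one Elasticsearch document.
    es.index(index='student', body=msg.value())
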
/chapter12/ansible_playbook/delete-topics.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | KAFKAPATH="/home/ec2-user/kafka_2.12-2.6.0/bin/kafka-topics.sh"
4 | TOPICS=`${KAFKAPATH} --zookeeper peter-zk01.foo.bar:2181/$1 --list`
5 |
6 | for topic in $TOPICS
7 | do
8 | if [ "${topic}" != "__consumer_offsets" ]; then
9 | ${KAFKAPATH} --zookeeper peter-zk01.foo.bar:2181/$1 --delete --topic ${topic}
10 | fi
11 | done
--------------------------------------------------------------------------------
/chapter12/ansible_playbook/es.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: elasticsearch
3 | become: true
4 | connection: ssh
5 | roles:
6 | - docker
7 | tasks:
8 | - name: start elasticsearch container
9 | docker_container:
10 | name: elasticsearch
11 | image: docker.elastic.co/elasticsearch/elasticsearch:7.12.1
12 | network_mode: host
13 | env:
14 | discovery.type: single-node
15 | ES_JAVA_OPTS: "-Xms1024m -Xmx1024m"
16 |
17 | - name: start kibana container
18 | docker_container:
19 | name: kibana
20 | image: docker.elastic.co/kibana/kibana:7.12.1
21 | network_mode: host
22 | env:
23 | ELASTICSEARCH_HOSTS: "http://peter-kafka02.foo.bar:9200"
--------------------------------------------------------------------------------
/chapter12/ansible_playbook/expoter.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts:
3 | - zkhosts
4 | - kafkahosts
5 | become: true
6 | connection: ssh
7 | roles:
8 | - exporterall
--------------------------------------------------------------------------------
/chapter12/ansible_playbook/group_vars/all.yml:
--------------------------------------------------------------------------------
1 | user_name: ec2-user
2 | kafkaversion: 2.6.0
--------------------------------------------------------------------------------
/chapter12/ansible_playbook/group_vars/kafkahosts.yml:
--------------------------------------------------------------------------------
1 | brokerid: "{{ inventory_hostname | regex_search('(peter-kafka0[1-9]\\.foo\\.bar)') | regex_replace('\\.foo\\.bar', '') | regex_replace('^peter-kafka0', '') }}"
2 | zookeeperinfo: peter-zk01.foo.bar:2181,peter-zk02.foo.bar:2181,peter-zk03.foo.bar:2181
3 | dir_path: /data/kafka-logs
--------------------------------------------------------------------------------
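The brokerid expression above chains Jinja2 regex filters to turn an inventory hostname into a numeric broker id. For readers who want to see exactly what it does, here is the same transformation written out in plain Python:

import re

def broker_id(hostname):
    # regex_search: match hostnames of the form peter-kafka01..09.foo.bar
    m = re.search(r'(peter-kafka0[1-9]\.foo\.bar)', hostname)
    if m is None:
        return ''
    # regex_replace twice: strip the .foo.bar domain, then the peter-kafka0 prefix
    host = re.sub(r'\.foo\.bar', '', m.group(1))
    return re.sub(r'^peter-kafka0', '', host)

print(broker_id('peter-kafka02.foo.bar'))  # -> '2'
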
/chapter12/ansible_playbook/group_vars/zkhosts.yml:
--------------------------------------------------------------------------------
1 | zookeeperversion: zookeeper-3.5.9
2 | myid: "{{ inventory_hostname | regex_search('(peter-zk0[0-9]\\.foo\\.bar)') | regex_replace('\\.foo\\.bar', '') | regex_replace('^peter-zk0', '') }}"
3 | dir_path: /data/zk
--------------------------------------------------------------------------------
/chapter12/ansible_playbook/hosts:
--------------------------------------------------------------------------------
1 | [zkhosts]
2 | peter-zk01.foo.bar
3 | peter-zk02.foo.bar
4 | peter-zk03.foo.bar
5 |
6 | [kafkahosts]
7 | peter-kafka01.foo.bar
8 | peter-kafka02.foo.bar
9 | peter-kafka03.foo.bar
10 |
11 | [schemaregistry]
12 | peter-kafka03.foo.bar
13 |
14 | [elasticsearch]
15 | peter-kafka02.foo.bar
16 |
17 | [monitoring]
18 | peter-kafka01.foo.bar
--------------------------------------------------------------------------------
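With this inventory in place, reachability of every group can be verified before running any playbook (a hedged sketch, assuming the SSH key is loaded via ssh-agent as in chapter 2):

  ansible -i hosts all -m ping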
/chapter12/ansible_playbook/kafka3.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: kafkahosts
3 | become: true
4 | connection: ssh
5 | vars:
6 | - zookeeperinfo: peter-zk01.foo.bar:2181,peter-zk02.foo.bar:2181,peter-zk03.foo.bar:2181/kafka3
7 | - dir_path: /data/kafka3-logs
8 | roles:
9 | - kafka
--------------------------------------------------------------------------------
/chapter12/ansible_playbook/kafka4.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: zkhosts
3 | become: true
4 | connection: ssh
5 | vars:
6 | - brokerid: "{{ inventory_hostname | regex_search('(peter-zk0[1-9]\\.foo\\.bar)') | regex_replace('\\.foo\\.bar', '') | regex_replace('^peter-zk0', '') }}"
7 | - zookeeperinfo: peter-zk01.foo.bar:2181,peter-zk02.foo.bar:2181,peter-zk03.foo.bar:2181/kafka4
8 | - dir_path: /data/kafka4-logs
9 | roles:
10 | - kafka
11 | tasks:
12 | - name: copy kafka conf file
13 | copy:
14 | src: roles/kafka/files/confluentinc-kafka-connect-avro-converter-5.5.3.zip
15 | dest: /opt
16 | mode: '0644'
17 | backup: no
18 |
19 | - name: unarchive avro converter
20 | unarchive:
21 | src: /opt/confluentinc-kafka-connect-avro-converter-5.5.3.zip
22 | dest: /usr/local/kafka
23 | remote_src: yes
24 |
25 | - name: make sure a service is running
26 | systemd:
27 | state: started
28 | name: kafka-connect
29 | enabled: yes
--------------------------------------------------------------------------------
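The kafka4.yml play above deploys the second broker cluster onto the zkhosts machines (chroot /kafka4), copies and unpacks the Avro converter plugin, and starts kafka-connect. A hedged check, assuming Kafka Connect's default REST port 8083:

  sudo systemctl status kafka-connect              # on any zkhosts machine
  curl http://peter-zk01.foo.bar:8083/connectors   # should return a JSON array of deployed connectors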
/chapter12/ansible_playbook/local-kafka.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: 127.0.0.1
3 | connection: local
4 | tasks:
5 | - name: download kafka from web
6 | get_url:
7 | url: https://archive.apache.org/dist/kafka/{{ kafkaversion }}/kafka_2.12-{{ kafkaversion }}.tgz
8 | dest: /home/{{ user_name }}
9 | mode: '0600'
10 |
11 | - name: unarchive kafka
12 | unarchive:
13 | src: /home/{{ user_name }}/kafka_2.12-{{ kafkaversion }}.tgz
14 | dest: /home/{{ user_name }}
15 |
16 | - name: check delete-topics.sh exists
17 | stat:
18 | path: /home/{{ user_name }}/kafka2/chapter12/ansible_playbook/delete-topics.sh
19 | register: stat_result
20 |
21 | - name: print a debug message
22 | debug:
23 | msg: "check /home/{{ user_name }}/kafka2/chapter12/ansible_playbook/delete-topics.sh"
24 | when: not stat_result.stat.exists
25 |
26 | - name: run a script with arguments
27 | script:
28 | cmd: "/home/{{ user_name }}/kafka2/chapter12/ansible_playbook/delete-topics.sh {{ item }}"
29 | with_items:
30 | - kafka3
31 | - kafka4
--------------------------------------------------------------------------------
/chapter12/ansible_playbook/monitoring.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: monitoring
3 | become: true
4 | connection: ssh
5 | roles:
6 | - docker
7 | - monitoring
--------------------------------------------------------------------------------
/chapter12/ansible_playbook/roles/common/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Set timezone to Asia/Seoul
3 | timezone:
4 | name: Asia/Seoul
5 |
6 | - name: install Java and tools
7 | yum:
8 | name: ['dstat', 'java-1.8.0-openjdk', 'java-1.8.0-openjdk-devel', 'krb5-workstation', 'git', 'python-pip']
9 | state: latest
10 |
11 | - name: install kazoo python package
12 | pip:
13 | name: kazoo
14 | extra_args: --user
--------------------------------------------------------------------------------
/chapter12/ansible_playbook/roles/docker/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: install docker
3 | yum:
4 | name: ['docker', 'python-pip']
5 | state: latest
6 |
7 | - name: install docker python package
8 | pip:
9 | name: docker
10 | extra_args: --user
11 |
12 | - name: make sure a service is running
13 | systemd:
14 | state: started
15 | name: docker
16 | enabled: True
--------------------------------------------------------------------------------
/chapter12/ansible_playbook/roles/exporterall/files/jmx-exporter.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=JMX Exporter for Kafka
3 | After=kafka-server.target
4 |
5 | [Service]
6 | Type=simple
7 | Restart=always
8 | ExecStart=/usr/bin/java -jar /usr/local/jmx/jmx_prometheus_httpserver-0.13.1-SNAPSHOT-jar-with-dependencies.jar 7071 /usr/local/jmx/jmx_prometheus_httpserver.yml
9 |
10 | [Install]
11 | WantedBy=multi-user.target
--------------------------------------------------------------------------------
/chapter12/ansible_playbook/roles/exporterall/files/jmx_prometheus_httpserver-0.13.1-SNAPSHOT-jar-with-dependencies.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/onlybooks/kafka2/6aeabf38dc3bceb95786a23df67f026926fe555a/chapter12/ansible_playbook/roles/exporterall/files/jmx_prometheus_httpserver-0.13.1-SNAPSHOT-jar-with-dependencies.jar
--------------------------------------------------------------------------------
/chapter12/ansible_playbook/roles/exporterall/files/jmx_prometheus_httpserver.yml:
--------------------------------------------------------------------------------
1 | hostPort: 127.0.0.1:9999
2 | ssl: false
3 | lowercaseOutputName: false
4 | lowercaseOutputLabelNames: false
5 | rules:
6 | - pattern: ".*"
--------------------------------------------------------------------------------
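The configuration above points the standalone JMX exporter at the broker's local JMX port 9999 (the JMX_PORT set in roles/kafka/files/jmx); per jmx-exporter.service it re-exposes those metrics on port 7071, which is the port scraped by Prometheus below. A hedged check after the exporterall role has run:

  curl -s http://peter-kafka01.foo.bar:7071/metrics | head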
/chapter12/ansible_playbook/roles/exporterall/files/kafka-exporter-stop.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | PIDS=$(ps ax | grep -i 'kafka\_exporter' | awk '{print $1}')
3 |
4 | if [ -z "$PIDS" ]; then
5 | echo "No kafka_exporter stop"
6 | exit 1
7 | else
8 | kill -s TERM $PIDS
9 | fi
--------------------------------------------------------------------------------
/chapter12/ansible_playbook/roles/exporterall/files/node-exporter.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Node Exporter
3 | After=network-online.target
4 |
5 | [Service]
6 | Type=simple
7 | ExecStart=/usr/local/node_exporter/node_exporter
8 |
9 | [Install]
10 | WantedBy=multi-user.target
--------------------------------------------------------------------------------
/chapter12/ansible_playbook/roles/exporterall/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: stop services
3 | systemd:
4 | state: stopped
5 | name: "{{ item }}"
6 | with_items:
7 | - kafka-exporter
8 | - node-exporter
9 | - jmx-exporter
10 | ignore_errors: yes
11 |
12 | - name: Download kafka exporter from web
13 | get_url:
14 | url: "{{ item }}"
15 | dest: /opt/
16 | mode: '0600'
17 | with_items:
18 | - https://github.com/danielqsj/kafka_exporter/releases/download/v1.2.0/kafka_exporter-1.2.0.linux-386.tar.gz
19 | - https://github.com/prometheus/node_exporter/releases/download/v1.0.1/node_exporter-1.0.1.linux-386.tar.gz
20 |
21 | - name: unarchive exporter
22 | unarchive:
23 | src: "/opt/{{ item }}"
24 | dest: /usr/local/
25 | remote_src: yes
26 | with_items:
27 | - kafka_exporter-1.2.0.linux-386.tar.gz
28 | - node_exporter-1.0.1.linux-386.tar.gz
29 |
30 | - name: Move to kafka_exporter
31 | copy:
32 | remote_src: yes
33 | src: /usr/local/kafka_exporter-1.2.0.linux-386/kafka_exporter
34 | dest: /usr/sbin/
35 | mode: '0755'
36 |
37 | - name: setup link node_exporter
38 | file:
39 | src: /usr/local/node_exporter-1.0.1.linux-386
40 | dest: /usr/local/node_exporter
41 | state: link
42 | force: yes
44 |
45 | - name: copy kafka exporter stop
46 | copy:
47 | src: kafka-exporter-stop.sh
48 | dest: /usr/sbin/kafka-exporter-stop.sh
49 | mode: '0755'
50 |
51 | - name: copy jmx exporter
52 | copy:
53 | src: jmx_prometheus_httpserver-0.13.1-SNAPSHOT-jar-with-dependencies.jar
54 | dest: /usr/local/jmx
55 | mode: '0644'
56 |
57 | - name: copy jmx exporter config
58 | copy:
59 | src: jmx_prometheus_httpserver.yml
60 | dest: /usr/local/jmx/jmx_prometheus_httpserver.yml
61 | mode: '0644'
62 |
63 | - name: copy kafka exporter in systemd
64 | template:
65 | src: kafka-exporter.service.j2
66 | dest: /etc/systemd/system/kafka-exporter.service
67 | owner: root
68 | group: root
69 | mode: '0644'
70 | backup: no
71 |
72 | - name: copy node, jmx exporter in systemd
73 | copy:
74 | src: "{{ item }}"
75 | dest: /etc/systemd/system/
76 | owner: root
77 | group: root
78 | mode: '0644'
79 | backup: no
80 | with_items:
81 | - node-exporter.service
82 | - jmx-exporter.service
83 |
84 | - name: just force systemd to reload configs
85 | systemd:
86 | daemon_reload: yes
87 |
88 | - name: make sure a service is running
89 | systemd:
90 | state: started
91 | name: "{{ item }}"
92 | enabled: yes
93 | with_items:
94 | - kafka-exporter
95 | - node-exporter
96 | - jmx-exporter
--------------------------------------------------------------------------------
/chapter12/ansible_playbook/roles/exporterall/templates/kafka-exporter.service.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=kafka-exporter
3 | After=network.target
4 |
5 | [Service]
6 | ExecStart=/usr/sbin/kafka_exporter --kafka.server={{ inventory_hostname }}:9092
7 | ExecStop=/usr/sbin/kafka-exporter-stop.sh
8 |
9 | [Install]
10 | WantedBy=multi-user.target
--------------------------------------------------------------------------------
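The kafka_exporter started by the unit template above serves Prometheus metrics on its default port 9308 (the port scraped in roles/monitoring/files/prometheus.yml). A hedged check, assuming the kafka_brokers gauge exposed by danielqsj/kafka_exporter:

  curl -s http://peter-kafka01.foo.bar:9308/metrics | grep ^kafka_brokers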
/chapter12/ansible_playbook/roles/kafka/files/confluentinc-kafka-connect-avro-converter-5.5.3.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/onlybooks/kafka2/6aeabf38dc3bceb95786a23df67f026926fe555a/chapter12/ansible_playbook/roles/kafka/files/confluentinc-kafka-connect-avro-converter-5.5.3.zip
--------------------------------------------------------------------------------
/chapter12/ansible_playbook/roles/kafka/files/connect-distributed.properties:
--------------------------------------------------------------------------------
1 | bootstrap.servers=peter-zk01.foo.bar:9092,peter-zk02.foo.bar:9092,peter-zk03.foo.bar:9092
2 | group.id=peter-connect-cluster
3 | key.converter=org.apache.kafka.connect.converters.ByteArrayConverter
4 | value.converter=org.apache.kafka.connect.converters.ByteArrayConverter
5 | #key.converter=io.confluent.connect.avro.AvroConverter
6 | #value.converter=io.confluent.connect.avro.AvroConverter
7 | #key.converter.schemas.enable=true
8 | #value.converter.schemas.enable=true
9 | #key.converter.schema.registry.url=http://peter-kafka03.foo.bar:8081
10 | #value.converter.schema.registry.url=http://peter-kafka03.foo.bar:8081
11 | offset.storage.topic=connect-offsets
12 | offset.storage.replication.factor=3
13 | offset.storage.partitions=25
14 | config.storage.topic=connect-configs
15 | config.storage.replication.factor=3
16 | config.storage.partitions=1
17 | status.storage.topic=connect-status
18 | status.storage.replication.factor=3
19 | status.storage.partitions=5
20 | offset.flush.interval.ms=10000
21 |
22 | plugin.path=/usr/local/kafka/confluentinc-kafka-connect-avro-converter-5.5.3
--------------------------------------------------------------------------------
/chapter12/ansible_playbook/roles/kafka/files/jmx:
--------------------------------------------------------------------------------
1 | JMX_PORT=9999
--------------------------------------------------------------------------------
/chapter12/ansible_playbook/roles/kafka/files/kafka-connect.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=kafka-connect
3 | After=network.target kafka-server.target
4 |
5 | [Service]
6 | Type=simple
7 | SyslogIdentifier=kafka-connect
8 | WorkingDirectory=/usr/local/kafka
9 | Restart=always
10 | ExecStart=/usr/local/kafka/bin/connect-distributed.sh /usr/local/kafka/config/connect-distributed.properties
11 | ExecStop=/usr/local/kafka/bin/connect-distributed.sh
12 |
13 | [Install]
14 | WantedBy=multi-user.target
--------------------------------------------------------------------------------
/chapter12/ansible_playbook/roles/kafka/files/kafka-server.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=kafka-server
3 | After=network.target
4 |
5 | [Service]
6 | Type=simple
7 | SyslogIdentifier=kafka-server
8 | WorkingDirectory=/usr/local/kafka
9 | EnvironmentFile=/usr/local/kafka/config/jmx
10 | Restart=always
11 | ExecStart=/usr/local/kafka/bin/kafka-server-start.sh /usr/local/kafka/config/server.properties
12 | ExecStop=/usr/local/kafka/bin/kafka-server-stop.sh
13 |
14 | [Install]
15 | WantedBy=multi-user.target
--------------------------------------------------------------------------------
/chapter12/ansible_playbook/roles/kafka/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart kafka-server
3 | systemd:
4 | name: kafka-server
5 | state: restarted
--------------------------------------------------------------------------------
/chapter12/ansible_playbook/roles/kafka/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: remove directory kafka data
3 | file:
4 | path: "{{ dir_path }}"
5 | state: absent
6 |
7 | - name: remove directory kafka
8 | file:
9 | path: "/usr/local/kafka_2.12-{{ kafkaversion }}"
10 | state: absent
11 |
12 | - name: make dir kafka
13 | file:
14 | path: "{{ item }}"
15 | state: directory
16 | mode: '0755'
17 | with_items:
18 | - "{{ dir_path }}"
19 | - /usr/local/jmx
20 |
21 | - name: download kafka from web
22 | get_url:
23 | url: https://archive.apache.org/dist/kafka/{{ kafkaversion }}/kafka_2.12-{{ kafkaversion }}.tgz
24 | dest: /opt/
25 | mode: '0600'
26 |
27 | - name: unarchive kafka
28 | unarchive:
29 | src: /opt/kafka_2.12-{{ kafkaversion }}.tgz
30 | dest: /usr/local
31 | creates: /usr/local/kafka_2.12-{{ kafkaversion }}
32 | remote_src: yes
33 |
34 | - name: setup link kafka
35 | file:
36 | path: /usr/local/kafka
37 | src: "/usr/local/kafka_2.12-{{ kafkaversion }}"
38 | state: link
39 | force: yes
40 |
41 | - name: copy kafka server conf files
42 | template:
43 | src: server.properties.j2
44 | dest: /usr/local/kafka/config/server.properties
45 | mode: '0644'
46 | backup: no
47 |
48 | - name: copy kafka conf file
49 | copy:
50 | src: "{{ item }}"
51 | dest: /usr/local/kafka/config/
52 | mode: '0644'
53 | backup: no
54 | with_items:
55 | - jmx
56 | - connect-distributed.properties
57 |
58 | - name: copy kafka server in systemd
59 | copy:
60 | src: "{{ item }}"
61 | dest: /etc/systemd/system/
62 | owner: root
63 | group: root
64 | mode: '0644'
65 | backup: no
66 | with_items:
67 | - kafka-server.service
68 | - kafka-connect.service
69 |
70 | - name: just force systemd to reload configs
71 | systemd:
72 | daemon_reload: yes
73 | notify:
74 | - restart kafka-server
75 |
76 | - name: make sure a service is running
77 | systemd:
78 | state: restarted
79 | name: kafka-server
80 | enabled: yes
81 |
--------------------------------------------------------------------------------
/chapter12/ansible_playbook/roles/kafka/templates/server.properties.j2:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements. See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License. You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | # see kafka.server.KafkaConfig for additional details and defaults
17 |
18 | ############################# Server Basics #############################
19 |
20 | # The id of the broker. This must be set to a unique integer for each broker.
21 | broker.id={{ brokerid }}
22 |
23 | ############################# Socket Server Settings #############################
24 |
25 | # The address the socket server listens on. It will get the value returned from
26 | # java.net.InetAddress.getCanonicalHostName() if not configured.
27 | # FORMAT:
28 | # listeners = listener_name://host_name:port
29 | # EXAMPLE:
30 | # listeners = PLAINTEXT://your.host.name:9092
31 | #listeners=PLAINTEXT://:9092
32 | listeners=PLAINTEXT://0.0.0.0:9092
33 |
34 | # Hostname and port the broker will advertise to producers and consumers. If not set,
35 | # it uses the value for "listeners" if configured. Otherwise, it will use the value
36 | # returned from java.net.InetAddress.getCanonicalHostName().
37 | #advertised.listeners=PLAINTEXT://your.host.name:9092
38 | advertised.listeners=PLAINTEXT://{{ inventory_hostname }}:9092
39 |
40 | # Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
41 | #listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
42 |
43 | # The number of threads that the server uses for receiving requests from the network and sending responses to the network
44 | num.network.threads=4
45 |
46 | # The number of threads that the server uses for processing requests, which may include disk I/O
47 | num.io.threads=8
48 |
49 | # The send buffer (SO_SNDBUF) used by the socket server
50 | socket.send.buffer.bytes=102400
51 |
52 | # The receive buffer (SO_RCVBUF) used by the socket server
53 | socket.receive.buffer.bytes=102400
54 |
55 | # The maximum size of a request that the socket server will accept (protection against OOM)
56 | socket.request.max.bytes=104857600
57 |
58 |
59 | ############################# Log Basics #############################
60 |
61 | # A comma separated list of directories under which to store log files
62 | log.dirs={{ dir_path }}
63 |
64 | # The default number of log partitions per topic. More partitions allow greater
65 | # parallelism for consumption, but this will also result in more files across
66 | # the brokers.
67 | num.partitions=1
68 | default.replication.factor = 3
69 | # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
70 | # This value is recommended to be increased for installations with data dirs located in RAID array.
71 | num.recovery.threads.per.data.dir=1
72 |
73 | ############################# Internal Topic Settings #############################
74 | # The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
75 | # For anything other than development testing, a value greater than 1 is recommended to ensure availability, such as 3.
76 | offsets.topic.replication.factor=3
77 | transaction.state.log.replication.factor=3
78 | transaction.state.log.min.isr=2
79 |
80 | ############################# Log Flush Policy #############################
81 |
82 | # Messages are immediately written to the filesystem but by default we only fsync() to sync
83 | # the OS cache lazily. The following configurations control the flush of data to disk.
84 | # There are a few important trade-offs here:
85 | # 1. Durability: Unflushed data may be lost if you are not using replication.
86 | # 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
87 | # 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
88 | # The settings below allow one to configure the flush policy to flush data after a period of time or
89 | # every N messages (or both). This can be done globally and overridden on a per-topic basis.
90 |
91 | # The number of messages to accept before forcing a flush of data to disk
92 | #log.flush.interval.messages=10000
93 |
94 | # The maximum amount of time a message can sit in a log before we force a flush
95 | #log.flush.interval.ms=1000
96 |
97 | ############################# Log Retention Policy #############################
98 |
99 | # The following configurations control the disposal of log segments. The policy can
100 | # be set to delete segments after a period of time, or after a given size has accumulated.
101 | # A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
102 | # from the end of the log.
103 |
104 | # The minimum age of a log file to be eligible for deletion due to age
105 | log.retention.hours=72
106 |
107 | # A size-based retention policy for logs. Segments are pruned from the log unless the remaining
108 | # segments drop below log.retention.bytes. Functions independently of log.retention.hours.
109 | #log.retention.bytes=1073741824
110 |
111 | # The maximum size of a log segment file. When this size is reached a new log segment will be created.
112 | log.segment.bytes=1073741824
113 |
114 | # The interval at which log segments are checked to see if they can be deleted according
115 | # to the retention policies
116 | log.retention.check.interval.ms=300000
117 |
118 | ############################# Zookeeper #############################
119 |
120 | # Zookeeper connection string (see zookeeper docs for details).
121 | # This is a comma separated host:port pairs, each corresponding to a zk
122 | # server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
123 | # You can also append an optional chroot string to the urls to specify the
124 | # root directory for all kafka znodes.
125 | zookeeper.connect={{ zookeeperinfo }}
126 |
127 | # Timeout in ms for connecting to zookeeper
128 | zookeeper.connection.timeout.ms=6000
129 |
130 |
131 | ############################# Group Coordinator Settings #############################
132 |
133 | # The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
134 | # The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
135 | # The default value for this is 3 seconds.
136 | # This config keeps the 3-second value (rather than overriding it to 0 as the stock development config does),
137 | # since in production environments the 3-second delay helps to avoid unnecessary, and potentially expensive, rebalances during application startup.
138 | group.initial.rebalance.delay.ms=3000
139 |
--------------------------------------------------------------------------------
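When the kafka3.yml play renders the template above on peter-kafka01.foo.bar, the play vars override the group_vars, so the host-specific lines should come out roughly as follows (a hedged sketch):

  grep -E '^(broker.id|advertised.listeners|log.dirs|zookeeper.connect)=' /usr/local/kafka/config/server.properties
  # broker.id=1
  # advertised.listeners=PLAINTEXT://peter-kafka01.foo.bar:9092
  # log.dirs=/data/kafka3-logs
  # zookeeper.connect=peter-zk01.foo.bar:2181,peter-zk02.foo.bar:2181,peter-zk03.foo.bar:2181/kafka3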
/chapter12/ansible_playbook/roles/monitoring/files/prometheus.yml:
--------------------------------------------------------------------------------
1 | # prometheus config
2 | global:
3 | scrape_interval: 5s
4 | evaluation_interval: 5s
5 |
6 | scrape_configs:
7 | - job_name: 'peter-jmx-kafka1'
8 | static_configs:
9 | - targets:
10 | - peter-kafka01.foo.bar:7071
11 | - peter-kafka02.foo.bar:7071
12 | - peter-kafka03.foo.bar:7071
13 |
14 | - job_name: 'peter-jmx-kafka2'
15 | static_configs:
16 | - targets:
17 | - peter-zk01.foo.bar:7071
18 | - peter-zk02.foo.bar:7071
19 | - peter-zk03.foo.bar:7071
20 |
21 | - job_name: 'peter-kafka1-exporter'
22 | static_configs:
23 | - targets:
24 | - peter-kafka01.foo.bar:9308
25 | - peter-kafka02.foo.bar:9308
26 | - peter-kafka03.foo.bar:9308
27 |
28 | - job_name: 'peter-kafka2-exporter'
29 | static_configs:
30 | - targets:
31 | - peter-zk01.foo.bar:9308
32 | - peter-zk02.foo.bar:9308
33 | - peter-zk03.foo.bar:9308
34 |
35 | - job_name: 'node-exporter-kafka1'
36 | static_configs:
37 | - targets:
38 | - peter-kafka01.foo.bar:9100
39 | - peter-kafka02.foo.bar:9100
40 | - peter-kafka03.foo.bar:9100
41 |
42 | - job_name: 'node-exporter-kafka2'
43 | static_configs:
44 | - targets:
45 | - peter-zk01.foo.bar:9100
46 | - peter-zk02.foo.bar:9100
47 | - peter-zk03.foo.bar:9100
--------------------------------------------------------------------------------
/chapter12/ansible_playbook/roles/monitoring/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: add the group prometheus
3 | group:
4 | name: prometheus
5 | state: present
6 |
7 | - name: add the user prometheus
8 | user:
9 | name: prometheus
10 | group: prometheus
11 | state: present
12 |
13 | - name: make dir prometheus
14 | file:
15 | path: /etc/prometheus
16 | state: directory
17 | owner: prometheus
18 | group: prometheus
19 | mode: '0755'
20 |
21 | - name: copy prometheus.yml conf file
22 | copy:
23 | src: prometheus.yml
24 | dest: /etc/prometheus/prometheus.yml
25 | owner: prometheus
26 | group: prometheus
27 | mode: '0755'
28 |
29 | - name: start prometheus container
30 | docker_container:
31 | name: prometheus
32 | image: prom/prometheus
33 | network_mode: host
34 | volumes:
35 | - /etc/prometheus:/etc/prometheus
36 |
37 | - name: start grafana container
38 | docker_container:
39 | name: grafana
40 | image: grafana/grafana:7.3.7
41 | network_mode: host
42 |
43 | - name: start kafka-manager container
44 | docker_container:
45 | name: cmak
46 | image: hlebalbau/kafka-manager:stable
47 | network_mode: host
48 | env:
49 | ZK_HOSTS: "peter-zk01.foo.bar:2181,peter-zk02.foo.bar:2181,peter-zk03.foo.bar:2181"
--------------------------------------------------------------------------------
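The monitoring role above runs Prometheus, Grafana, and CMAK (kafka-manager) as host-networked containers on peter-kafka01.foo.bar. A hedged check, assuming the images' default ports (9090 for Prometheus, 3000 for Grafana, 9000 for CMAK):

  curl -s http://peter-kafka01.foo.bar:9090/api/v1/targets | python -m json.tool | head
  curl -sI http://peter-kafka01.foo.bar:3000 | head -1
  curl -sI http://peter-kafka01.foo.bar:9000 | head -1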
/chapter12/ansible_playbook/roles/schemaregistry/files/schema-registry.properties:
--------------------------------------------------------------------------------
1 | listeners=http://0.0.0.0:8081
2 | kafkastore.bootstrap.servers=PLAINTEXT://peter-kafka01.foo.bar:9092,peter-kafka02.foo.bar:9092,peter-kafka03.foo.bar:9092
3 | kafkastore.topic=_schemas
4 | schema.compatibility.level=full
--------------------------------------------------------------------------------
/chapter12/ansible_playbook/roles/schemaregistry/files/schema-registry.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=schema registry
3 | After=network.target
4 |
5 | [Service]
6 | Type=simple
7 | ExecStart=/usr/local/confluent/bin/schema-registry-start /usr/local/confluent/etc/schema-registry/schema-registry.properties
8 | Restart=always
9 |
10 | [Install]
11 | WantedBy=multi-user.target
--------------------------------------------------------------------------------
/chapter12/ansible_playbook/roles/schemaregistry/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: download schema registry from web
3 | get_url:
4 | url: http://packages.confluent.io/archive/6.1/confluent-community-6.1.0.tar.gz
5 | dest: /opt/
6 | mode: '0600'
7 |
8 | - name: unarchive confluent-community
9 | unarchive:
10 | src: /opt/confluent-community-6.1.0.tar.gz
11 | dest: /usr/local
12 | creates: /usr/local/confluent-6.1.0
13 | remote_src: yes
14 |
15 | - name: setup link confluent
16 | file:
17 | path: /usr/local/confluent
18 | src: /usr/local/confluent-6.1.0
19 | state: link
20 | force: yes
21 |
22 | - name: copy schema registry conf file
23 | copy:
24 | src: schema-registry.properties
25 | dest: /usr/local/confluent/etc/schema-registry/
26 | mode: '0644'
27 | backup: no
28 |
29 | - name: copy schema registry in systemd
30 | copy:
31 | src: schema-registry.service
32 | dest: /etc/systemd/system/
33 | owner: root
34 | group: root
35 | mode: '0644'
36 | backup: no
37 |
38 | - name: just force systemd to reload configs
39 | systemd:
40 | daemon_reload: yes
41 |
42 | - name: make sure a service is running
43 | systemd:
44 | state: started
45 | name: schema-registry
46 | enabled: yes
47 |
--------------------------------------------------------------------------------
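Once the schemaregistry role above has started the service on peter-kafka03.foo.bar, the REST API can be queried on the listener port 8081 configured in schema-registry.properties (a hedged check):

  curl -s http://peter-kafka03.foo.bar:8081/subjects | python -m json.tool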
/chapter12/ansible_playbook/roles/zookeeper/files/zookeeper-server.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=zookeeper-server
3 | After=network.target
4 |
5 | [Service]
6 | Type=forking
7 | User=zookeeper
8 | Group=zookeeper
9 | SyslogIdentifier=zookeeper-server
10 | WorkingDirectory=/usr/local/zookeeper
11 | Restart=always
12 | RestartSec=0s
13 | ExecStart=/usr/local/zookeeper/bin/zkServer.sh start
14 | ExecStop=/usr/local/zookeeper/bin/zkServer.sh stop
15 | ExecReload=/usr/local/zookeeper/bin/zkServer.sh restart
16 |
17 | [Install]
18 | WantedBy=multi-user.target
--------------------------------------------------------------------------------
/chapter12/ansible_playbook/roles/zookeeper/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart zookeeper-server
3 | systemd:
4 | name: zookeeper-server
5 | state: restarted
--------------------------------------------------------------------------------
/chapter12/ansible_playbook/roles/zookeeper/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: add the group zookeeper
3 | group:
4 | name: zookeeper
5 | state: present
6 |
7 | - name: add the user zookeeper
8 | user:
9 | name: zookeeper
10 | group: zookeeper
11 | state: present
12 |
13 | - name: remove directory zk data
14 | file:
15 | path: "{{ dir_path }}"
16 | state: absent
17 |
18 | - name: make dir zookeeper
19 | file:
20 | path: "{{ dir_path }}"
21 | state: directory
22 | owner: zookeeper
23 | group: zookeeper
24 | mode: '0755'
25 |
26 | - name: download zookeeper from web
27 | get_url:
28 | url: https://archive.apache.org/dist/zookeeper/{{ zookeeperversion }}/apache-{{ zookeeperversion }}-bin.tar.gz
29 | dest: /opt/
30 | mode: '0600'
31 |
32 | - name: unarchive zookeeper
33 | unarchive:
34 | src: /opt/apache-{{ zookeeperversion }}-bin.tar.gz
35 | dest: /usr/local
36 | owner: zookeeper
37 | group: zookeeper
38 | creates: /usr/local/apache-{{ zookeeperversion }}-bin
39 | remote_src: yes
40 |
41 | - name: setup link zookeeper
42 | file:
43 | path: /usr/local/zookeeper
44 | src: /usr/local/apache-{{ zookeeperversion }}-bin
45 | owner: zookeeper
46 | group: zookeeper
47 | state: link
48 | force: yes
49 |
50 | - name: copy zookeeper server conf files
51 | template:
52 | src: zoo.cfg.j2
53 | dest: /usr/local/zookeeper/conf/zoo.cfg
54 | owner: zookeeper
55 | group: zookeeper
56 | mode: '0644'
57 | backup: no
58 |
59 | - name: create myid
60 | shell: echo {{ myid }} > /data/zk/myid
61 |
62 | - name: change file ownership, group and permissions
63 | file:
64 | path: /data/zk/myid
65 | owner: zookeeper
66 | group: zookeeper
67 | mode: '0644'
68 |
69 | - name: copy zookeeper server in systemd
70 | copy:
71 | src: zookeeper-server.service
72 | dest: /etc/systemd/system/zookeeper-server.service
73 | owner: root
74 | group: root
75 | mode: '0644'
76 | backup: no
77 |
78 | - name: just force systemd to reload configs
79 | systemd:
80 | daemon_reload: yes
81 |
82 | - name: make sure a service is running
83 | systemd:
84 | state: started
85 | name: zookeeper-server
86 | enabled: yes
87 |
--------------------------------------------------------------------------------
/chapter12/ansible_playbook/roles/zookeeper/templates/zoo.cfg.j2:
--------------------------------------------------------------------------------
1 | tickTime=2000
2 | initLimit=10
3 | syncLimit=5
4 | dataDir=/data/zk
5 | clientPort=2181
6 | autopurge.snapRetainCount=3
7 | autopurge.purgeInterval=1
8 | {% for host in groups['zkhosts'] %}
9 | server.{{ host | regex_search('(peter-zk0[1-9]\\.foo\\.bar)') | regex_replace('\\.foo\\.bar', '') | regex_replace('^peter-zk0', '') }}={{ host }}:2888:3888;2181
10 | {% endfor %}
--------------------------------------------------------------------------------
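Rendered against the three [zkhosts] entries in the inventory, the for-loop in the template above should expand into one server line per ensemble member (a hedged sketch of the expected output):

  grep ^server /usr/local/zookeeper/conf/zoo.cfg
  # server.1=peter-zk01.foo.bar:2888:3888;2181
  # server.2=peter-zk02.foo.bar:2888:3888;2181
  # server.3=peter-zk03.foo.bar:2888:3888;2181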
/chapter12/ansible_playbook/schema-registry.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: schemaregistry
3 | become: true
4 | connection: ssh
5 | roles:
6 | - schemaregistry
--------------------------------------------------------------------------------
/chapter12/ansible_playbook/site.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - import_playbook: stop-etc.yml
3 | - import_playbook: local-kafka.yml
4 | - import_playbook: stop-kafka.yml
5 | - import_playbook: zookeeper.yml
6 | - import_playbook: kafka3.yml
7 | - import_playbook: kafka4.yml
8 | - import_playbook: schema-registry.yml
9 | - import_playbook: es.yml
--------------------------------------------------------------------------------
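site.yml above chains the teardown and deployment plays for chapter 12 in order; monitoring.yml and expoter.yml are not imported by it, so they are run separately. A hedged invocation from the chapter12/ansible_playbook directory:

  ansible-playbook -i hosts site.yml
  ansible-playbook -i hosts expoter.yml
  ansible-playbook -i hosts monitoring.yml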
/chapter12/ansible_playbook/stop-etc.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts:
3 | - zkhosts
4 | - kafkahosts
5 | become: true
6 | connection: ssh
7 | tasks:
8 | - name: get service facts
9 | service_facts:
10 |
11 | - name: stop kafka-connect
12 | systemd:
13 | state: stopped
14 | name: kafka-connect
15 | when: "'kafka-connect.service' in ansible_facts.services"
16 |
17 | - name: stop schema-registry
18 | systemd:
19 | state: stopped
20 | name: schema-registry
21 | when: "'schema-registry.service' in ansible_facts.services"
22 |
23 | - name: stop exporters
24 | systemd:
25 | state: stopped
26 | name: "{{ item }}"
27 | with_items:
28 | - kafka-exporter
29 | - node-exporter
30 | - jmx-exporter
31 | ignore_errors: yes
--------------------------------------------------------------------------------
/chapter12/ansible_playbook/stop-kafka.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts:
3 | - zkhosts
4 | - kafkahosts
5 | become: true
6 | connection: ssh
7 | roles:
8 | - common
9 | tasks:
10 | - name: get service facts
11 | service_facts:
12 |
13 | - name: stop kafka-server
14 | systemd:
15 | state: stopped
16 | name: kafka-server
17 | when: "'kafka-server.service' in ansible_facts.services"
18 |
19 | - name: delete znode
20 | znode:
21 | hosts: 'peter-zk01.foo.bar:2181'
22 | name: "/{{ item }}"
23 | recursive: yes
24 | state: absent
25 | with_items:
26 | - kafka3
27 | - kafka4
28 |
29 | - name: stop zookeeper-server
30 | systemd:
31 | state: stopped
32 | name: zookeeper-server
33 | when: "'zookeeper-server.service' in ansible_facts.services"
--------------------------------------------------------------------------------
/chapter12/ansible_playbook/zookeeper.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: zkhosts
3 | become: true
4 | connection: ssh
5 | roles:
6 | - zookeeper
--------------------------------------------------------------------------------
/chapter12/python/consumer-1_kafka-1_v1.py:
--------------------------------------------------------------------------------
1 | from confluent_kafka import avro
2 | from confluent_kafka.avro import AvroConsumer
3 | from confluent_kafka.avro.serializer import SerializerError
4 |
5 | value_schema_str = """
6 | {"namespace": "student.avro",
7 | "type": "record",
8 | "doc": "This is an example of Avro.",
9 | "name": "Student",
10 | "fields": [
11 | {"name": "name", "type": ["null", "string"], "default": null, "doc": "Name of the student"},
12 | {"name": "class", "type": "int", "default": 1, "doc": "Class of the student"}
13 | ]
14 | }
15 | """
16 |
17 | value_schema = avro.loads(value_schema_str)
18 |
19 | c = AvroConsumer({
20 | 'bootstrap.servers': 'peter-kafka01.foo.bar,peter-kafka02.foo.bar,peter-kafka03.foo.bar',
21 | 'group.id': 'python-groupid01',
22 | 'auto.offset.reset': 'earliest',
23 | 'schema.registry.url': 'http://peter-kafka03.foo.bar:8081'},reader_value_schema=value_schema)
24 |
25 | c.subscribe(['peter-avro01-kafka1'])
26 |
27 | while True:
28 | try:
29 | msg = c.poll(10)
30 |
31 | except SerializerError as e:
32 | print("Message deserialization failed for {}: {}".format(msg, e))
33 | break
34 |
35 | if msg is None:
36 | continue
37 |
38 | if msg.error():
39 | print("AvroConsumer error: {}".format(msg.error()))
40 | continue
41 |
42 | print(msg.value())
43 |
44 | c.close()
--------------------------------------------------------------------------------
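The Python clients in this chapter import confluent_kafka.avro, names, and elasticsearch, so those packages need to be installed first (a hedged sketch; the elasticsearch client is kept on the 7.x line to match the Elasticsearch 7.12.1 container):

  pip install --user "confluent-kafka[avro]" names "elasticsearch>=7,<8"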
/chapter12/python/consumer-1_kafka-1_v2.py:
--------------------------------------------------------------------------------
1 | from confluent_kafka import avro
2 | from confluent_kafka.avro import AvroConsumer
3 | from confluent_kafka.avro.serializer import SerializerError
4 |
5 | value_schema_str = """
6 | {"namespace": "student.avro",
7 | "type": "record",
8 | "doc": "This is an example of Avro.",
9 | "name": "Student",
10 | "fields": [
11 | {"name": "name", "type": ["null", "string"], "default": null, "doc": "Name of the student"},
12 | {"name": "phone", "type": "int", "default": 1, "doc": "Phone of the student"},
13 | {"name": "age", "type": "int", "default": 1, "doc": "Age of the student"},
14 | {"name": "class", "type": "int", "default": 1, "doc": "Class of the student"}
15 | ]
16 | }
17 | """
18 |
19 | value_schema = avro.loads(value_schema_str)
20 |
21 | c = AvroConsumer({
22 | 'bootstrap.servers': 'peter-kafka01.foo.bar,peter-kafka02.foo.bar,peter-kafka03.foo.bar',
23 | 'group.id': 'python-groupid01',
24 | 'auto.offset.reset': 'earliest',
25 | 'schema.registry.url': 'http://peter-kafka03.foo.bar:8081'},reader_value_schema=value_schema)
26 |
27 | c.subscribe(['peter-avro01-kafka1'])
28 |
29 | while True:
30 | try:
31 | msg = c.poll(10)
32 |
33 | except SerializerError as e:
34 | print("Message deserialization failed for {}: {}".format(msg, e))
35 | break
36 |
37 | if msg is None:
38 | continue
39 |
40 | if msg.error():
41 | print("AvroConsumer error: {}".format(msg.error()))
42 | continue
43 |
44 | print(msg.value())
45 |
46 | c.close()
--------------------------------------------------------------------------------
/chapter12/python/consumer_kafka-2_producer_es_v1.py:
--------------------------------------------------------------------------------
1 | from confluent_kafka import avro
2 | from confluent_kafka.avro import AvroConsumer
3 | from confluent_kafka.avro.serializer import SerializerError
4 | from elasticsearch import Elasticsearch
5 | from datetime import datetime
6 |
7 | value_schema_str = """
8 | {"namespace": "student.avro",
9 | "type": "record",
10 | "doc": "This is an example of Avro.",
11 | "name": "Student",
12 | "fields": [
13 | {"name": "name", "type": ["null", "string"], "default": null, "doc": "Name of the student"},
14 | {"name": "class", "type": "int", "default": 1, "doc": "Class of the student"}
15 | ]
16 | }
17 | """
18 |
19 | value_schema = avro.loads(value_schema_str)
20 |
21 | c = AvroConsumer({
22 | 'bootstrap.servers': 'peter-zk01.foo.bar,peter-zk02.foo.bar,peter-zk03.foo.bar',
23 | 'group.id': 'python-groupid01',
24 | 'auto.offset.reset': 'earliest',
25 | 'schema.registry.url': 'http://peter-kafka03.foo.bar:8081'},reader_value_schema=value_schema)
26 |
27 | c.subscribe(['src.peter-avro01-kafka1'])
28 |
29 | es = Elasticsearch('peter-kafka02.foo.bar:9200')
30 | index = 'students'
31 |
32 | while True:
33 | try:
34 | msg = c.poll(10)
35 |
36 | except SerializerError as e:
37 | print("Message deserialization failed for {}: {}".format(msg, e))
38 | break
39 |
40 | if msg is None:
41 | continue
42 |
43 | if msg.error():
44 | print("AvroConsumer error: {}".format(msg.error()))
45 | continue
46 |
47 | print(msg.value())
48 | doc = msg.value()
49 | doc['timestamp'] = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
50 | # doc['timestamp'] = datetime.now()
51 |
52 | if not es.indices.exists(index=index):
53 | es.indices.create(index=index)
54 | es.index(index=index, doc_type='_doc', body=doc)
55 |
56 | c.close()
--------------------------------------------------------------------------------
/chapter12/python/consumer_kafka-2_producer_es_v2.py:
--------------------------------------------------------------------------------
1 | from confluent_kafka import avro, TopicPartition
2 | from confluent_kafka.avro import AvroConsumer
3 | from confluent_kafka.avro.serializer import SerializerError
4 | from elasticsearch import Elasticsearch
5 | from datetime import datetime
6 |
7 | value_schema_str = """
8 | {"namespace": "student.avro",
9 | "type": "record",
10 | "doc": "This is an example of Avro.",
11 | "name": "Student",
12 | "fields": [
13 | {"name": "name", "type": ["null", "string"], "default": null, "doc": "Name of the student"},
14 | {"name": "phone", "type": "int", "default": 1, "doc": "Phone of the student"},
15 | {"name": "age", "type": "int", "default": 1, "doc": "Age of the student"},
16 | {"name": "class", "type": "int", "default": 1, "doc": "Class of the student"}
17 | ]
18 | }
19 | """
20 |
21 | value_schema = avro.loads(value_schema_str)
22 |
23 | c = AvroConsumer({
24 | 'bootstrap.servers': 'peter-zk01.foo.bar,peter-zk02.foo.bar,peter-zk03.foo.bar',
25 | 'group.id': 'python-groupid01',
26 | 'auto.offset.reset': 'earliest',
27 | 'schema.registry.url': 'http://peter-kafka03.foo.bar:8081'},reader_value_schema=value_schema)
28 |
29 | c.subscribe(['src.peter-avro01-kafka1'])
30 | es = Elasticsearch('peter-kafka02.foo.bar:9200')
31 | index = 'students'
32 |
33 | while True:
34 | try:
35 | msg = c.poll(10)
36 |
37 | except SerializerError as e:
38 | print("Message deserialization failed for {}: {}".format(msg, e))
39 | break
40 |
41 | if msg is None:
42 | continue
43 |
44 | if msg.error():
45 | print("AvroConsumer error: {}".format(msg.error()))
46 | continue
47 |
48 | print(msg.value())
49 | doc = msg.value()
50 | doc['timestamp'] = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
51 |
52 | if not es.indices.exists(index=index):
53 | es.indices.create(index=index)
54 | es.index(index=index, doc_type='_doc', body=doc)
55 |
56 | c.close()
--------------------------------------------------------------------------------
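Both consumer_kafka-2_producer_es scripts above index each consumed record into the students index on peter-kafka02.foo.bar. A hedged check of the indexed documents:

  curl -s 'http://peter-kafka02.foo.bar:9200/students/_search?size=1&pretty'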
/chapter12/python/producer-1_kafka-1_v1.py:
--------------------------------------------------------------------------------
1 | from confluent_kafka import avro
2 | from confluent_kafka.avro import AvroProducer
3 | import names
4 | import random
5 |
6 | value_schema_str = """
7 | {"namespace": "student.avro",
8 | "type": "record",
9 | "doc": "This is an example of Avro.",
10 | "name": "Student",
11 | "fields": [
12 | {"name": "name", "type": ["null", "string"], "default": null, "doc": "Name of the student"},
13 | {"name": "class", "type": "int", "default": 1, "doc": "Class of the student"}
14 | ]
15 | }
16 | """
17 |
18 | value_schema = avro.loads(value_schema_str)
19 |
20 | def delivery_report(err, msg):
21 | """ Called once for each message produced to indicate delivery result.
22 | Triggered by poll() or flush(). """
23 | if err is not None:
24 | print('Message delivery failed: {}'.format(err))
25 | else:
26 | print('Message delivered to {} [{}]'.format(msg.topic(), msg.offset()))
27 |
28 | avroProducer = AvroProducer({
29 | 'bootstrap.servers': 'peter-kafka01.foo.bar,peter-kafka02.foo.bar,peter-kafka03.foo.bar',
30 | 'on_delivery': delivery_report,
31 | 'schema.registry.url': 'http://peter-kafka03.foo.bar:8081'
32 | }, default_value_schema=value_schema)
33 |
34 | for x in range(100):
35 | value = {"name": names.get_first_name(), "class": random.randint(1,5)} # 전송할 메시지
36 | avroProducer.produce(topic='peter-avro01-kafka1', value=value)
37 |
38 | avroProducer.flush()
--------------------------------------------------------------------------------
/chapter12/python/producer-1_kafka-1_v2.py:
--------------------------------------------------------------------------------
1 | from confluent_kafka import avro
2 | from confluent_kafka.avro import AvroProducer
3 | import names
4 | import random
5 |
6 | value_schema_str = """
7 | {"namespace": "student.avro",
8 | "type": "record",
9 | "doc": "This is an example of Avro.",
10 | "name": "Student",
11 | "fields": [
12 | {"name": "name", "type": ["null", "string"], "default": null, "doc": "Name of the student"},
13 | {"name": "phone", "type": "int", "default": 1, "doc": "Phone of the student"},
14 | {"name": "age", "type": "int", "default": 1, "doc": "Age of the student"},
15 | {"name": "class", "type": "int", "default": 1, "doc": "Class of the student"}
16 | ]
17 | }
18 | """
19 |
20 | value_schema = avro.loads(value_schema_str)
21 |
22 | def delivery_report(err, msg):
23 | """ Called once for each message produced to indicate delivery result.
24 | Triggered by poll() or flush(). """
25 | if err is not None:
26 | print('Message delivery failed: {}'.format(err))
27 | else:
28 | print('Message delivered to {} [{}]'.format(msg.topic(), msg.offset()))
29 |
30 | avroProducer = AvroProducer({
31 | 'bootstrap.servers': 'peter-kafka01.foo.bar,peter-kafka02.foo.bar,peter-kafka03.foo.bar',
32 | 'on_delivery': delivery_report,
33 | 'schema.registry.url': 'http://peter-kafka03.foo.bar:8081'
34 | }, default_value_schema=value_schema)
35 |
36 | for x in range(5):
37 | value = {"name": names.get_first_name(), "class": random.randint(1,5), "phone": random.randint(1000,9999), "age": random.randint(10,20)} # 전송할 메시지
38 | avroProducer.produce(topic='peter-avro01-kafka1', value=value)
39 |
40 | avroProducer.flush()
--------------------------------------------------------------------------------
/chapter12/python/producer-1_kafka-1_v3.py:
--------------------------------------------------------------------------------
1 | from confluent_kafka import avro
2 | from confluent_kafka.avro import AvroProducer
3 | import names
4 | import random
5 |
6 | value_schema_str = """
7 | {"namespace": "student.avro",
8 | "type": "record",
9 | "doc": "This is an example of Avro.",
10 | "name": "Student",
11 | "fields": [
12 | {"name": "name", "type": ["null", "string"], "default": null, "doc": "Name of the student"},
13 | {"name": "phone", "type": "int", "default": 1, "doc": "Phone of the student"},
14 | {"name": "age", "type": "int", "default": 1, "doc": "Age of the student"},
15 | {"name": "class", "type": "int", "default": 1, "doc": "Class of the student"}
16 | ]
17 | }
18 | """
19 |
20 | value_schema = avro.loads(value_schema_str)
21 |
22 | def delivery_report(err, msg):
23 | """ Called once for each message produced to indicate delivery result.
24 | Triggered by poll() or flush(). """
25 | if err is not None:
26 | print('Message delivery failed: {}'.format(err))
27 | else:
28 | print('Message delivered to {} [{}]'.format(msg.topic(), msg.offset()))
29 |
30 | avroProducer = AvroProducer({
31 | 'bootstrap.servers': 'peter-kafka01.foo.bar,peter-kafka02.foo.bar,peter-kafka03.foo.bar',
32 | 'on_delivery': delivery_report,
33 | 'schema.registry.url': 'http://peter-kafka03.foo.bar:8081'
34 | }, default_value_schema=value_schema)
35 |
36 | for x in range(50000):
37 | value = {"name": names.get_first_name(), "class": random.randint(1,5), "phone": random.randint(1000,9999), "age": random.randint(10,20)} # 전송할 메시지
38 | avroProducer.produce(topic='peter-avro01-kafka1', value=value)
39 |
40 | avroProducer.flush()
--------------------------------------------------------------------------------
/chapter12/예제/예제 12-1:
--------------------------------------------------------------------------------
1 | from confluent_kafka import avro
2 | from confluent_kafka.avro import AvroProducer
3 | import names
4 | import random
5 |
6 | value_schema_str = """
7 | {"namespace": "student.avro",
8 | "type": "record",
9 | "doc": "This is an example of Avro.",
10 | "name": "Student",
11 | "fields": [
12 | {"name": "name", "type": ["null", "string"], "default": null, "doc": "Name of the student"},
13 | {"name": "class", "type": "int", "default": 1, "doc": "Class of the student"}
14 | ]
15 | }
16 | """
17 |
18 | value_schema = avro.loads(value_schema_str)
19 |
20 | def delivery_report(err, msg):
21 | """ Called once for each message produced to indicate delivery result.
22 | Triggered by poll() or flush(). """
23 | if err is not None:
24 | print('Message delivery failed: {}'.format(err))
25 | else:
26 | print('Message delivered to {} [{}]'.format(msg.topic(), msg.offset()))
27 |
28 | avroProducer = AvroProducer({
29 | 'bootstrap.servers': 'peter-kafka01.foo.bar,peter-kafka02.foo.bar,peter-kafka03.foo.bar',
30 | 'on_delivery': delivery_report,
31 | 'schema.registry.url': 'http://peter-kafka03.foo.bar:8081'
32 | }, default_value_schema=value_schema)
33 |
34 | for x in range(100):
35 | value = {"name": names.get_first_name(), "class": random.randint(1,5)} # 전송할 메시지
36 | avroProducer.produce(topic='peter-avro01-kafka1', value=value)
37 |
38 | avroProducer.flush()
--------------------------------------------------------------------------------
/chapter12/예제/예제 12-2:
--------------------------------------------------------------------------------
1 | from confluent_kafka import avro
2 | from confluent_kafka.avro import AvroConsumer
3 | from confluent_kafka.avro.serializer import SerializerError
4 |
5 | value_schema_str = """
6 | {"namespace": "student.avro",
7 | "type": "record",
8 | "doc": "This is an example of Avro.",
9 | "name": "Student",
10 | "fields": [
11 | {"name": "name", "type": ["null", "string"], "default": null, "doc": "Name of the student"},
12 | {"name": "class", "type": "int", "default": 1, "doc": "Class of the student"}
13 | ]
14 | }
15 | """
16 |
17 | value_schema = avro.loads(value_schema_str)
18 |
19 | c = AvroConsumer({
20 | 'bootstrap.servers': 'peter-kafka01.foo.bar,peter-kafka02.foo.bar,peter-kafka03.foo.bar',
21 | 'group.id': 'python-groupid01',
22 | 'auto.offset.reset': 'earliest',
23 | 'schema.registry.url': 'http://peter-kafka03.foo.bar:8081'},reader_value_schema=value_schema)
24 |
25 | c.subscribe(['peter-avro01-kafka1'])
26 |
27 | while True:
28 | try:
29 | msg = c.poll(10)
30 |
31 | except SerializerError as e:
32 | print("Message deserialization failed for {}: {}".format(msg, e))
33 | break
34 |
35 | if msg is None:
36 | continue
37 |
38 | if msg.error():
39 | print("AvroConsumer error: {}".format(msg.error()))
40 | continue
41 |
42 | print(msg.value())
43 |
44 | c.close()
--------------------------------------------------------------------------------
/chapter12/예제/예제 12-3:
--------------------------------------------------------------------------------
1 | from confluent_kafka import avro
2 | from confluent_kafka.avro import AvroConsumer
3 | from confluent_kafka.avro.serializer import SerializerError
4 | from elasticsearch import Elasticsearch
5 | from datetime import datetime
6 |
7 | value_schema_str = """
8 | {"namespace": "student.avro",
9 | "type": "record",
10 | "doc": "This is an example of Avro.",
11 | "name": "Student",
12 | "fields": [
13 | {"name": "name", "type": ["null", "string"], "default": null, "doc": "Name of the student"},
14 | {"name": "class", "type": "int", "default": 1, "doc": "Class of the student"}
15 | ]
16 | }
17 | """
18 |
19 | value_schema = avro.loads(value_schema_str)
20 |
21 | c = AvroConsumer({
22 | 'bootstrap.servers': 'peter-zk01.foo.bar,peter-zk02.foo.bar,peter-zk03.foo.bar',
23 | 'group.id': 'python-groupid01',
24 | 'auto.offset.reset': 'earliest',
25 | 'schema.registry.url': 'http://peter-kafka03.foo.bar:8081'},reader_value_schema=value_schema)
26 |
27 | c.subscribe(['src.peter-avro01-kafka1'])
28 |
29 | es = Elasticsearch('peter-kafka02.foo.bar:9200')
30 | index = 'students-' + str(datetime.now().date())
31 |
32 | while True:
33 | try:
34 | msg = c.poll(10)
35 |
36 | except SerializerError as e:
37 | print("Message deserialization failed for {}: {}".format(msg, e))
38 | break
39 |
40 | if msg is None:
41 | continue
42 |
43 | if msg.error():
44 | print("AvroConsumer error: {}".format(msg.error()))
45 | continue
46 |
47 | print(msg.value())
48 | doc = msg.value()
49 | doc['timestamp'] = datetime.now()
50 |
51 | if not es.indices.exists(index=index):
52 | es.indices.create(index=index)
53 | es.index(index=index, doc_type='_doc', body=doc)
54 |
55 | c.close()
--------------------------------------------------------------------------------
/chapter12/예제/예제 12-4:
--------------------------------------------------------------------------------
1 | from confluent_kafka import avro
2 | from confluent_kafka.avro import AvroConsumer
3 | from confluent_kafka.avro.serializer import SerializerError
4 |
5 | value_schema_str = """
6 | {"namespace": "student.avro",
7 | "type": "record",
8 | "doc": "This is an example of Avro.",
9 | "name": "Student",
10 | "fields": [
11 | {"name": "name", "type": ["null", "string"], "default": null, "doc": "Name of the student"},
12 | {"name": "phone", "type": "int", "default": 1, "doc": "Phone of the student"},
13 | {"name": "age", "type": "int", "default": 1, "doc": "Age of the student"},
14 | {"name": "class", "type": "int", "default": 1, "doc": "Class of the student"}
15 | ]
16 | }
17 | """
18 |
19 | value_schema = avro.loads(value_schema_str)
20 |
21 | c = AvroConsumer({
22 | 'bootstrap.servers': 'peter-kafka01.foo.bar,peter-kafka02.foo.bar,peter-kafka03.foo.bar',
23 | 'group.id': 'python-groupid01',
24 | 'auto.offset.reset': 'earliest',
25 | 'schema.registry.url': 'http://peter-kafka03.foo.bar:8081'},reader_value_schema=value_schema)
26 |
27 | c.subscribe(['peter-avro01-kafka1'])
28 |
29 | while True:
30 | try:
31 | msg = c.poll(10)
32 |
33 | except SerializerError as e:
34 | print("Message deserialization failed for {}: {}".format(msg, e))
35 | break
36 |
37 | if msg is None:
38 | continue
39 |
40 | if msg.error():
41 | print("AvroConsumer error: {}".format(msg.error()))
42 | continue
43 |
44 | print(msg.value())
45 |
46 | c.close()
--------------------------------------------------------------------------------
/chapter12/예제/예제 12-5:
--------------------------------------------------------------------------------
1 | from confluent_kafka import avro
2 | from confluent_kafka.avro import AvroProducer
3 | import names
4 | import random
5 |
6 | value_schema_str = """
7 | {"namespace": "student.avro",
8 | "type": "record",
9 | "doc": "This is an example of Avro.",
10 | "name": "Student",
11 | "fields": [
12 | {"name": "name", "type": ["null", "string"], "default": null, "doc": "Name of the student"},
13 | {"name": "phone", "type": "int", "default": 1, "doc": "Phone of the student"},
14 | {"name": "age", "type": "int", "default": 1, "doc": "Age of the student"},
15 | {"name": "class", "type": "int", "default": 1, "doc": "Class of the student"}
16 | ]
17 | }
18 | """
19 |
20 | value_schema = avro.loads(value_schema_str)
21 |
22 | def delivery_report(err, msg):
23 | """ Called once for each message produced to indicate delivery result.
24 | Triggered by poll() or flush(). """
25 | if err is not None:
26 | print('Message delivery failed: {}'.format(err))
27 | else:
28 | print('Message delivered to {} [{}]'.format(msg.topic(), msg.offset()))
29 |
30 | avroProducer = AvroProducer({
31 | 'bootstrap.servers': 'peter-kafka01.foo.bar,peter-kafka02.foo.bar,peter-kafka03.foo.bar',
32 | 'on_delivery': delivery_report,
33 | 'schema.registry.url': 'http://peter-kafka03.foo.bar:8081'
34 | }, default_value_schema=value_schema)
35 |
36 | for x in range(5):
37 | value = {"name": names.get_first_name(), "class": random.randint(1,5), "phone": random.randint(1000,9999), "age": random.randint(10,20)} # 전송할 메시지
38 | avroProducer.produce(topic='peter-avro01-kafka1', value=value)
39 |
40 | avroProducer.flush()
--------------------------------------------------------------------------------
/chapter12/예제/예제 12-6:
--------------------------------------------------------------------------------
1 | from confluent_kafka import avro
2 | from confluent_kafka.avro import AvroConsumer
3 | from confluent_kafka.avro.serializer import SerializerError
4 | from elasticsearch import Elasticsearch
5 | from datetime import datetime
6 |
7 | value_schema_str = """
8 | {"namespace": "student.avro",
9 | "type": "record",
10 | "doc": "This is an example of Avro.",
11 | "name": "Student",
12 | "fields": [
13 | {"name": "name", "type": ["null", "string"], "default": null, "doc": "Name of the student"},
14 | {"name": "phone", "type": "int", "default": 1, "doc": "Phone of the student"},
15 | {"name": "age", "type": "int", "default": 1, "doc": "Age of the student"},
16 | {"name": "class", "type": "int", "default": 1, "doc": "Class of the student"}
17 | ]
18 | }
19 | """
20 |
21 | value_schema = avro.loads(value_schema_str)
22 |
23 | c = AvroConsumer({
24 | 'bootstrap.servers': 'peter-zk01.foo.bar,peter-zk02.foo.bar,peter-zk03.foo.bar',
25 | 'group.id': 'python-groupid01',
26 | 'auto.offset.reset': 'earliest',
27 | 'schema.registry.url': 'http://peter-kafka03.foo.bar:8081'},reader_value_schema=value_schema)
28 |
29 | c.subscribe(['src.peter-avro01-kafka1'])
30 |
31 | es = Elasticsearch('peter-kafka02.foo.bar:9200')
32 | index = 'students'
33 |
34 | while True:
35 | try:
36 | msg = c.poll(10)
37 |
38 | except SerializerError as e:
39 | print("Message deserialization failed for {}: {}".format(msg, e))
40 | break
41 |
42 | if msg is None:
43 | continue
44 |
45 | if msg.error():
46 | print("AvroConsumer error: {}".format(msg.error()))
47 | continue
48 |
49 | print(msg.value())
50 | doc = msg.value()
51 | doc['@timestamp'] = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
52 |
53 | if not es.indices.exists(index=index):
54 | es.indices.create(index=index)
55 | es.index(index=index, doc_type='_doc', body=doc)
56 |
57 | c.close()
--------------------------------------------------------------------------------
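After the consumer above has indexed a few records, one way to confirm they reached Elasticsearch is a search against the students index (a sketch, reusing the host and index name from the example):

curl -s "http://peter-kafka02.foo.bar:9200/students/_search?pretty"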
/chapter2/2_commands.txt:
--------------------------------------------------------------------------------
1 | P62
2 | chmod 600 keypair.pem
3 |
4 | P65
5 | ssh -i keypair.pem -l ec2-user 13.125.209.60
6 | ssh -i keypair.pem ec2-user@13.125.209.60
7 |
8 | P67
9 | ping -c 2 peter-zk01.foo.bar
10 |
11 | P70
12 | sudo amazon-linux-extras install -y ansible2
13 | sudo yum install -y git
14 | git clone https://github.com/onlybooks/kafka2.git
15 | scp -i keypair.pem keypair.pem ec2-user@13.125.20.117:~
16 | chmod 600 keypair.pem
17 | ssh-agent bash
18 | ssh-add keypair.pem
19 |
20 | P71
21 | ssh-keygen
22 | cat /home/ec2-user/.ssh/id_rsa.pub
23 |
24 | P72
25 | vi /home/ec2-user/.ssh/authorized_keys
26 | chmod 600 .ssh/authorized_keys
27 | cd chapter2/ansible_playbook
28 | ansible-playbook -i hosts zookeeper.yml
29 | sudo systemctl status zookeeper-server
30 |
31 | P74
32 | ansible-playbook -i hosts kafka.yml
33 | sudo systemctl status kafka-server
34 |
35 | P77
36 | /usr/local/kafka/bin/kafka-topics.sh --bootstrap-server peter-kafka01.foo.bar:9092 --create --topic peter-overview01 --partitions 1 --replication-factor 3
37 | /usr/local/kafka/bin/kafka-console-consumer.sh --bootstrap-server peter-kafka01.foo.bar:9092 --topic peter-overview01
38 | /usr/local/kafka/bin/kafka-console-producer.sh --bootstrap-server peter-kafka01.foo.bar:9092 --topic peter-overview01
--------------------------------------------------------------------------------
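The peter-overview01 topic created on P77 can be removed again once the console producer/consumer test is done. The delete command below is standard kafka-topics.sh usage and is not part of the repo's command list:

/usr/local/kafka/bin/kafka-topics.sh --bootstrap-server peter-kafka01.foo.bar:9092 --delete --topic peter-overview01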
/chapter2/ansible_playbook/group_vars/all.yml:
--------------------------------------------------------------------------------
1 | user_name: ec2-user
2 | krb_realm: FOO.BAR
3 | krb_dns: foo.bar
4 | kadmin_pass: peterpass
5 | kdc_server: peter-zk01.foo.bar
6 | kafkaversion: 2.6.0
--------------------------------------------------------------------------------
/chapter2/ansible_playbook/group_vars/kafkahosts.yml:
--------------------------------------------------------------------------------
1 | brokerid: "{{ inventory_hostname | regex_search('(peter-kafka0[1-9]\\.foo\\.bar)') | regex_replace('\\.foo\\.bar', '') | regex_replace('^peter-kafka0', '') }}"
2 | zookeeperinfo: peter-zk01.foo.bar:2181,peter-zk02.foo.bar:2181,peter-zk03.foo.bar:2181
3 | dir_path: /data/kafka-logs
--------------------------------------------------------------------------------
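The brokerid expression above strips the domain and the peter-kafka0 prefix from the hostname, so peter-kafka01.foo.bar becomes broker.id=1. A quick way to see the value each host will get (a sketch, assuming the hosts inventory in this directory) is an ad-hoc debug call:

ansible -i hosts kafkahosts -m debug -a "var=brokerid"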
/chapter2/ansible_playbook/group_vars/zkhosts.yml:
--------------------------------------------------------------------------------
1 | zookeeperversion: zookeeper-3.5.9
2 | myid: "{{ inventory_hostname | regex_search('(peter-zk0[0-9]\\.foo\\.bar)') | regex_replace('\\.foo\\.bar', '') | regex_replace('^peter-zk0', '') }}"
3 | dir_path: /data/zk
--------------------------------------------------------------------------------
/chapter2/ansible_playbook/hosts:
--------------------------------------------------------------------------------
1 | [zkhosts]
2 | peter-zk01.foo.bar
3 | peter-zk02.foo.bar
4 | peter-zk03.foo.bar
5 |
6 | [kafkahosts]
7 | peter-kafka01.foo.bar
8 | peter-kafka02.foo.bar
9 | peter-kafka03.foo.bar
10 |
11 | [kerberoshosts]
12 | peter-zk01.foo.bar
--------------------------------------------------------------------------------
/chapter2/ansible_playbook/kafka-exporter.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: kafkahosts
3 | become: true
4 | connection: ssh
5 | roles:
6 | - common
7 | - kafkaexporter
--------------------------------------------------------------------------------
/chapter2/ansible_playbook/kafka-scaleout.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: peter-zk03.foo.bar
3 | become: true
4 | connection: ssh
5 | vars:
6 | - kafkaversion: 2.6.0
7 | - brokerid: 4
8 | - zookeeperinfo: peter-zk01.foo.bar:2181,peter-zk02.foo.bar:2181,peter-zk03.foo.bar:2181
9 | - dir_path: /data/kafka-logs
10 | roles:
11 | - common
12 | - kafka
--------------------------------------------------------------------------------
/chapter2/ansible_playbook/kafka.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: kafkahosts
3 | become: true
4 | connection: ssh
5 | roles:
6 | - common
7 | - kafka
--------------------------------------------------------------------------------
/chapter2/ansible_playbook/kafka1.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: kafkahosts
3 | become: true
4 | connection: ssh
5 | vars:
6 | - zookeeperinfo: peter-zk01.foo.bar:2181,peter-zk02.foo.bar:2181,peter-zk03.foo.bar:2181/kafka1
7 | - dir_path: /data/kafka1-logs
8 | roles:
9 | - common
10 | - kafka
--------------------------------------------------------------------------------
/chapter2/ansible_playbook/kafka2.1.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: kafkahosts
3 | become: true
4 | connection: ssh
5 | vars:
6 | - kafkaversion: 2.1.0
7 | roles:
8 | - common
9 | - kafka
--------------------------------------------------------------------------------
/chapter2/ansible_playbook/kafka2.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: zkhosts
3 | become: true
4 | connection: ssh
5 | vars:
6 | - brokerid: "{{ inventory_hostname | regex_search('(peter-zk0[1-9]\\.foo\\.bar)') | regex_replace('\\.foo\\.bar', '') | regex_replace('^peter-zk0', '') }}"
7 | - zookeeperinfo: peter-zk01.foo.bar:2181,peter-zk02.foo.bar:2181,peter-zk03.foo.bar:2181/kafka2
8 | - dir_path: /data/kafka2-logs
9 | roles:
10 | - common
11 | - kafka
--------------------------------------------------------------------------------
/chapter2/ansible_playbook/kerberos.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: kerberoshosts
3 | become: true
4 | connection: ssh
5 | roles:
6 | - common
7 | - kerberos
--------------------------------------------------------------------------------
/chapter2/ansible_playbook/roles/common/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Set timezone to Asia/Seoul
3 | timezone:
4 | name: Asia/Seoul
5 |
6 | - name: install Java and tools
7 | yum:
8 | name: ['dstat', 'java-1.8.0-openjdk', 'java-1.8.0-openjdk-devel', 'krb5-workstation', 'git']
9 | state: latest
10 |
11 | - name: copy krb5 conf
12 | template:
13 | src: krb5.conf.j2
14 | dest: /etc/krb5.conf
15 | owner: root
16 | group: root
17 | mode: '0644'
18 | backup: no
--------------------------------------------------------------------------------
/chapter2/ansible_playbook/roles/common/templates/krb5.conf.j2:
--------------------------------------------------------------------------------
1 | # Configuration snippets may be placed in this directory as well
2 | includedir /etc/krb5.conf.d/
3 |
4 | [logging]
5 | default = FILE:/var/log/krb5libs.log
6 | kdc = FILE:/var/log/krb5kdc.log
7 | admin_server = FILE:/var/log/kadmind.log
8 |
9 | [libdefaults]
10 | dns_lookup_realm = false
11 | ticket_lifetime = 24h
12 | rdns = false
13 | default_realm = {{ krb_realm }}
14 |
15 | [realms]
16 | {{ krb_realm }} = {
17 | kdc = {{ kdc_server }}
18 | admin_server = {{ kdc_server }}
19 | }
20 |
21 | [domain_realm]
22 | .{{ krb_dns }} = {{ krb_realm }}
23 | {{ krb_dns }} = {{ krb_realm }}
--------------------------------------------------------------------------------
/chapter2/ansible_playbook/roles/kafka/files/connect-distributed.properties:
--------------------------------------------------------------------------------
1 | bootstrap.servers=peter-kafka01.foo.bar:9092,peter-kafka02.foo.bar:9092,peter-kafka03.foo.bar:9092
2 | group.id=peter-connect-cluster
3 | key.converter=org.apache.kafka.connect.converters.ByteArrayConverter
4 | value.converter=org.apache.kafka.connect.converters.ByteArrayConverter
5 | key.converter.schemas.enable=false
6 | value.converter.schemas.enable=false
7 | offset.storage.topic=connect-offsets
8 | offset.storage.replication.factor=3
9 | offset.storage.partitions=25
10 | config.storage.topic=connect-configs
11 | config.storage.replication.factor=3
12 | config.storage.partitions=1
13 | status.storage.topic=connect-status
14 | status.storage.replication.factor=3
15 | status.storage.partitions=5
16 | offset.flush.interval.ms=10000
--------------------------------------------------------------------------------
/chapter2/ansible_playbook/roles/kafka/files/jmx:
--------------------------------------------------------------------------------
1 | JMX_PORT=9999
--------------------------------------------------------------------------------
/chapter2/ansible_playbook/roles/kafka/files/kafka-connect.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=kafka-connect
3 | After=network.target kafka-server.target
4 |
5 | [Service]
6 | Type=simple
7 | SyslogIdentifier=kafka-connect
8 | WorkingDirectory=/usr/local/kafka
9 | Restart=always
10 | ExecStart=/usr/local/kafka/bin/connect-distributed.sh /usr/local/kafka/config/connect-distributed.properties
11 | ExecStop=/usr/local/kafka/bin/connect-distributed.sh
12 |
13 | [Install]
14 | WantedBy=multi-user.target
--------------------------------------------------------------------------------
/chapter2/ansible_playbook/roles/kafka/files/kafka-server.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=kafka-server
3 | After=network.target
4 |
5 | [Service]
6 | Type=simple
7 | SyslogIdentifier=kafka-server
8 | WorkingDirectory=/usr/local/kafka
9 | EnvironmentFile=/usr/local/kafka/config/jmx
10 | Restart=always
11 | ExecStart=/usr/local/kafka/bin/kafka-server-start.sh /usr/local/kafka/config/server.properties
12 | ExecStop=/usr/local/kafka/bin/kafka-server-stop.sh
13 |
14 | [Install]
15 | WantedBy=multi-user.target
--------------------------------------------------------------------------------
/chapter2/ansible_playbook/roles/kafka/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart kafka-server
3 | systemd:
4 | name: kafka-server
5 | state: restarted
--------------------------------------------------------------------------------
/chapter2/ansible_playbook/roles/kafka/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: stop kafka-server
3 | systemd:
4 | state: stopped
5 | name: kafka-server
6 | ignore_errors: yes
7 |
8 | - name: remove directory kafka
9 | file:
10 | path: "{{ dir_path }}"
11 | state: absent
12 |
13 | - name: make dir kafka
14 | file:
15 | path: "{{ dir_path }}"
16 | state: directory
17 | mode: '0755'
18 |
19 | - name: download kafka from web
20 | get_url:
21 | url: https://archive.apache.org/dist/kafka/{{ kafkaversion }}/kafka_2.12-{{ kafkaversion }}.tgz
22 | dest: /opt/
23 | mode: '0600'
24 |
25 | - name: unarchive kafka
26 | unarchive:
27 | src: /opt/kafka_2.12-{{ kafkaversion }}.tgz
28 | dest: /usr/local
29 | remote_src: yes
30 |
31 | - name: setup link kafka
32 | file:
33 | path: /usr/local/kafka
34 | src: /usr/local/kafka_2.12-{{ kafkaversion }}
35 | state: link
36 | force: yes
37 |
38 | - name: copy kafka server conf files
39 | template:
40 | src: server.properties.j2
41 | dest: /usr/local/kafka/config/server.properties
42 | mode: '0644'
43 | backup: no
44 |
45 | - name: copy kafka conf file
46 | copy:
47 | src: "{{ item }}"
48 | dest: /usr/local/kafka/config/
49 | mode: '0644'
50 | backup: no
51 | with_items:
52 | - jmx
53 | - connect-distributed.properties
54 |
55 | - name: copy kafka server in systemd
56 | copy:
57 | src: "{{ item }}"
58 | dest: /etc/systemd/system/
59 | owner: root
60 | group: root
61 | mode: '0644'
62 | backup: no
63 | with_items:
64 | - kafka-server.service
65 | - kafka-connect.service
66 |
67 | - name: just force systemd to reload configs
68 | systemd:
69 | daemon_reload: yes
70 |
71 | - name: make sure a service is running
72 | systemd:
73 | state: started
74 | name: kafka-server
75 |
--------------------------------------------------------------------------------
/chapter2/ansible_playbook/roles/kafka/templates/server.properties.j2:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements. See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License. You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | # see kafka.server.KafkaConfig for additional details and defaults
17 |
18 | ############################# Server Basics #############################
19 |
20 | # The id of the broker. This must be set to a unique integer for each broker.
21 | broker.id={{ brokerid }}
22 |
23 | ############################# Socket Server Settings #############################
24 |
25 | # The address the socket server listens on. It will get the value returned from
26 | # java.net.InetAddress.getCanonicalHostName() if not configured.
27 | # FORMAT:
28 | # listeners = listener_name://host_name:port
29 | # EXAMPLE:
30 | # listeners = PLAINTEXT://your.host.name:9092
31 | #listeners=PLAINTEXT://:9092
32 | listeners=PLAINTEXT://0.0.0.0:9092
33 |
34 | # Hostname and port the broker will advertise to producers and consumers. If not set,
35 | # it uses the value for "listeners" if configured. Otherwise, it will use the value
36 | # returned from java.net.InetAddress.getCanonicalHostName().
37 | #advertised.listeners=PLAINTEXT://your.host.name:9092
38 | advertised.listeners=PLAINTEXT://{{ inventory_hostname }}:9092
39 |
40 | # Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
41 | #listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
42 |
43 | # The number of threads that the server uses for receiving requests from the network and sending responses to the network
44 | num.network.threads=4
45 |
46 | # The number of threads that the server uses for processing requests, which may include disk I/O
47 | num.io.threads=8
48 |
49 | # The send buffer (SO_SNDBUF) used by the socket server
50 | socket.send.buffer.bytes=102400
51 |
52 | # The receive buffer (SO_RCVBUF) used by the socket server
53 | socket.receive.buffer.bytes=102400
54 |
55 | # The maximum size of a request that the socket server will accept (protection against OOM)
56 | socket.request.max.bytes=104857600
57 |
58 |
59 | ############################# Log Basics #############################
60 |
61 | # A comma separated list of directories under which to store log files
62 | log.dirs={{ dir_path }}
63 |
64 | # The default number of log partitions per topic. More partitions allow greater
65 | # parallelism for consumption, but this will also result in more files across
66 | # the brokers.
67 | num.partitions=1
68 |
69 | # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
70 | # This value is recommended to be increased for installations with data dirs located in RAID array.
71 | num.recovery.threads.per.data.dir=1
72 |
73 | ############################# Internal Topic Settings #############################
74 | # The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
75 | # For anything other than development testing, a value greater than 1, such as 3, is recommended to ensure availability.
76 | offsets.topic.replication.factor=3
77 | transaction.state.log.replication.factor=3
78 | transaction.state.log.min.isr=2
79 |
80 | ############################# Log Flush Policy #############################
81 |
82 | # Messages are immediately written to the filesystem but by default we only fsync() to sync
83 | # the OS cache lazily. The following configurations control the flush of data to disk.
84 | # There are a few important trade-offs here:
85 | # 1. Durability: Unflushed data may be lost if you are not using replication.
86 | # 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
87 | # 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
88 | # The settings below allow one to configure the flush policy to flush data after a period of time or
89 | # every N messages (or both). This can be done globally and overridden on a per-topic basis.
90 |
91 | # The number of messages to accept before forcing a flush of data to disk
92 | #log.flush.interval.messages=10000
93 |
94 | # The maximum amount of time a message can sit in a log before we force a flush
95 | #log.flush.interval.ms=1000
96 |
97 | ############################# Log Retention Policy #############################
98 |
99 | # The following configurations control the disposal of log segments. The policy can
100 | # be set to delete segments after a period of time, or after a given size has accumulated.
101 | # A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
102 | # from the end of the log.
103 |
104 | # The minimum age of a log file to be eligible for deletion due to age
105 | log.retention.hours=72
106 |
107 | # A size-based retention policy for logs. Segments are pruned from the log unless the remaining
108 | # segments drop below log.retention.bytes. Functions independently of log.retention.hours.
109 | #log.retention.bytes=1073741824
110 |
111 | # The maximum size of a log segment file. When this size is reached a new log segment will be created.
112 | log.segment.bytes=1073741824
113 |
114 | # The interval at which log segments are checked to see if they can be deleted according
115 | # to the retention policies
116 | log.retention.check.interval.ms=300000
117 |
118 | ############################# Zookeeper #############################
119 |
120 | # Zookeeper connection string (see zookeeper docs for details).
121 | # This is a comma separated host:port pairs, each corresponding to a zk
122 | # server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
123 | # You can also append an optional chroot string to the urls to specify the
124 | # root directory for all kafka znodes.
125 | zookeeper.connect={{ zookeeperinfo }}
126 |
127 | # Timeout in ms for connecting to zookeeper
128 | zookeeper.connection.timeout.ms=6000
129 |
130 |
131 | ############################# Group Coordinator Settings #############################
132 |
133 | # The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
134 | # The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
135 | # The default value for this is 3 seconds.
136 | # We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
137 | # However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
138 | group.initial.rebalance.delay.ms=3000
139 |
--------------------------------------------------------------------------------
/chapter2/ansible_playbook/roles/kafkaexporter/files/kafka-exporter-stop.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | PIDS=$(ps ax | grep -i 'kafka\_exporter' | awk '{print $1}')
3 |
4 | if [ -z "$PIDS" ]; then
5 | echo "No kafka_exporter to stop"
6 | exit 1
7 | else
8 | kill -s TERM $PIDS
9 | fi
--------------------------------------------------------------------------------
/chapter2/ansible_playbook/roles/kafkaexporter/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: download kafka exporter from web
3 | get_url:
4 | url: https://github.com/danielqsj/kafka_exporter/releases/download/v1.2.0/kafka_exporter-1.2.0.linux-386.tar.gz
5 | dest: /opt/
6 | mode: '0600'
7 |
8 | - name: unarchive kafka exporter
9 | unarchive:
10 | src: /opt/kafka_exporter-1.2.0.linux-386.tar.gz
11 | dest: /usr/local/
12 | remote_src: yes
13 |
14 | - name: move to kafka_exporter
15 | copy:
16 | remote_src: yes
17 | src: /usr/local/kafka_exporter-1.2.0.linux-386/kafka_exporter
18 | dest: /usr/sbin/
19 | mode: '0755'
20 |
21 | - name: copy kafka exporter stop
22 | copy:
23 | src: kafka-exporter-stop.sh
24 | dest: /usr/sbin/kafka-exporter-stop.sh
25 | mode: '0755'
26 |
27 | - name: copy kafka exporter in systemd
28 | template:
29 | src: kafka-exporter.service.j2
30 | dest: /etc/systemd/system/kafka-exporter.service
31 | owner: root
32 | group: root
33 | mode: '0644'
34 | backup: no
35 |
36 | - name: just force systemd to reload configs
37 | systemd:
38 | daemon_reload: yes
39 |
40 | - name: make sure a service is running
41 | systemd:
42 | state: started
43 | name: kafka-exporter
44 | enabled: yes
--------------------------------------------------------------------------------
/chapter2/ansible_playbook/roles/kafkaexporter/templates/kafka-exporter.service.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=kafka-exporter
3 | After=network.target
4 |
5 | [Service]
6 | ExecStart=/usr/sbin/kafka_exporter --kafka.server={{ inventory_hostname }}:9092
7 | ExecStop=/usr/sbin/kafka-exporter-stop.sh
8 |
9 | [Install]
10 | WantedBy=multi-user.target
--------------------------------------------------------------------------------
/chapter2/ansible_playbook/roles/kerberos/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: install krb5
3 | yum:
4 | name: ['krb5-server', 'krb5-libs', 'krb5-workstation']
5 | state: latest
6 |
7 | - name: copy kdc conf
8 | template:
9 | src: kdc.conf.j2
10 | dest: /var/kerberos/krb5kdc/kdc.conf
11 | owner: root
12 | group: root
13 | mode: '0644'
14 | backup: no
15 |
16 | - name: copy krb5 conf
17 | template:
18 | src: krb5.conf.j2
19 | dest: /etc/krb5.conf
20 | owner: root
21 | group: root
22 | mode: '0644'
23 | backup: no
24 |
25 | - name: replace string
26 | replace:
27 | path: /var/kerberos/krb5kdc/kadm5.acl
28 | regexp: 'EXAMPLE.COM'
29 | replace: "{{ krb_realm }}"
30 |
31 | - name: check principal exist
32 | stat:
33 | path: /var/kerberos/krb5kdc/principal
34 | register: stat_result
35 |
36 | - name: create KDC database
37 | shell: kdb5_util create -r {{ krb_realm }} -P {{ kadmin_pass }} -s
38 | when: not stat_result.stat.exists
39 |
40 | - name: create root
41 | shell: |
42 | kadmin.local -q "addprinc -pw {{ kadmin_pass }} root/admin"
43 | kadmin.local -q "add_principal -randkey peter-kafka01.foo.bar@FOO.BAR"
44 | kadmin.local -q "add_principal -randkey peter-kafka02.foo.bar@FOO.BAR"
45 | kadmin.local -q "add_principal -randkey peter-kafka03.foo.bar@FOO.BAR"
46 | kadmin.local -q "add_principal -randkey peter-zk01.foo.bar@FOO.BAR"
47 | kadmin.local -q "add_principal -randkey peter-zk02.foo.bar@FOO.BAR"
48 | kadmin.local -q "add_principal -randkey peter-zk03.foo.bar@FOO.BAR"
49 |
50 | - name: make sure a service is running
51 | systemd:
52 | state: started
53 | name: "{{ item }}"
54 | enabled: yes
55 | with_items:
56 | - krb5kdc
57 | - kadmin
--------------------------------------------------------------------------------
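Once this role has created the realm and principals, a simple sanity check is to request and list a ticket for the root/admin principal it adds (a sketch; the password is the kadmin_pass value from group_vars/all.yml):

kinit root/admin
klist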
/chapter2/ansible_playbook/roles/kerberos/templates/kdc.conf.j2:
--------------------------------------------------------------------------------
1 | [kdcdefaults]
2 | kdc_ports = 88
3 | kdc_tcp_ports = 88
4 |
5 | [realms]
6 | {{ krb_realm }} = {
7 | #master_key_type = aes256-cts
8 | acl_file = /var/kerberos/krb5kdc/kadm5.acl
9 | dict_file = /usr/share/dict/words
10 | admin_keytab = /var/kerberos/krb5kdc/kadm5.keytab
11 | supported_enctypes = aes256-cts:normal aes128-cts:normal des3-hmac-sha1:normal arcfour-hmac:normal camellia256-cts:normal camellia128-cts:normal des-hmac-sha1:normal des-cbc-md5:normal des-cbc-crc:normal
12 | }
--------------------------------------------------------------------------------
/chapter2/ansible_playbook/roles/kerberos/templates/krb5.conf.j2:
--------------------------------------------------------------------------------
1 | # Configuration snippets may be placed in this directory as well
2 | includedir /etc/krb5.conf.d/
3 |
4 | [logging]
5 | default = FILE:/var/log/krb5libs.log
6 | kdc = FILE:/var/log/krb5kdc.log
7 | admin_server = FILE:/var/log/kadmind.log
8 |
9 | [libdefaults]
10 | dns_lookup_realm = false
11 | ticket_lifetime = 24h
12 | rdns = false
13 | default_realm = {{ krb_realm }}
14 |
15 | [realms]
16 | {{ krb_realm }} = {
17 | kdc = {{ kdc_server }}
18 | admin_server = {{ kdc_server }}
19 | }
20 |
21 | [domain_realm]
22 | .{{ krb_dns }} = {{ krb_realm }}
23 | {{ krb_dns }} = {{ krb_realm }}
--------------------------------------------------------------------------------
/chapter2/ansible_playbook/roles/zookeeper/files/zookeeper-server.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=zookeeper-server
3 | After=network.target
4 |
5 | [Service]
6 | Type=forking
7 | User=zookeeper
8 | Group=zookeeper
9 | SyslogIdentifier=zookeeper-server
10 | WorkingDirectory=/usr/local/zookeeper
11 | Restart=always
12 | RestartSec=0s
13 | ExecStart=/usr/local/zookeeper/bin/zkServer.sh start
14 | ExecStop=/usr/local/zookeeper/bin/zkServer.sh stop
15 | ExecReload=/usr/local/zookeeper/bin/zkServer.sh restart
16 |
17 | [Install]
18 | WantedBy=multi-user.target
--------------------------------------------------------------------------------
/chapter2/ansible_playbook/roles/zookeeper/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart zookeeper-server
3 | systemd:
4 | name: zookeeper-server
5 | state: restarted
--------------------------------------------------------------------------------
/chapter2/ansible_playbook/roles/zookeeper/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: add the group zookeeper
3 | group:
4 | name: zookeeper
5 | state: present
6 |
7 | - name: add the user zookeeper
8 | user:
9 | name: zookeeper
10 | group: zookeeper
11 | state: present
12 |
13 | - name: stop zookeeper-server
14 | systemd:
15 | state: stopped
16 | name: zookeeper-server
17 | ignore_errors: yes
18 |
19 | - name: remove directory zk
20 | file:
21 | path: "{{ dir_path }}"
22 | state: absent
23 |
24 | - name: make dir zookeeper
25 | file:
26 | path: "{{ dir_path }}"
27 | state: directory
28 | owner: zookeeper
29 | group: zookeeper
30 | mode: '0755'
31 |
32 | - name: download zookeeper from web
33 | get_url:
34 | url: https://archive.apache.org/dist/zookeeper/{{ zookeeperversion }}/apache-{{ zookeeperversion }}-bin.tar.gz
35 | dest: /opt/
36 | mode: '0600'
37 |
38 | - name: unarchive zookeeper
39 | unarchive:
40 | src: /opt/apache-{{ zookeeperversion }}-bin.tar.gz
41 | dest: /usr/local
42 | owner: zookeeper
43 | group: zookeeper
44 | remote_src: yes
45 |
46 | - name: setup link zookeeper
47 | file:
48 | path: /usr/local/zookeeper
49 | src: /usr/local/apache-{{ zookeeperversion }}-bin
50 | owner: zookeeper
51 | group: zookeeper
52 | state: link
53 | force: yes
54 |
55 | - name: copy zookeeper server conf files
56 | template:
57 | src: zoo.cfg.j2
58 | dest: /usr/local/zookeeper/conf/zoo.cfg
59 | owner: zookeeper
60 | group: zookeeper
61 | mode: '0644'
62 | backup: no
63 |
64 | - name: create myid
65 | shell: echo {{ myid }} > /data/zk/myid
66 |
67 | - name: change file ownership, group and permissions
68 | file:
69 | path: /data/zk/myid
70 | owner: zookeeper
71 | group: zookeeper
72 | mode: '0644'
73 |
74 | - name: copy zookeeper server in systemd
75 | copy:
76 | src: zookeeper-server.service
77 | dest: /etc/systemd/system/zookeeper-server.service
78 | owner: root
79 | group: root
80 | mode: '0644'
81 | backup: no
82 |
83 | - name: just force systemd to reload configs
84 | systemd:
85 | daemon_reload: yes
86 |
87 | - name: make sure a service is running
88 | systemd:
89 | state: started
90 | name: zookeeper-server
91 | enabled: yes
92 |
--------------------------------------------------------------------------------
/chapter2/ansible_playbook/roles/zookeeper/templates/zoo.cfg.j2:
--------------------------------------------------------------------------------
1 | tickTime=2000
2 | initLimit=10
3 | syncLimit=5
4 | dataDir=/data/zk
5 | clientPort=2181
6 | autopurge.snapRetainCount=3
7 | autopurge.purgeInterval=1
8 | {% for host in groups['zkhosts'] %}
9 | server.{{ host | regex_search('(peter-zk0[1-9]\\.foo\\.bar)') | regex_replace('\\.foo\\.bar', '') | regex_replace('^peter-zk0', '') }}={{ host }}:2888:3888;2181
10 | {% endfor %}
--------------------------------------------------------------------------------
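With the three zkhosts defined in the inventory, the loop at the end of this template renders the ensemble entries as:

server.1=peter-zk01.foo.bar:2888:3888;2181
server.2=peter-zk02.foo.bar:2888:3888;2181
server.3=peter-zk03.foo.bar:2888:3888;2181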
/chapter2/ansible_playbook/site.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - import_playbook: zookeeper.yml
3 | - import_playbook: kafka.yml
--------------------------------------------------------------------------------
/chapter2/ansible_playbook/zookeeper.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: zkhosts
3 | become: true
4 | connection: ssh
5 | roles:
6 | - common
7 | - zookeeper
--------------------------------------------------------------------------------
/chapter2/예제/예제 2-1.txt:
--------------------------------------------------------------------------------
1 | 172.31.3.209 peter-ansible01.foo.bar peter-ansible01
2 | 172.31.0.186 peter-zk01.foo.bar peter-zk01
3 | 172.31.12.195 peter-zk02.foo.bar peter-zk02
4 | 172.31.3.173 peter-zk03.foo.bar peter-zk03
5 | 172.31.5.59 peter-kafka01.foo.bar peter-kafka01
6 | 172.31.11.46 peter-kafka02.foo.bar peter-kafka02
7 | 172.31.8.78 peter-kafka03.foo.bar peter-kafka03
--------------------------------------------------------------------------------
/chapter3/3_commands.txt:
--------------------------------------------------------------------------------
1 | P86
2 | cd /data/kafka-logs/
3 | ls
4 |
5 | P87
6 | cd peter-overview01-0
7 | ls
8 |
9 | P88
10 | xxd 00000000000000000000.log
11 |
12 | P97
13 | /usr/local/kafka/bin/kafka-topics.sh --bootstrap-server peter-kafka01.foo.bar:9092 --create --topic peter-basic01 --partitions 1 --replication-factor 3
--------------------------------------------------------------------------------
/chapter3/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0"
3 | xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
4 | xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
5 | <modelVersion>4.0.0</modelVersion>
6 |
7 | <groupId>org.example</groupId>
8 | <artifactId>KafkaClientExample</artifactId>
9 | <version>1.0</version>
10 |
11 |
12 | <dependencies>
13 | <dependency>
14 | <groupId>org.apache.kafka</groupId>
15 | <artifactId>kafka-clients</artifactId>
16 | <version>2.7.0</version>
17 | </dependency>
18 | </dependencies>
19 |
20 | <properties>
21 | <maven.compiler.source>8</maven.compiler.source>
22 | <maven.compiler.target>8</maven.compiler.target>
23 | </properties>
24 |
25 | </project>
--------------------------------------------------------------------------------
/chapter3/src/main/java/ConsumerAsync.java:
--------------------------------------------------------------------------------
1 | import org.apache.kafka.clients.consumer.ConsumerRecord;
2 | import org.apache.kafka.clients.consumer.ConsumerRecords;
3 | import org.apache.kafka.clients.consumer.KafkaConsumer;
4 |
5 | import java.util.Arrays;
6 | import java.util.Properties;
7 |
8 | public class ConsumerAsync {
9 | public static void main(String[] args) {
10 | Properties props = new Properties(); //Create a Properties object.
11 | props.put("bootstrap.servers", "peter-kafka01.foo.bar:9092,peter-kafka02.foo.bar:9092,peter-kafka03.foo.bar:9092"); //Define the broker list.
12 | props.put("group.id", "peter-consumer01"); //Define the consumer group id.
13 | props.put("enable.auto.commit", "false"); //Do not use auto commit.
14 | props.put("auto.offset.reset", "latest"); //If no consumer offset is found, reset to latest, so messages are consumed from the most recent offset.
15 | props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); //Strings are used, so specify StringDeserializer.
16 | props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
17 | KafkaConsumer consumer = new KafkaConsumer<>(props); //Create a new consumer, passing in the Properties object.
18 | consumer.subscribe(Arrays.asList("peter-basic01")); //Specify the topic to subscribe to.
19 |
20 | try {
21 | while (true) { //Start an infinite loop; the consumer keeps calling poll() to fetch messages from Kafka.
22 | ConsumerRecords records = consumer.poll(1000); //The consumer keeps polling; the timeout sets how long poll() blocks.
23 | for (ConsumerRecord record : records) { //poll() returns a batch of records rather than a single message, so iterate over them.
24 | System.out.printf("Topic: %s, Partition: %s, Offset: %d, Key: %s, Value: %s\n",
25 | record.topic(), record.partition(), record.offset(), record.key(), record.value());
26 | }
27 | consumer.commitAsync(); //After processing all messages read in the current batch, asynchronously commit the current offset before polling for more messages.
28 | }
29 | } catch (Exception e){
30 | e.printStackTrace();
31 | } finally {
32 | consumer.close(); //Close the consumer.
33 | }
34 | }
35 | }
--------------------------------------------------------------------------------
/chapter3/src/main/java/ConsumerAuto.java:
--------------------------------------------------------------------------------
1 | import org.apache.kafka.clients.consumer.ConsumerRecord;
2 | import org.apache.kafka.clients.consumer.ConsumerRecords;
3 | import org.apache.kafka.clients.consumer.KafkaConsumer;
4 |
5 | import java.util.Arrays;
6 | import java.util.Properties;
7 |
8 | public class ConsumerAuto {
9 | public static void main(String[] args) {
10 | Properties props = new Properties(); //Create a Properties object.
11 | props.put("bootstrap.servers", "peter-kafka01.foo.bar:9092,peter-kafka02.foo.bar:9092,peter-kafka03.foo.bar:9092"); //Define the broker list.
12 | props.put("group.id", "peter-consumer01"); //Define the consumer group id.
13 | props.put("enable.auto.commit", "true"); //Use auto commit.
14 | props.put("auto.offset.reset", "latest"); //If no consumer offset is found, reset to latest, so messages are consumed from the most recent offset.
15 | props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); //Strings are used, so specify StringDeserializer.
16 | props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
17 | KafkaConsumer consumer = new KafkaConsumer<>(props); //Create a new consumer, passing in the Properties object.
18 | consumer.subscribe(Arrays.asList("peter-basic01")); //Specify the topic to subscribe to.
19 |
20 | try {
21 | while (true) { //Start an infinite loop; the consumer keeps calling poll() to fetch messages from Kafka.
22 | ConsumerRecords records = consumer.poll(1000); //The consumer keeps polling; the timeout sets how long poll() blocks.
23 | for (ConsumerRecord record : records) { //poll() returns a batch of records rather than a single message, so iterate over them.
24 | System.out.printf("Topic: %s, Partition: %s, Offset: %d, Key: %s, Value: %s\n",
25 | record.topic(), record.partition(), record.offset(), record.key(), record.value());
26 | }
27 | }
28 | } catch (Exception e){
29 | e.printStackTrace();
30 | } finally {
31 | consumer.close(); //Close the consumer.
32 | }
33 | }
34 | }
--------------------------------------------------------------------------------
/chapter3/src/main/java/ConsumerSync.java:
--------------------------------------------------------------------------------
1 | import org.apache.kafka.clients.consumer.ConsumerRecord;
2 | import org.apache.kafka.clients.consumer.ConsumerRecords;
3 | import org.apache.kafka.clients.consumer.KafkaConsumer;
4 |
5 | import java.util.Arrays;
6 | import java.util.Properties;
7 |
8 | public class ConsumerSync {
9 | public static void main(String[] args) {
10 | Properties props = new Properties(); //Create a Properties object.
11 | props.put("bootstrap.servers", "peter-kafka01.foo.bar:9092,peter-kafka02.foo.bar:9092,peter-kafka03.foo.bar:9092"); //Define the broker list.
12 | props.put("group.id", "peter-consumer01"); //Define the consumer group id.
13 | props.put("enable.auto.commit", "false"); //Do not use auto commit.
14 | props.put("auto.offset.reset", "latest"); //If no consumer offset is found, reset to latest, so messages are consumed from the most recent offset.
15 | props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); //Strings are used, so specify StringDeserializer.
16 | props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
17 | KafkaConsumer consumer = new KafkaConsumer<>(props); //Create a new consumer, passing in the Properties object.
18 | consumer.subscribe(Arrays.asList("peter-basic01")); //Specify the topic to subscribe to.
19 |
20 | try {
21 | while (true) { //Start an infinite loop; the consumer keeps calling poll() to fetch messages from Kafka.
22 | ConsumerRecords records = consumer.poll(1000); //The consumer keeps polling; the timeout sets how long poll() blocks.
23 | for (ConsumerRecord record : records) { //poll() returns a batch of records rather than a single message, so iterate over them.
24 | System.out.printf("Topic: %s, Partition: %s, Offset: %d, Key: %s, Value: %s\n",
25 | record.topic(), record.partition(), record.offset(), record.key(), record.value());
26 | }
27 | consumer.commitSync(); //After processing all messages read in the current batch, commit the current offset before polling for more messages.
28 | }
29 | } catch (Exception e){
30 | e.printStackTrace();
31 | } finally {
32 | consumer.close(); //Close the consumer.
33 | }
34 | }
35 | }
--------------------------------------------------------------------------------
/chapter3/src/main/java/PeterProducerCallback.java:
--------------------------------------------------------------------------------
1 | import org.apache.kafka.clients.producer.Callback;
2 | import org.apache.kafka.clients.producer.ProducerRecord;
3 | import org.apache.kafka.clients.producer.RecordMetadata;
4 |
5 | public class PeterProducerCallback implements Callback { //A class implementing org.apache.kafka.clients.producer.Callback is required to use callbacks.
6 | private ProducerRecord record;
7 |
8 | public PeterProducerCallback(ProducerRecord record) {
9 | this.record = record;
10 | }
11 |
12 | @Override
13 | public void onCompletion(RecordMetadata metadata, Exception e) {
14 | if (e != null) {
15 | e.printStackTrace(); //If Kafka returns an error, onCompletion() receives the exception; production code needs additional error handling.
16 | } else {
17 | System.out.printf("Topic: %s, Partition: %d, Offset: %d, Key: %s, Received Message: %s\n", metadata.topic(), metadata.partition()
18 | , metadata.offset(), record.key(), record.value());
19 | }
20 | }
21 | }
--------------------------------------------------------------------------------
/chapter3/src/main/java/ProducerAsync.java:
--------------------------------------------------------------------------------
1 | import org.apache.kafka.clients.producer.KafkaProducer;
2 | import org.apache.kafka.clients.producer.Producer;
3 | import org.apache.kafka.clients.producer.ProducerRecord;
4 |
5 | import java.util.Properties;
6 |
7 | public class ProducerAsync {
8 | public static void main(String[] args) {
9 | Properties props = new Properties(); //Create a Properties object.
10 | props.put("bootstrap.servers", "peter-kafka01.foo.bar:9092,peter-kafka02.foo.bar:9092,peter-kafka03.foo.bar:9092"); //Define the broker list.
11 | props.put("key.serializer",
12 | "org.apache.kafka.common.serialization.StringSerializer"); //String message keys and values are used, so specify the built-in StringSerializer.
13 | props.put("value.serializer",
14 | "org.apache.kafka.common.serialization.StringSerializer");
15 |
16 | Producer producer = new KafkaProducer<>(props); //Create a new producer, passing in the Properties object.
17 |
18 | try {
19 | for (int i = 0; i < 3; i++) {
20 | ProducerRecord record = new ProducerRecord<>("peter-basic01", "Apache Kafka is a distributed streaming platform - " + i); //Create a ProducerRecord object.
21 | producer.send(record, new PeterProducerCallback(record)); //Pass a callback object along when the producer sends the record.
22 | }
23 | } catch (Exception e){
24 | e.printStackTrace();
25 | } finally {
26 | producer.close(); // Close the producer
27 | }
28 | }
29 | }
--------------------------------------------------------------------------------
/chapter3/src/main/java/ProducerFireForgot.java:
--------------------------------------------------------------------------------
1 | import org.apache.kafka.clients.producer.KafkaProducer;
2 | import org.apache.kafka.clients.producer.Producer;
3 | import org.apache.kafka.clients.producer.ProducerRecord;
4 |
5 | import java.util.Properties;
6 |
7 | public class ProducerFireForgot {
8 | public static void main(String[] args) {
9 | Properties props = new Properties(); //Create a Properties object.
10 | props.put("bootstrap.servers", "peter-kafka01.foo.bar:9092,peter-kafka02.foo.bar:9092,peter-kafka03.foo.bar:9092"); //Define the broker list.
11 | props.put("key.serializer",
12 | "org.apache.kafka.common.serialization.StringSerializer"); //String message keys and values are used, so specify the built-in StringSerializer.
13 | props.put("value.serializer",
14 | "org.apache.kafka.common.serialization.StringSerializer");
15 |
16 | Producer producer = new KafkaProducer<>(props); //Create a new producer, passing in the Properties object.
17 |
18 | try {
19 | for (int i = 0; i < 3; i++) {
20 | ProducerRecord record = new ProducerRecord<>("peter-basic01", "Apache Kafka is a distributed streaming platform - " + i); //Create a ProducerRecord object.
21 | producer.send(record); //send() returns RecordMetadata as a Java Future object, but the return value is ignored, so there is no way to know whether the message was delivered successfully.
22 | }
23 | } catch (Exception e){
24 | e.printStackTrace(); //Errors after the message has been sent to the Kafka broker are ignored, but exceptions raised before sending can be handled here.
25 | } finally {
26 | producer.close(); // Close the producer
27 | }
28 | }
29 | }
--------------------------------------------------------------------------------
/chapter3/src/main/java/ProducerSync.java:
--------------------------------------------------------------------------------
1 | import org.apache.kafka.clients.producer.KafkaProducer;
2 | import org.apache.kafka.clients.producer.Producer;
3 | import org.apache.kafka.clients.producer.ProducerRecord;
4 | import org.apache.kafka.clients.producer.RecordMetadata;
5 |
6 | import java.util.Properties;
7 |
8 | public class ProducerSync {
9 | public static void main(String[] args) {
10 | Properties props = new Properties(); //Create a Properties object.
11 | props.put("bootstrap.servers", "peter-kafka01.foo.bar:9092,peter-kafka02.foo.bar:9092,peter-kafka03.foo.bar:9092"); //Define the broker list.
12 | props.put("key.serializer",
13 | "org.apache.kafka.common.serialization.StringSerializer"); //String message keys and values are used, so specify the built-in StringSerializer.
14 | props.put("value.serializer",
15 | "org.apache.kafka.common.serialization.StringSerializer");
16 |
17 | Producer producer = new KafkaProducer<>(props); //Create a new producer, passing in the Properties object.
18 |
19 | try {
20 | for (int i = 0; i < 3; i++) {
21 | ProducerRecord record = new ProducerRecord<>("peter-basic01", "Apache Kafka is a distributed streaming platform - " + i); //Create a ProducerRecord object.
22 | RecordMetadata metadata = producer.send(record).get(); //Wait for Kafka's response with get(); if the message is not sent successfully an exception is thrown, otherwise RecordMetadata is returned.
23 | System.out.printf("Topic: %s, Partition: %d, Offset: %d, Key: %s, Received Message: %s\n", metadata.topic(), metadata.partition()
24 | , metadata.offset(), record.key(), record.value());
25 | }
26 | } catch (Exception e){
27 | e.printStackTrace(); //An exception is thrown if an error occurs before or while the message is being sent to Kafka.
28 | } finally {
29 | producer.close(); // Close the producer
30 | }
31 | }
32 | }
--------------------------------------------------------------------------------
/chapter3/예제/예제 3-1.txt:
--------------------------------------------------------------------------------
1 | import org.apache.kafka.clients.producer.KafkaProducer;
2 | import org.apache.kafka.clients.producer.Producer;
3 | import org.apache.kafka.clients.producer.ProducerRecord;
4 |
5 | import java.util.Properties;
6 |
7 | public class ProducerFireForgot {
8 | public static void main(String[] args) {
9 | Properties props = new Properties(); //Create a Properties object.
10 | props.put("bootstrap.servers", "peter-kafka01.foo.bar:9092,peter-kafka02.foo.bar:9092,peter-kafka03.foo.bar:9092"); //Define the broker list.
11 | props.put("key.serializer",
12 | "org.apache.kafka.common.serialization.StringSerializer"); //String message keys and values are used, so specify the built-in StringSerializer.
13 | props.put("value.serializer",
14 | "org.apache.kafka.common.serialization.StringSerializer");
15 |
16 | Producer producer = new KafkaProducer<>(props); //Create a new producer, passing in the Properties object.
17 |
18 | try {
19 | for (int i = 0; i < 10; i++) {
20 | ProducerRecord record = new ProducerRecord<>("peter-basic01", "Apache Kafka is a distributed streaming platform - " + i); //Create a ProducerRecord object.
21 | producer.send(record); //send() returns RecordMetadata as a Java Future object, but the return value is ignored, so there is no way to know whether the message was delivered successfully.
22 | }
23 | } catch (Exception e){
24 | e.printStackTrace(); //Errors after the message has been sent to the Kafka broker are ignored, but exceptions raised before sending can be handled here.
25 | } finally {
26 | producer.close(); // Close the producer
27 | }
28 | }
29 | }
--------------------------------------------------------------------------------
/chapter3/예제/예제 3-2.txt:
--------------------------------------------------------------------------------
1 | import org.apache.kafka.clients.producer.KafkaProducer;
2 | import org.apache.kafka.clients.producer.Producer;
3 | import org.apache.kafka.clients.producer.ProducerRecord;
4 | import org.apache.kafka.clients.producer.RecordMetadata;
5 |
6 | import java.util.Properties;
7 |
8 | public class ProducerSync {
9 | public static void main(String[] args) {
10 | Properties props = new Properties(); //Create a Properties object.
11 | props.put("bootstrap.servers", "peter-kafka01.foo.bar:9092,peter-kafka02.foo.bar:9092,peter-kafka03.foo.bar:9092"); //Define the broker list.
12 | props.put("key.serializer",
13 | "org.apache.kafka.common.serialization.StringSerializer"); //String message keys and values are used, so specify the built-in StringSerializer.
14 | props.put("value.serializer",
15 | "org.apache.kafka.common.serialization.StringSerializer");
16 |
17 | Producer producer = new KafkaProducer<>(props); //Create a new producer, passing in the Properties object.
18 |
19 | try {
20 | for (int i = 0; i < 3; i++) {
21 | ProducerRecord record = new ProducerRecord<>("peter-basic01", "Apache Kafka is a distributed streaming platform - " + i); //Create a ProducerRecord object.
22 | RecordMetadata metadata = producer.send(record).get(); //Wait for Kafka's response with get(); if the message is not sent successfully an exception is thrown, otherwise RecordMetadata is returned.
23 | System.out.printf("Topic: %s, Partition: %d, Offset: %d, Key: %s, Received Message: %s\n", metadata.topic(), metadata.partition()
24 | , metadata.offset(), record.key(), record.value());
25 | }
26 | } catch (Exception e){
27 | e.printStackTrace(); //An exception is thrown if an error occurs before or while the message is being sent to Kafka.
28 | } finally {
29 | producer.close(); // Close the producer
30 | }
31 | }
32 | }
--------------------------------------------------------------------------------
/chapter3/예제/예제 3-3.txt:
--------------------------------------------------------------------------------
1 | import org.apache.kafka.clients.producer.Callback;
2 | import org.apache.kafka.clients.producer.ProducerRecord;
3 | import org.apache.kafka.clients.producer.RecordMetadata;
4 |
5 | public class PeterProducerCallback implements Callback { //A class implementing org.apache.kafka.clients.producer.Callback is required to use callbacks.
6 | private ProducerRecord record;
7 |
8 | public PeterProducerCallback(ProducerRecord record) {
9 | this.record = record;
10 | }
11 |
12 | @Override
13 | public void onCompletion(RecordMetadata metadata, Exception e) {
14 | if (e != null) {
15 | e.printStackTrace(); //If Kafka returns an error, onCompletion() receives the exception; production code needs additional error handling.
16 | } else {
17 | System.out.printf("Topic: %s, Partition: %d, Offset: %d, Key: %s, Received Message: %s\n", metadata.topic(), metadata.partition()
18 | , metadata.offset(), record.key(), record.value());
19 | }
20 | }
21 | }
--------------------------------------------------------------------------------
/chapter3/예제/예제 3-4.txt:
--------------------------------------------------------------------------------
1 | import org.apache.kafka.clients.producer.KafkaProducer;
2 | import org.apache.kafka.clients.producer.Producer;
3 | import org.apache.kafka.clients.producer.ProducerRecord;
4 |
5 | import java.util.Properties;
6 |
7 | public class ProducerAsync {
8 | public static void main(String[] args) {
9 | Properties props = new Properties(); //Create a Properties object.
10 | props.put("bootstrap.servers", "peter-kafka01.foo.bar:9092,peter-kafka02.foo.bar:9092,peter-kafka03.foo.bar:9092"); //Define the broker list.
11 | props.put("key.serializer",
12 | "org.apache.kafka.common.serialization.StringSerializer"); //String message keys and values are used, so specify the built-in StringSerializer.
13 | props.put("value.serializer",
14 | "org.apache.kafka.common.serialization.StringSerializer");
15 |
16 | Producer producer = new KafkaProducer<>(props); //Create a new producer, passing in the Properties object.
17 |
18 | try {
19 | for (int i = 0; i < 3; i++) {
20 | ProducerRecord record = new ProducerRecord<>("peter-basic01", "Apache Kafka is a distributed streaming platform - " + i); //Create a ProducerRecord object.
21 | producer.send(record, new PeterProducerCallback(record)); //Pass a callback object along when the producer sends the record.
22 | }
23 | } catch (Exception e){
24 | e.printStackTrace();
25 | } finally {
26 | producer.close(); // Close the producer
27 | }
28 | }
29 | }
--------------------------------------------------------------------------------
/chapter3/예제/예제 3-5.txt:
--------------------------------------------------------------------------------
1 | import org.apache.kafka.clients.consumer.ConsumerRecord;
2 | import org.apache.kafka.clients.consumer.ConsumerRecords;
3 | import org.apache.kafka.clients.consumer.KafkaConsumer;
4 |
5 | import java.util.Arrays;
6 | import java.util.Properties;
7 |
8 | public class ConsumerAuto {
9 | public static void main(String[] args) {
10 | Properties props = new Properties(); //Create a Properties object.
11 | props.put("bootstrap.servers", "peter-kafka01.foo.bar:9092,peter-kafka02.foo.bar:9092,peter-kafka03.foo.bar:9092"); //Define the broker list.
12 | props.put("group.id", "peter-consumer01"); //Define the consumer group id.
13 | props.put("enable.auto.commit", "true"); //Use auto commit.
14 | props.put("auto.offset.reset", "latest"); //If no consumer offset is found, reset to latest, so messages are consumed from the most recent offset.
15 | props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); //Strings are used, so specify StringDeserializer.
16 | props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
17 | KafkaConsumer consumer = new KafkaConsumer<>(props); //Create a new consumer, passing in the Properties object.
18 | consumer.subscribe(Arrays.asList("peter-basic01")); //Specify the topic to subscribe to.
19 |
20 | try {
21 | while (true) { //Start an infinite loop; the consumer keeps calling poll() to fetch messages from Kafka.
22 | ConsumerRecords records = consumer.poll(1000); //The consumer keeps polling; the timeout sets how long poll() blocks.
23 | for (ConsumerRecord record : records) { //poll() returns a batch of records rather than a single message, so iterate over them.
24 | System.out.printf("Topic: %s, Partition: %s, Offset: %d, Key: %s, Value: %s\n",
25 | record.topic(), record.partition(), record.offset(), record.key(), record.value());
26 | }
27 | }
28 | } catch (Exception e){
29 | e.printStackTrace();
30 | } finally {
31 | consumer.close(); //Close the consumer.
32 | }
33 | }
34 | }
--------------------------------------------------------------------------------
/chapter3/예제/예제 3-6.txt:
--------------------------------------------------------------------------------
1 | import org.apache.kafka.clients.consumer.ConsumerRecord;
2 | import org.apache.kafka.clients.consumer.ConsumerRecords;
3 | import org.apache.kafka.clients.consumer.KafkaConsumer;
4 |
5 | import java.util.Arrays;
6 | import java.util.Properties;
7 |
8 | public class ConsumerSync {
9 | public static void main(String[] args) {
10 | Properties props = new Properties(); //Create a Properties object.
11 | props.put("bootstrap.servers", "peter-kafka01.foo.bar:9092,peter-kafka02.foo.bar:9092,peter-kafka03.foo.bar:9092"); //Define the broker list.
12 | props.put("group.id", "peter-consumer01"); //Define the consumer group id.
13 | props.put("enable.auto.commit", "false"); //Do not use auto commit.
14 | props.put("auto.offset.reset", "latest"); //If no consumer offset is found, reset to latest, so messages are consumed from the most recent offset.
15 | props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); //Strings are used, so specify StringDeserializer.
16 | props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
17 | KafkaConsumer consumer = new KafkaConsumer<>(props); //Create a new consumer, passing in the Properties object.
18 | consumer.subscribe(Arrays.asList("peter-basic01")); //Specify the topic to subscribe to.
19 |
20 | try {
21 | while (true) { //Start an infinite loop; the consumer keeps calling poll() to fetch messages from Kafka.
22 | ConsumerRecords records = consumer.poll(1000); //The consumer keeps polling; the timeout sets how long poll() blocks.
23 | for (ConsumerRecord record : records) { //poll() returns a batch of records rather than a single message, so iterate over them.
24 | System.out.printf("Topic: %s, Partition: %s, Offset: %d, Key: %s, Value: %s\n",
25 | record.topic(), record.partition(), record.offset(), record.key(), record.value());
26 | }
27 | consumer.commitSync(); //After processing all messages read in the current batch, synchronously commit the current offset before polling for more messages.
28 | }
29 | } catch (Exception e){
30 | e.printStackTrace();
31 | } finally {
32 | consumer.close(); //Close the consumer.
33 | }
34 | }
35 | }
--------------------------------------------------------------------------------
/chapter3/예제/예제 3-7.txt:
--------------------------------------------------------------------------------
1 | import org.apache.kafka.clients.consumer.ConsumerRecord;
2 | import org.apache.kafka.clients.consumer.ConsumerRecords;
3 | import org.apache.kafka.clients.consumer.KafkaConsumer;
4 |
5 | import java.util.Arrays;
6 | import java.util.Properties;
7 |
8 | public class ConsumerAsync {
9 | public static void main(String[] args) {
10 | Properties props = new Properties(); //Create a Properties object.
11 | props.put("bootstrap.servers", "peter-kafka01.foo.bar:9092,peter-kafka02.foo.bar:9092,peter-kafka03.foo.bar:9092"); //Define the broker list.
12 | props.put("group.id", "peter-consumer01"); //Define the consumer group id.
13 | props.put("enable.auto.commit", "false"); //Do not use auto commit.
14 | props.put("auto.offset.reset", "latest"); //If no consumer offset is found, reset to latest, so messages are consumed from the most recent offset.
15 | props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); //Strings are used, so specify StringDeserializer.
16 | props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
17 | KafkaConsumer consumer = new KafkaConsumer<>(props); //Create a new consumer, passing in the Properties object.
18 | consumer.subscribe(Arrays.asList("peter-basic01")); //Specify the topic to subscribe to.
19 |
20 | try {
21 | while (true) { //Start an infinite loop; the consumer keeps calling poll() to fetch messages from Kafka.
22 | ConsumerRecords records = consumer.poll(1000); //The consumer keeps polling; the timeout sets how long poll() blocks.
23 | for (ConsumerRecord record : records) { //poll() returns a batch of records rather than a single message, so iterate over them.
24 | System.out.printf("Topic: %s, Partition: %s, Offset: %d, Key: %s, Value: %s\n",
25 | record.topic(), record.partition(), record.offset(), record.key(), record.value());
26 | }
27 | consumer.commitAsync(); // After processing every message read in the current batch, asynchronously commit the current offsets before polling for more messages.
28 | }
29 | } catch (Exception e){
30 | e.printStackTrace();
31 | } finally {
32 | consumer.close(); // Close the consumer.
33 | }
34 | }
35 | }
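
Note: the bare commitAsync() call above does not surface commit failures, and asynchronous commits are not retried by the client. Where failed commits should at least be logged, the overload that accepts an OffsetCommitCallback can be used instead — a minimal sketch, not part of the repository file above; the logging body is illustrative:

import java.util.Map;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.consumer.OffsetCommitCallback;
import org.apache.kafka.common.TopicPartition;

// Replaces the plain consumer.commitAsync() call inside the while loop:
consumer.commitAsync(new OffsetCommitCallback() {
    @Override
    public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
        if (exception != null) {
            // The commit failed; log it (a later successful commit will cover these offsets).
            System.err.println("Offset commit failed for " + offsets + ": " + exception);
        }
    }
});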
--------------------------------------------------------------------------------
/chapter4/4_commands.txt:
--------------------------------------------------------------------------------
1 | P115
2 | /usr/local/kafka/bin/kafka-topics.sh --bootstrap-server peter-kafka01.foo.bar:9092 --create --topic peter-test01 --partitions 1 --replication-factor 3
3 | /usr/local/kafka/bin/kafka-topics.sh --bootstrap-server peter-kafka01.foo.bar:9092 --topic peter-test01 --describe
4 |
5 | P116
6 | /usr/local/kafka/bin/kafka-console-producer.sh --bootstrap-server peter-kafka01.foo.bar:9092 --topic peter-test01
7 | /usr/local/kafka/bin/kafka-dump-log.sh --print-data-log --files /data/kafka-logs/peter-test01-0/00000000000000000000.log
8 |
9 | P121
10 | cat /data/kafka-logs/replication-offset-checkpoint
11 |
12 | P122
13 | /usr/local/kafka/bin/kafka-console-producer.sh --bootstrap-server peter-kafka01.foo.bar:9092 --topic peter-test01
14 | cat /data/kafka-logs/replication-offset-checkpoint
15 |
16 | P132
17 | /usr/local/kafka/bin/kafka-topics.sh --bootstrap-server peter-kafka01.foo.bar:9092 --create --topic peter-test02 --partitions 1 --replication-factor 2
18 | /usr/local/kafka/bin/kafka-topics.sh --bootstrap-server peter-kafka01.foo.bar:9092 --topic peter-test02 --describe
19 |
20 | P133
21 | cat /data/kafka-logs/peter-test02-0/leader-epoch-checkpoint
22 |
23 | P134
24 | /usr/local/kafka/bin/kafka-console-producer.sh --bootstrap-server peter-kafka01.foo.bar:9092 --topic peter-test02
25 | cat /data/kafka-logs/peter-test02-0/leader-epoch-checkpoint
26 |
27 | P135
28 | sudo systemctl stop kafka-server
29 | sudo systemctl status kafka-server
30 | cat /data/kafka-logs/peter-test02-0/leader-epoch-checkpoint
31 |
32 | P136
33 | sudo systemctl start kafka-server
34 |
35 | P141
36 | /usr/local/kafka/bin/kafka-configs.sh --bootstrap-server peter-kafka01.foo.bar:9092 --broker 1 --describe --all
37 |
38 | P142
39 | /usr/local/kafka/bin/kafka-topics.sh --bootstrap-server peter-kafka01.foo.bar:9092 --create --topic peter-test03 --partitions 1 --replication-factor 3
40 | /usr/local/kafka/bin/kafka-console-producer.sh --bootstrap-server peter-kafka01.foo.bar:9092 --topic peter-test03
41 | /usr/local/kafka/bin/kafka-console-consumer.sh --bootstrap-server peter-kafka01.foo.bar:9092 --topic peter-test03 --from-beginning
42 |
43 | P143
44 | /usr/local/kafka/bin/kafka-configs.sh --bootstrap-server peter-kafka01.foo.bar:9092 --topic peter-test03 --add-config retention.ms=0 --alter
45 | /usr/local/kafka/bin/kafka-topics.sh --bootstrap-server peter-kafka01.foo.bar:9092 --topic peter-test03 --describe
46 | ls /data/kafka-logs/peter-test03-0/
47 |
48 | P144
49 | ls /data/kafka-logs/peter-test03-0/
50 | /usr/local/kafka/bin/kafka-configs.sh --bootstrap-server peter-kafka01.foo.bar:9092 --topic peter-test03 --delete-config retention.ms --alter
51 |
52 | P145
53 | /usr/local/kafka/bin/kafka-topics.sh --bootstrap-server peter-kafka01.foo.bar:9092 --topic peter-test03 --describe
--------------------------------------------------------------------------------
/chapter5/5_commands.txt:
--------------------------------------------------------------------------------
1 | P164
2 | vi /home/ec2-user/producer.config
3 |
4 | P165
5 | /usr/local/kafka/bin/kafka-console-producer.sh --bootstrap-server peter-kafka01.foo.bar:9092 --topic peter-test04 --producer.config /home/ec2-user/producer.config
6 |
7 | P166
8 | cd /data/kafka-logs/peter-test04-0/
9 | ls
10 | /usr/local/kafka/bin/kafka-dump-log.sh --print-data-log --files /data/kafka-logs/peter-test04-0/00000000000000000001.snapshot
11 |
12 | P175
13 | /usr/local/kafka/bin/kafka-topics.sh --bootstrap-server peter-kafka01.foo.bar:9092 --create --topic peter-test05 --partitions 1 --replication-factor 3
14 |
15 | P176
16 | sudo yum install -y java-1.8.0-openjdk java-1.8.0-openjdk-devel
17 | java -version
18 | cd kafka2/chapter5/
19 | java -jar ExactlyOnceProducer.jar
20 |
21 | P177
22 | cat /data/kafka-logs/peter-test02-0/leader-epoch-checkpoint
23 | /usr/local/kafka/bin/kafka-topics.sh --bootstrap-server peter-kafka01.foo.bar:9092 --list
24 | /usr/local/kafka/bin/kafka-console-consumer.sh --bootstrap-server peter-kafka01.foo.bar:9092 --topic __transaction_state --consumer.config /home/ec2-user/consumer.config --formatter "kafka.coordinator.transaction.TransactionLog\$TransactionLogMessageFormatter" --from-beginning
25 |
26 | P179
27 | /usr/local/kafka/bin/kafka-dump-log.sh --print-data-log --files /data/kafka-logs/peter-test05-0/00000000000000000000.log
--------------------------------------------------------------------------------
/chapter5/ExactlyOnceProducer.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/onlybooks/kafka2/6aeabf38dc3bceb95786a23df67f026926fe555a/chapter5/ExactlyOnceProducer.jar
--------------------------------------------------------------------------------
/chapter5/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0"
3 |          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
4 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
5 |     <modelVersion>4.0.0</modelVersion>
6 |
7 |     <groupId>org.example</groupId>
8 |     <artifactId>KafkaClientExample</artifactId>
9 |     <version>1.0</version>
10 |
11 |     <dependencies>
12 |
13 |         <dependency>
14 |             <groupId>org.apache.kafka</groupId>
15 |             <artifactId>kafka-clients</artifactId>
16 |             <version>2.7.2</version>
17 |         </dependency>
18 |     </dependencies>
19 |
20 |     <properties>
21 |         <maven.compiler.source>8</maven.compiler.source>
22 |         <maven.compiler.target>8</maven.compiler.target>
23 |     </properties>
24 |
25 | </project>
--------------------------------------------------------------------------------
/chapter5/src/main/java/ExactlyOnceProducer.java:
--------------------------------------------------------------------------------
1 | import org.apache.kafka.clients.producer.*;
2 | import org.apache.kafka.common.serialization.StringSerializer;
3 |
4 | import java.util.Properties;
5 |
6 | public class ExactlyOnceProducer {
7 | public static void main(String[] args) {
8 | String bootstrapServers = "peter-kafka01.foo.bar:9092";
9 | Properties props = new Properties();
10 | props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
11 | props.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
12 | props.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
13 | props.setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true"); // Setting required for exactly-once delivery
14 | props.setProperty(ProducerConfig.ACKS_CONFIG, "all"); // Setting required for exactly-once delivery
15 | props.setProperty(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "5"); // Setting required for exactly-once delivery
16 | props.setProperty(ProducerConfig.RETRIES_CONFIG, "5"); // Setting required for exactly-once delivery
17 | props.setProperty(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "peter-transaction-01"); // Setting required for exactly-once delivery
18 |
19 | Producer<String, String> producer = new KafkaProducer<>(props);
20 |
21 | producer.initTransactions(); // Initialize the producer transaction
22 | producer.beginTransaction(); // Begin the producer transaction
23 | try {
24 | for (int i = 0; i < 1; i++) {
25 | ProducerRecord<String, String> record = new ProducerRecord<>("peter-test05", "Apache Kafka is a distributed streaming platform - " + i);
26 | producer.send(record);
27 | producer.flush();
28 | System.out.println("Message sent successfully");
29 | }
30 | producer.commitTransaction(); // Commit the producer transaction once every send has succeeded
31 | } catch (Exception e){
32 | producer.abortTransaction(); // Abort the producer transaction on failure
33 | e.printStackTrace();
34 | } finally {
35 | producer.close();
36 | }
37 | }
38 | }
39 |
--------------------------------------------------------------------------------
/chapter5/예제/예제 5-1:
--------------------------------------------------------------------------------
1 | enable.idempotence=true
2 | max.in.flight.requests.per.connection=5
3 | retries=5
--------------------------------------------------------------------------------
/chapter5/예제/예제 5-2:
--------------------------------------------------------------------------------
1 | enable.idempotence=true
2 | max.in.flight.requests.per.connection=5
3 | retries=5
4 | acks=all
--------------------------------------------------------------------------------
/chapter5/예제/예제 5-3:
--------------------------------------------------------------------------------
1 | import org.apache.kafka.clients.producer.*;
2 | import org.apache.kafka.common.serialization.StringSerializer;
3 |
4 | import java.util.Properties;
5 |
6 | public class ExactlyOnceProducer {
7 | public static void main(String[] args) {
8 | String bootstrapServers = "peter-kafka01.foo.bar:9092";
9 | Properties props = new Properties();
10 | props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
11 | props.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
12 | props.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
13 | props.setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true"); // Setting required for exactly-once delivery
14 | props.setProperty(ProducerConfig.ACKS_CONFIG, "all"); // Setting required for exactly-once delivery
15 | props.setProperty(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "5"); // Setting required for exactly-once delivery
16 | props.setProperty(ProducerConfig.RETRIES_CONFIG, "5"); // Setting required for exactly-once delivery
17 | props.setProperty(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "peter-transaction-01"); // Setting required for exactly-once delivery
18 |
19 | Producer<String, String> producer = new KafkaProducer<>(props);
20 |
21 | producer.initTransactions(); // Initialize the producer transaction
22 | producer.beginTransaction(); // Begin the producer transaction
23 | try {
24 | for (int i = 0; i < 1; i++) {
25 | ProducerRecord<String, String> record = new ProducerRecord<>("peter-test05", "Apache Kafka is a distributed streaming platform - " + i);
26 | producer.send(record);
27 | producer.flush();
28 | System.out.println("Message sent successfully");
29 | }
30 | producer.commitTransaction(); // Commit the producer transaction once every send has succeeded
31 | } catch (Exception e){
32 | producer.abortTransaction(); // Abort the producer transaction on failure
33 | e.printStackTrace();
34 | } finally {
35 | producer.close();
36 | }
37 | }
38 | }
39 |
--------------------------------------------------------------------------------
/chapter5/예제/예제 5-4:
--------------------------------------------------------------------------------
1 | exclude.internal.topics=false
--------------------------------------------------------------------------------
/chapter6/6_commands.txt:
--------------------------------------------------------------------------------
1 | P189
2 | sudo yum -y install python3
3 | python3 -m venv venv6
4 | source venv6/bin/activate
5 | pip install confluent-kafka
6 |
7 | P190
8 | sudo yum -y install git
9 | git clone https://github.com/onlybooks/kafka2.git
10 | cd kafka2/chapter6/
11 | python consumer_standard.py
12 |
13 | P192
14 | python producer.py
15 |
16 | P193
17 | /usr/local/kafka/bin/kafka-consumer-groups.sh --bootstrap-server peter-kafka01.foo.bar:9092 --group peter-consumer01 --describe
18 |
19 | P194
20 | /usr/local/kafka/bin/kafka-consumer-groups.sh --bootstrap-server peter-kafka01.foo.bar:9092 --group peter-consumer01 --describe
21 |
22 | P197
23 | python consumer_static.py
24 | /usr/local/kafka/bin/kafka-consumer-groups.sh --bootstrap-server peter-kafka01.foo.bar:9092 --group peter-consumer02 --describe
25 |
26 | P198
27 | /usr/local/kafka/bin/kafka-consumer-groups.sh --bootstrap-server peter-kafka01.foo.bar:9092 --group peter-consumer02 --describe
28 |
29 | P199
30 | /usr/local/kafka/bin/kafka-consumer-groups.sh --bootstrap-server peter-kafka01.foo.bar:9092 --group peter-consumer02 --describe
31 |
32 | P213
33 | cd kafka2/chapter6/
34 | java -jar ExactlyOnceConsumer.jar
35 |
36 | P214
37 | cd kafka2/chapter5/
38 | java -jar ExactlyOnceProducer.jar
39 | /usr/local/kafka/bin/kafka-dump-log.sh --print-data-log --files /data/kafka-logs/peter-test05-0/00000000000000000000.log
--------------------------------------------------------------------------------
/chapter6/ExactlyOnceConsumer.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/onlybooks/kafka2/6aeabf38dc3bceb95786a23df67f026926fe555a/chapter6/ExactlyOnceConsumer.jar
--------------------------------------------------------------------------------
/chapter6/consumer_standard.py:
--------------------------------------------------------------------------------
1 | from confluent_kafka import Consumer
2 |
3 | broker = 'peter-kafka01.foo.bar, peter-kafka02.foo.bar, peter-kafka03.foo.bar'
4 | group = 'peter-consumer01'
5 | topic = 'peter-test06'
6 |
7 | c = Consumer({
8 | 'bootstrap.servers': broker,
9 | 'group.id': group,
10 | 'auto.offset.reset': 'earliest'
11 | })
12 | c.subscribe([topic])
13 |
14 | while True:
15 | msg = c.poll(1.0)
16 |
17 | if msg is None:
18 | continue
19 | if msg.error():
20 | print("Consumer error: {}".format(msg.error()))
21 | continue
22 | print('Topic: {}, '
23 | 'Partition: {}, '
24 | 'Offset: {}, '
25 | 'Received message: {}'.format(msg.topic(),
26 | msg.partition(),
27 | msg.offset(),
28 | msg.value().decode('utf-8')))
29 | c.close()
30 |
--------------------------------------------------------------------------------
/chapter6/consumer_static.py:
--------------------------------------------------------------------------------
1 | from confluent_kafka import Consumer
2 | import socket
3 |
4 | hostname = socket.gethostname()
5 | broker = 'peter-kafka01.foo.bar'
6 | group = 'peter-consumer02'
7 | topic = 'peter-test06'
8 |
9 | c = Consumer({
10 | 'bootstrap.servers': broker,
11 | 'group.id': group,
12 | 'session.timeout.ms': 30000,
13 | 'group.instance.id': 'consumer-' + hostname,
14 | 'auto.offset.reset': 'earliest'
15 | })
16 | c.subscribe([topic])
17 |
18 | while True:
19 | msg = c.poll(1.0)
20 |
21 | if msg is None:
22 | continue
23 | if msg.error():
24 | print("Consumer error: {}".format(msg.error()))
25 | continue
26 | print('Topic: {}, '
27 | 'Partition: {}, '
28 | 'Offset: {}, '
29 | 'Received message: {}'.format(msg.topic(),
30 | msg.partition(),
31 | msg.offset(),
32 | msg.value().decode('utf-8')))
33 | c.close()
34 |
--------------------------------------------------------------------------------
/chapter6/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0"
3 |          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
4 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
5 |     <modelVersion>4.0.0</modelVersion>
6 |
7 |     <groupId>org.example</groupId>
8 |     <artifactId>KafkaClientExample</artifactId>
9 |     <version>1.0</version>
10 |
11 |     <dependencies>
12 |
13 |         <dependency>
14 |             <groupId>org.apache.kafka</groupId>
15 |             <artifactId>kafka-clients</artifactId>
16 |             <version>2.7.2</version>
17 |         </dependency>
18 |     </dependencies>
19 |
20 |     <properties>
21 |         <maven.compiler.source>8</maven.compiler.source>
22 |         <maven.compiler.target>8</maven.compiler.target>
23 |     </properties>
24 |
25 | </project>
--------------------------------------------------------------------------------
/chapter6/producer.py:
--------------------------------------------------------------------------------
1 | from confluent_kafka import Producer
2 |
3 | broker = 'peter-kafka01.foo.bar, peter-kafka02.foo.bar, peter-kafka03.foo.bar'
4 | topic = 'peter-test06'
5 |
6 | p = Producer({'bootstrap.servers': broker,
7 | 'compression.codec': 'lz4',
8 | 'acks': 1})
9 |
10 | # create some_data_source
11 | some_data_source = []
12 | for messageCount in range(1, 11):
13 | some_data_source.append('Apache Kafka is a distributed streaming platform - %d' % messageCount)
14 |
15 | def delivery_report(err, msg):
16 | """ Called once for each message produced to indicate delivery result.
17 | Triggered by poll() or flush(). """
18 | if err is not None:
19 | print('Message delivery failed: {}'.format(err))
20 | else:
21 | print('Message delivered to {} [{}]'.format(msg.topic(), msg.partition()))
22 |
23 | for data in some_data_source:
24 | # Trigger any available delivery report callbacks from previous produce() calls
25 | p.poll(0)
26 |
27 | # Asynchronously produce a message, the delivery report callback
28 | # will be triggered from poll() above, or flush() below, when the message has
29 | # been successfully delivered or failed permanently.
30 | p.produce(topic, data.encode('utf-8'), callback=delivery_report)
31 |
32 | # Wait for any outstanding messages to be delivered and delivery report
33 | # callbacks to be triggered.
34 | p.flush()
--------------------------------------------------------------------------------
/chapter6/src/main/java/ExactlyOnceConsumer.java:
--------------------------------------------------------------------------------
1 | import org.apache.kafka.clients.consumer.ConsumerConfig;
2 | import org.apache.kafka.clients.consumer.ConsumerRecord;
3 | import org.apache.kafka.clients.consumer.ConsumerRecords;
4 | import org.apache.kafka.clients.consumer.KafkaConsumer;
5 | import org.apache.kafka.clients.producer.ProducerConfig;
6 | import org.apache.kafka.common.serialization.StringDeserializer;
7 |
8 | import java.util.Arrays;
9 | import java.util.Properties;
10 |
11 | public class ExactlyOnceConsumer {
12 | public static void main(String[] args) {
13 | String bootstrapServers = "peter-kafka01.foo.bar:9092";
14 | Properties props = new Properties();
15 | props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
16 | props.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
17 | props.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
18 | props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "peter-consumer-01");
19 | props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
20 | props.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
21 | props.setProperty(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"); // Setting required for exactly-once delivery
22 |
23 | KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
24 | consumer.subscribe(Arrays.asList("peter-test05"));
25 |
26 | try {
27 | while (true) {
28 | ConsumerRecords<String, String> records = consumer.poll(1000);
29 | for (ConsumerRecord<String, String> record : records) {
30 | System.out.printf("Topic: %s, Partition: %s, Offset: %d, Key: %s, Value: %s\n",
31 | record.topic(), record.partition(), record.offset(), record.key(), record.value());
32 | }
33 | consumer.commitAsync();
34 | }
35 | } catch (Exception e) {
36 | e.printStackTrace();
37 | } finally {
38 | consumer.close();
39 | }
40 | }
41 | }
--------------------------------------------------------------------------------
/chapter6/예제/예제 6-1:
--------------------------------------------------------------------------------
1 | print('Topic: {}, '
2 | 'Partition: {}, '
3 | 'Offset: {}, '
4 | 'Received message: {}'.format(msg.topic(),
5 | msg.partition(),
6 | msg.offset(),
7 | msg.value().decode('utf-8')))
--------------------------------------------------------------------------------
/chapter6/예제/예제 6-2:
--------------------------------------------------------------------------------
1 | some_data_source = []
2 | for messageCount in range(1, 11):
3 | some_data_source.append('Apache Kafka is a distributed streaming platform - %d' % messageCount)
--------------------------------------------------------------------------------
/chapter6/예제/예제 6-3:
--------------------------------------------------------------------------------
1 | hostname = socket.gethostname()
2 | broker = 'peter-kafka01.foo.bar'
3 | group = 'peter-consumer02'
4 | topic = 'peter-test06'
5 |
6 | c = Consumer({
7 | 'bootstrap.servers': broker,
8 | 'group.id': group,
9 | 'session.timeout.ms': 30000,
10 | 'group.instance.id': 'consumer-' + hostname,
11 | 'auto.offset.reset': 'earliest'
12 | })
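
The settings above use the confluent-kafka Python client. For reference, a minimal sketch of the equivalent static-membership configuration with the Java consumer; property names come from ConsumerConfig, and the class name and hostname-based group.instance.id value are illustrative, mirroring the naming used above:

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.net.InetAddress;
import java.util.Arrays;
import java.util.Properties;

public class ConsumerStaticJava {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "peter-kafka01.foo.bar:9092");
        props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "peter-consumer02");
        props.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000");
        // Static membership: a fixed group.instance.id lets this consumer restart
        // within session.timeout.ms without triggering a rebalance.
        props.setProperty(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG,
                "consumer-" + InetAddress.getLocalHost().getHostName());
        props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        props.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Arrays.asList("peter-test06"));
    }
}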
--------------------------------------------------------------------------------
/chapter6/예제/예제 6-4:
--------------------------------------------------------------------------------
1 | import org.apache.kafka.clients.consumer.ConsumerConfig;
2 | import org.apache.kafka.clients.consumer.ConsumerRecord;
3 | import org.apache.kafka.clients.consumer.ConsumerRecords;
4 | import org.apache.kafka.clients.consumer.KafkaConsumer;
5 | import org.apache.kafka.clients.producer.ProducerConfig;
6 | import org.apache.kafka.common.serialization.StringDeserializer;
7 |
8 | import java.util.Arrays;
9 | import java.util.Properties;
10 |
11 | public class ExactlyOnceConsumer {
12 | public static void main(String[] args) {
13 | String bootstrapServers = "peter-kafka01.foo.bar:9092";
14 | Properties props = new Properties();
15 | props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
16 | props.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
17 | props.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
18 | props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "peter-consumer-01");
19 | props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
20 | props.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
21 | props.setProperty(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"); // Setting required for exactly-once delivery
22 |
23 | KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
24 | consumer.subscribe(Arrays.asList("peter-test05"));
25 |
26 | try {
27 | while (true) {
28 | ConsumerRecords<String, String> records = consumer.poll(1000);
29 | for (ConsumerRecord<String, String> record : records) {
30 | System.out.printf("Topic: %s, Partition: %s, Offset: %d, Key: %s, Value: %s\n",
31 | record.topic(), record.partition(), record.offset(), record.key(), record.value());
32 | }
33 | consumer.commitAsync();
34 | }
35 | } catch (Exception e) {
36 | e.printStackTrace();
37 | } finally {
38 | consumer.close();
39 | }
40 | }
41 | }
42 |
--------------------------------------------------------------------------------
/chapter7/7_commands.txt:
--------------------------------------------------------------------------------
1 | P223
2 | cat /usr/local/kafka/config/log4j.properties
3 |
4 | P224
5 | sudo vi /usr/local/kafka/config/log4j.properties
6 | sudo systemctl restart kafka-server
7 |
8 | P225
9 | cat /usr/local/kafka/logs/server.log
10 |
11 | P227
12 | cat /usr/local/kafka/config/jmx
13 | netstat -ntl | grep 9999
14 |
15 | P228
16 | sudo amazon-linux-extras install -y docker
17 | sudo docker version
18 | sudo service docker start
19 | sudo usermod -a -G docker ec2-user
20 | sudo yum install -y git
21 | sudo chkconfig docker on
22 | sudo reboot
23 | sudo systemctl status docker
24 | sudo mkdir -p /etc/prometheus
25 | git clone https://github.com/onlybooks/kafka2.git
26 |
27 | P229
28 | sudo cp kafka2/chapter7/prometheus.yml /etc/prometheus/
29 | sudo docker run -d --network host -p 9090:9090 -v /etc/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml --name prometheus prom/prometheus
30 | sudo docker ps
31 |
32 | P230
33 | sudo docker run -d --network host -p 3000:3000 --name grafana grafana/grafana:7.3.7
34 | sudo docker ps
35 |
36 | P231
37 | sudo mkdir -p /usr/local/jmx
38 | sudo yum -y install git
39 | git clone https://github.com/onlybooks/kafka2.git
40 | sudo cp kafka2/chapter7/jmx_prometheus_httpserver-0.13.1-SNAPSHOT-jar-with-dependencies.jar /usr/local/jmx/
41 | sudo cp kafka2/chapter7/jmx_prometheus_httpserver.yml /usr/local/jmx/
42 |
43 | P233
44 | sudo cp kafka2/chapter7/jmx-exporter.service /etc/systemd/system
45 | sudo systemctl daemon-reload
46 | sudo systemctl start jmx-exporter
47 | sudo systemctl status jmx-exporter
48 | curl http://localhost:7071/metrics
49 |
50 | P234
51 | wget https://github.com/prometheus/node_exporter/releases/download/v1.0.1/node_exporter-1.0.1.linux-386.tar.gz
52 | sudo tar zxf node_exporter-1.0.1.linux-386.tar.gz -C /usr/local/
53 |
54 | P235
55 | sudo ln -s /usr/local/node_exporter-1.0.1.linux-386 /usr/local/node_exporter
56 | sudo cp kafka2/chapter7/node-exporter.service /etc/systemd/system
57 | sudo systemctl daemon-reload
58 | sudo systemctl start node-exporter
59 | sudo systemctl status node-exporter
60 |
61 | P237
62 | curl -X GET http://localhost:9090/api/v1/status/config | python -m json.tool
63 |
64 | P245
65 | cat kafka2/chapter7/kafka_metrics.json
66 |
67 | P251
68 | cd /home/ec2-user/kafka2/chapter2/ansible_playbook
69 | ansible-playbook -i hosts kafka-exporter.yml
70 |
71 | P252
72 | sudo vi /etc/prometheus/prometheus.yml
73 | sudo docker restart prometheus
--------------------------------------------------------------------------------
/chapter7/jmx-exporter.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=JMX Exporter for Kafka
3 | After=kafka-server.target
4 |
5 | [Service]
6 | Type=simple
7 | Restart=always
8 | ExecStart=/usr/bin/java -jar /usr/local/jmx/jmx_prometheus_httpserver-0.13.1-SNAPSHOT-jar-with-dependencies.jar 7071 /usr/local/jmx/jmx_prometheus_httpserver.yml
9 |
10 | [Install]
11 | WantedBy=multi-user.target
--------------------------------------------------------------------------------
/chapter7/jmx_prometheus_httpserver-0.13.1-SNAPSHOT-jar-with-dependencies.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/onlybooks/kafka2/6aeabf38dc3bceb95786a23df67f026926fe555a/chapter7/jmx_prometheus_httpserver-0.13.1-SNAPSHOT-jar-with-dependencies.jar
--------------------------------------------------------------------------------
/chapter7/jmx_prometheus_httpserver.yml:
--------------------------------------------------------------------------------
1 | hostPort: 127.0.0.1:9999
2 | ssl: false
3 | rules:
4 | - pattern: ".*"
--------------------------------------------------------------------------------
/chapter7/node-exporter.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Node Exporter
3 | After=network-online.target
4 |
5 | [Service]
6 | Type=simple
7 | ExecStart=/usr/local/node_exporter/node_exporter
8 |
9 | [Install]
10 | WantedBy=multi-user.target
--------------------------------------------------------------------------------
/chapter7/prometheus.yml:
--------------------------------------------------------------------------------
1 | # prometheus config
2 | global:
3 | scrape_interval: 5s
4 | evaluation_interval: 5s
5 |
6 | scrape_configs:
7 | - job_name: 'peter-jmx-kafka'
8 | static_configs:
9 | - targets:
10 | - peter-kafka01.foo.bar:7071
11 | - peter-kafka02.foo.bar:7071
12 | - peter-kafka03.foo.bar:7071
13 |
14 | - job_name: 'peter-kafka-nodes'
15 | static_configs:
16 | - targets:
17 | - peter-kafka01.foo.bar:9100
18 | - peter-kafka02.foo.bar:9100
19 | - peter-kafka03.foo.bar:9100
20 |
21 | - job_name: 'peter-kafka-exporter'
22 | static_configs:
23 | - targets:
24 | - peter-kafka01.foo.bar:9308
25 | - peter-kafka02.foo.bar:9308
26 | - peter-kafka03.foo.bar:9308
--------------------------------------------------------------------------------
/chapter7/예제/예제 7-1:
--------------------------------------------------------------------------------
1 | log4j.logger.kafka=DEBUG
2 | log4j.logger.org.apache.kafka=DEBUG
--------------------------------------------------------------------------------
/chapter7/예제/예제 7-2:
--------------------------------------------------------------------------------
1 | hostPort: 127.0.0.1:9999
2 | ssl: false
3 | rules:
4 | - pattern: ".*"
5 |
--------------------------------------------------------------------------------
/chapter7/예제/예제 7-3:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=JMX Exporter for Kafka
3 | After=kafka-server.target
4 |
5 | [Service]
6 | Type=simple
7 | Restart=always
8 | ExecStart=/usr/bin/java -jar /usr/local/jmx/jmx_prometheus_httpserver-0.13.1-SNAPSHOT-jar-with-dependencies.jar 7071 /usr/local/jmx/jmx_prometheus_httpserver.yml
9 |
10 | [Install]
11 | WantedBy=multi-user.target
12 |
--------------------------------------------------------------------------------
/chapter7/예제/예제 7-4:
--------------------------------------------------------------------------------
1 | # prometheus config
2 | global:
3 | scrape_interval: 5s
4 | evaluation_interval: 5s
5 |
6 | scrape_configs:
7 | - job_name: 'peter-jmx-kafka'
8 | static_configs:
9 | - targets:
10 | - peter-kafka01.foo.bar:7071
11 | - peter-kafka02.foo.bar:7071
12 | - peter-kafka03.foo.bar:7071
13 |
14 | - job_name: 'peter-kafka-nodes'
15 | static_configs:
16 | - targets:
17 | - peter-kafka01.foo.bar:9100
18 | - peter-kafka02.foo.bar:9100
19 | - peter-kafka03.foo.bar:9100
20 |
21 | - job_name: 'peter-kafka-exporter'
22 | static_configs:
23 | - targets:
24 | - peter-kafka01.foo.bar:9308
25 | - peter-kafka02.foo.bar:9308
26 | - peter-kafka03.foo.bar:9308
--------------------------------------------------------------------------------
/chapter7/예제/예제 7-5:
--------------------------------------------------------------------------------
1 | - job_name: 'peter-kafka-exporter'
2 | static_configs:
3 | - targets:
4 | - peter-kafka01.foo.bar:9308
5 | - peter-kafka02.foo.bar:9308
6 | - peter-kafka03.foo.bar:9308
--------------------------------------------------------------------------------
/chapter8/8_commands.txt:
--------------------------------------------------------------------------------
1 | P256
2 | /usr/local/kafka/bin/kafka-topics.sh --version
3 |
4 | P257
5 | ls -l /usr/local/kafka/libs/kafka_*
6 |
7 | P259
8 | /usr/local/kafka/bin/kafka-topics.sh --bootstrap-server peter-kafka01.foo.bar:9092 --delete --topic peter-test06
9 | sudo systemctl stop kafka-server
10 |
11 | P260
12 | cd chapter2/ansible_playbook
13 | ansible-playbook -i hosts kafka2.1.yml
14 | /usr/local/kafka/bin/kafka-topics.sh --bootstrap-server peter-kafka01.foo.bar:9092 --create --topic peter-version2-1 --partitions 1 --replication-factor 3
15 |
16 | P261
17 | /usr/local/kafka/bin/kafka-topics.sh --zookeeper peter-zk01.foo.bar --create --topic peter-version2-1 --partitions 1 --replication-factor 3
18 | /usr/local/kafka/bin/kafka-console-producer.sh --broker-list peter-kafka01.foo.bar:9092 --topic peter-version2-1
19 | /usr/local/kafka/bin/kafka-console-consumer.sh --bootstrap-server peter-kafka01.foo.bar:9092 --topic peter-version2-1 --from-beginning --group peter-consumer
20 |
21 | P262
22 | cd /usr/local/
23 | ll
24 | sudo cp kafka_2.12-2.1.0/config/server.properties kafka_2.12-2.6.0/config/server.properties
25 | sudo vi kafka_2.12-2.6.0/config/server.properties
26 |
27 | P263
28 | cd /usr/local/
29 | sudo systemctl stop kafka-server
30 | sudo rm -rf kafka
31 | sudo ln -sf kafka_2.12-2.6.0 kafka
32 | ll
33 |
34 | P264
35 | sudo systemctl start kafka-server
36 | /usr/local/kafka/bin/kafka-topics.sh --bootstrap-server peter-kafka01.foo.bar:9092 --topic peter-version2-1 --describe
37 | cd /usr/local/
38 | sudo systemctl stop kafka-server
39 | sudo rm -rf kafka
40 | sudo ln -sf kafka_2.12-2.6.0 kafka
41 | sudo systemctl start kafka-server
42 |
43 | P265
44 | cd /usr/local/
45 | sudo systemctl stop kafka-server
46 | sudo rm -rf kafka
47 | sudo ln -sf kafka_2.12-2.6.0 kafka
48 | sudo systemctl start kafka-server
49 |
50 | P266
51 | sudo vi /usr/local/kafka/config/server.properties
52 | sudo systemctl restart kafka-server
53 | /usr/local/kafka/bin/kafka-console-producer.sh --bootstrap-server peter-kafka01.foo.bar:9092 --topic peter-version2-1
54 |
55 | P267
56 | /usr/local/kafka/bin/kafka-console-consumer.sh --bootstrap-server peter-kafka01.foo.bar:9092 --topic peter-version2-1 --group peter-consumer
57 | /usr/local/kafka/bin/kafka-console-consumer.sh --bootstrap-server peter-kafka01.foo.bar:9092 --topic peter-version2-1 --from-beginning
58 |
59 | P269
60 | /usr/local/kafka/bin/kafka-topics.sh --bootstrap-server peter-kafka01.foo.bar:9092 --create --topic peter-scaleout1 --partitions 4 --replication-factor 1
61 | /usr/local/kafka/bin/kafka-topics.sh --bootstrap-server peter-kafka01.foo.bar:9092 --describe --topic peter-scaleout1
62 |
63 | P270
64 | cd chapter2/ansible_playbook
65 | ansible-playbook -i hosts kafka-scaleout.yml
66 | sudo vi /usr/local/kafka/config/server.properties
67 |
68 | P271
69 | sudo systemctl status kafka-server
70 | /usr/local/kafka/bin/kafka-topics.sh --bootstrap-server peter-kafka01.foo.bar:9092 --create --topic peter-scaleout2 --partitions 4 --replication-factor 1
71 |
72 | P272
73 | /usr/local/kafka/bin/kafka-topics.sh --bootstrap-server peter-kafka01.foo.bar:9092 --describe --topic peter-scaleout2
74 |
75 | P274
76 | /usr/local/kafka/bin/kafka-reassign-partitions.sh --bootstrap-server peter-kafka01.foo.bar:9092 --generate --topics-to-move-json-file reassign-partitions-topic.json --broker-list "1,2,3,4"
77 |
78 | P276
79 | /usr/local/kafka/bin/kafka-reassign-partitions.sh --bootstrap-server peter-kafka01.foo.bar:9092 --reassignment-json-file move.json --execute
80 | /usr/local/kafka/bin/kafka-topics.sh --bootstrap-server peter-kafka01.foo.bar:9092 --describe --topic peter-scaleout1
81 |
82 | P279
83 | /usr/local/kafka/bin/kafka-topics.sh --bootstrap-server peter-kafka01.foo.bar:9092 --delete --topic peter-scaleout1
84 | /usr/local/kafka/bin/kafka-topics.sh --bootstrap-server peter-kafka01.foo.bar:9092 --delete --topic peter-scaleout2
85 | sudo systemctl stop kafka-server
--------------------------------------------------------------------------------
/chapter8/예제/예제 8-1:
--------------------------------------------------------------------------------
1 | inter.broker.protocol.version=2.1
2 | log.message.format.version=2.1
--------------------------------------------------------------------------------
/chapter8/예제/예제 8-2:
--------------------------------------------------------------------------------
1 | {"topics":
2 | [{"topic": "peter-scaleout1"}],
3 | "version":1
4 | }
--------------------------------------------------------------------------------
/chapter8/예제/예제 8-3:
--------------------------------------------------------------------------------
1 | {"topics":
2 | [{"topic": "peter-scaleout1"},{"topic": "peter-scaleout2"}],
3 | "version":1
4 | }
--------------------------------------------------------------------------------
/chapter8/예제/예제 8-4:
--------------------------------------------------------------------------------
1 | {
2 | "version": 1,
3 | "partitions": [
4 | {
5 | "topic": "peter-scaleout1",
6 | "partition": 0,
7 | "replicas": [
8 | 2
9 | ],
10 | "log_dirs": [
11 | "any"
12 | ]
13 | },
14 | {
15 | "topic": "peter-scaleout1",
16 | "partition": 1,
17 | "replicas": [
18 | 3
19 | ],
20 | "log_dirs": [
21 | "any"
22 | ]
23 | },
24 | {
25 | "topic": "peter-scaleout1",
26 | "partition": 2,
27 | "replicas": [
28 | 4
29 | ],
30 | "log_dirs": [
31 | "any"
32 | ]
33 | },
34 | {
35 | "topic": "peter-scaleout1",
36 | "partition": 3,
37 | "replicas": [
38 | 1
39 | ],
40 | "log_dirs": [
41 | "any"
42 | ]
43 | }
44 | ]
45 | }
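
After the --execute step on P276, the same JSON file can be passed back with --verify to confirm that every partition move has completed and to clear any throttles. A hedged sketch, following the command style used in 8_commands.txt:

/usr/local/kafka/bin/kafka-reassign-partitions.sh --bootstrap-server peter-kafka01.foo.bar:9092 --reassignment-json-file move.json --verify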
--------------------------------------------------------------------------------
/chapter9/9_commands.txt:
--------------------------------------------------------------------------------
1 | P289
2 | sudo mkdir -p /usr/local/kafka/ssl
3 | cd /usr/local/kafka/ssl/
4 | export SSLPASS=peterpass
5 | sudo keytool -keystore kafka.server.keystore.jks -alias localhost -keyalg RSA -validity 365 -genkey -storepass $SSLPASS -keypass $SSLPASS -dname "CN=peter-kafka01.foo.bar" -storetype pkcs12
6 |
7 | P290
8 | ls
9 | keytool -list -v -keystore kafka.server.keystore.jks
10 |
11 | P292
12 | sudo openssl req -new -x509 -keyout ca-key -out ca-cert -days 356 -subj "/CN=foo.bar" -nodes
13 | ls
14 |
15 | P293
16 | sudo keytool -keystore kafka.server.truststore.jks -alias CARoot -importcert -file ca-cert -storepass $SSLPASS -keypass $SSLPASS
17 | keytool -list -v -keystore kafka.server.truststore.jks
18 |
19 | P295
20 | sudo keytool -keystore kafka.server.keystore.jks -alias localhost -certreq -file cert-file -storepass $SSLPASS -keypass $SSLPASS
21 | ls
22 | sudo openssl x509 -req -CA ca-cert -CAkey ca-key -in cert-file -out cert-signed -days 365 -CAcreateserial -passin pass:$SSLPASS
23 |
24 | P296
25 | sudo keytool -keystore kafka.server.keystore.jks -alias CARoot -importcert -file ca-cert -storepass $SSLPASS -keypass $SSLPASS
26 | sudo keytool -keystore kafka.server.keystore.jks -alias localhost -importcert -file cert-signed -storepass $SSLPASS -keypass $SSLPASS
27 | keytool -list -v -keystore kafka.server.keystore.jks
28 |
29 | P298
30 | sudo mkdir -p /usr/local/kafka/ssl
31 | export SSLPASS=peterpass
32 | sudo mkdir -p /usr/local/kafka/ssl
33 | export SSLPASS=peterpass
34 | sudo keytool -keystore kafka.server.keystore.jks -alias localhost -keyalg RSA -validity 365 -genkey -storepass $SSLPASS -keypass $SSLPASS -dname "CN=peter-kafka02.foo.bar" -storetype pkcs12
35 | sudo keytool -keystore kafka.server.keystore.jks -alias localhost -keyalg RSA -validity 365 -genkey -storepass $SSLPASS -keypass $SSLPASS -dname "CN=peter-kafka03.foo.bar" -storetype pkcs12
36 |
37 | P299
38 | ssh-keygen
39 | cat .ssh/id_rsa.pub
40 | vi /home/ec2-user/.ssh/authorized_keys
41 | vi /home/ec2-user/.ssh/authorized_keys
42 | cd /usr/local/kafka/ssl/
43 |
44 | P300
45 | scp ca-cert peter-kafka02.foo.bar:~
46 | scp ca-key peter-kafka02.foo.bar:~
47 | scp kafka.server.truststore.jks peter-kafka02.foo.bar:~
48 | scp ca-cert peter-kafka03.foo.bar:~
49 | scp ca-key peter-kafka03.foo.bar:~
50 | scp kafka.server.truststore.jks peter-kafka03.foo.bar:~
51 | sudo mv * /usr/local/kafka/ssl/
52 | cd /usr/local/kafka/ssl/
53 | sudo mv * /usr/local/kafka/ssl/
54 | cd /usr/local/kafka/ssl/
55 | sudo keytool -keystore kafka.server.keystore.jks -alias localhost -certreq -file cert-file -storepass $SSLPASS -keypass $SSLPASS
56 | sudo openssl x509 -req -CA ca-cert -CAkey ca-key -in cert-file -out cert-signed -days 365 -CAcreateserial -passin pass:$SSLPASS
57 | sudo keytool -keystore kafka.server.keystore.jks -alias localhost -certreq -file cert-file -storepass $SSLPASS -keypass $SSLPASS
58 | sudo openssl x509 -req -CA ca-cert -CAkey ca-key -in cert-file -out cert-signed -days 365 -CAcreateserial -passin pass:$SSLPASS
59 |
60 | P301
61 | sudo keytool -keystore kafka.server.keystore.jks -alias CARoot -importcert -file ca-cert -storepass $SSLPASS -keypass $SSLPASS
62 | sudo keytool -keystore kafka.server.keystore.jks -alias localhost -importcert -file cert-signed -storepass $SSLPASS -keypass $SSLPASS
63 | sudo keytool -keystore kafka.server.keystore.jks -alias CARoot -importcert -file ca-cert -storepass $SSLPASS -keypass $SSLPASS
64 | sudo keytool -keystore kafka.server.keystore.jks -alias localhost -importcert -file cert-signed -storepass $SSLPASS -keypass $SSLPASS
65 | keytool -list -v -keystore kafka.server.keystore.jks
66 | keytool -list -v -keystore kafka.server.keystore.jks
67 |
68 | P302
69 | sudo vi /usr/local/kafka/config/server.properties
70 | sudo systemctl restart kafka-server
71 |
72 | P303
73 | openssl s_client -connect peter-kafka01.foo.bar:9093 -tls1 < /dev/null | grep -E 'Verify return code'
74 | cd /usr/local/kafka/ssl/
75 | sudo keytool -keystore kafka.client.truststore.jks -alias CARoot -importcert -file ca-cert -storepass $SSLPASS -keypass $SSLPASS
76 | /usr/local/kafka/bin/kafka-topics.sh --bootstrap-server peter-kafka01.foo.bar:9092 --create --topic peter-test07 --partitions 1 --replication-factor 3
77 |
78 | P304
79 | vi /home/ec2-user/ssl.config
80 | /usr/local/kafka/bin/kafka-console-producer.sh --bootstrap-server peter-kafka01.foo.bar:9093 --topic peter-test07 --producer.config /home/ec2-user/ssl.config
81 | /usr/local/kafka/bin/kafka-console-consumer.sh --bootstrap-server peter-kafka01.foo.bar:9093 --topic peter-test07 --from-beginning --consumer.config /home/ec2-user/ssl.config
82 |
83 | P306
84 | cd ansible_playbook
85 | ansible-playbook -i hosts kerberos.yml
86 |
87 | P307
88 | sudo kadmin.local -q "add_principal -randkey peter01@FOO.BAR"
89 | sudo kadmin.local -q "add_principal -randkey peter02@FOO.BAR"
90 | sudo kadmin.local -q "add_principal -randkey admin@FOO.BAR"
91 | sudo kadmin.local -q "add_principal -randkey kafka/peter-kafka01.foo.bar@FOO.BAR"
92 | sudo kadmin.local -q "add_principal -randkey kafka/peter-kafka02.foo.bar@FOO.BAR"
93 | sudo kadmin.local -q "add_principal -randkey kafka/peter-kafka03.foo.bar@FOO.BAR"
94 | mkdir -p /home/ec2-user/keytabs/
95 | sudo kadmin.local -q "ktadd -k /home/ec2-user/keytabs/peter01.user.keytab peter01@FOO.BAR"
96 |
97 | P308
98 | sudo kadmin.local -q "ktadd -k /home/ec2-user/keytabs/peter02.user.keytab peter02@FOO.BAR"
99 | sudo kadmin.local -q "ktadd -k /home/ec2-user/keytabs/admin.user.keytab admin@FOO.BAR"
100 | sudo kadmin.local -q "ktadd -k /home/ec2-user/keytabs/peter-kafka01.service.keytab kafka/peter-kafka01.foo.bar@FOO.BAR"
101 | sudo kadmin.local -q "ktadd -k /home/ec2-user/keytabs/peter-kafka02.service.keytab kafka/peter-kafka02.foo.bar@FOO.BAR"
102 | sudo kadmin.local -q "ktadd -k /home/ec2-user/keytabs/peter-kafka03.service.keytab kafka/peter-kafka03.foo.bar@FOO.BAR"
103 | sudo chown -R ec2-user.ec2-user keytabs/
104 |
105 | P309
106 | scp -i keypair.pem -r peter-zk01.foo.bar:~/keytabs /home/ec2-user
107 | sudo mv keytabs /usr/local/kafka
108 | cat /etc/krb5.conf
109 | kinit -kt /usr/local/kafka/keytabs/peter01.user.keytab peter01
110 |
111 | P310
112 | klist
113 | kinit -kt /usr/local/kafka/keytabs/peter-kafka01.service.keytab kafka/peter-kafka01.foo.bar
114 |
115 | P311
116 | sudo vi /usr/local/kafka/config/server.properties
117 | sudo vi /usr/local/kafka/config/kafka_server_jaas.conf
118 |
119 | P312
120 | sudo vi /usr/local/kafka/config/jmx
121 | sudo systemctl restart kafka-server
122 | sudo netstat -ntlp | grep 9094
123 |
124 | P313
125 | vi kafka_client_jaas.conf
126 | export KAFKA_OPTS="-Djava.security.auth.login.config=/home/ec2-user/kafka_client_jaas.conf"
127 | vi kerberos.config
128 |
129 | P314
130 | kinit -kt /usr/local/kafka/keytabs/peter01.user.keytab peter01
131 | /usr/local/kafka/bin/kafka-console-producer.sh --bootstrap-server peter-kafka01.foo.bar:9094 --topic peter-test08 --producer.config kerberos.config
132 | /usr/local/kafka/bin/kafka-console-consumer.sh --bootstrap-server peter-kafka01.foo.bar:9094 --topic peter-test08 --from-beginning --consumer.config kerberos.config
133 |
134 | P315
135 | kdestroy
136 | klist
137 | /usr/local/kafka/bin/kafka-console-consumer.sh --bootstrap-server peter-kafka01.foo.bar:9094 --topic peter-test08 --from-beginning --consumer.config kerberos.config
138 |
139 | P317
140 | sudo vi /usr/local/kafka/config/server.properties
141 | sudo systemctl restart kafka-server
142 |
143 | P318
144 | unset KAFKA_OPTS
145 | /usr/local/kafka/bin/kafka-topics.sh --zookeeper peter-zk01.foo.bar:2181 --create --topic peter-test09 --partitions 1 --replication-factor 1
146 | /usr/local/kafka/bin/kafka-topics.sh --zookeeper peter-zk01.foo.bar:2181 --create --topic peter-test10 --partitions 1 --replication-factor 1
147 |
148 | P319
149 | /usr/local/kafka/bin/kafka-acls.sh --authorizer-properties zookeeper.connect=peter-zk01.foo.bar:2181 --add --allow-principal User:peter01 --operation Read --operation Write --operation DESCRIBE --topic peter-test09
150 |
151 | P320
152 | /usr/local/kafka/bin/kafka-acls.sh --authorizer-properties zookeeper.connect=peter-zk01.foo.bar:2181 --add --allow-principal User:peter02 --operation Read --operation Write --operation DESCRIBE --topic peter-test10
153 | /usr/local/kafka/bin/kafka-acls.sh --authorizer-properties zookeeper.connect=peter-zk01.foo.bar:2181 --list
154 |
155 | P321
156 | kinit -kt /usr/local/kafka/keytabs/peter01.user.keytab peter01
157 | export KAFKA_OPTS="-Djava.security.auth.login.config=/home/ec2-user/kafka_client_jaas.conf"
158 | /usr/local/kafka/bin/kafka-console-producer.sh --bootstrap-server peter-kafka01.foo.bar:9094 --topic peter-test09 --producer.config kerberos.config
159 |
160 | P322
161 | /usr/local/kafka/bin/kafka-console-consumer.sh --bootstrap-server peter-kafka01.foo.bar:9094 --topic peter-test09 --from-beginning --consumer.config kerberos.config
162 |
163 | P323
164 | /usr/local/kafka/bin/kafka-acls.sh --authorizer-properties zookeeper.connect=peter-zk01.foo.bar:2181 --add --allow-principal User:peter01 --operation Read --group '*'
165 | /usr/local/kafka/bin/kafka-acls.sh --authorizer-properties zookeeper.connect=peter-zk01.foo.bar:2181 --list
166 | /usr/local/kafka/bin/kafka-console-consumer.sh --bootstrap-server peter-kafka01.foo.bar:9094 --topic peter-test09 --from-beginning --consumer.config kerberos.config
167 |
168 | P324
169 | kinit -kt /usr/local/kafka/keytabs/peter02.user.keytab peter02
170 | export KAFKA_OPTS="-Djava.security.auth.login.config=/home/ec2-user/kafka_client_jaas.conf"
171 | /usr/local/kafka/bin/kafka-console-producer.sh --bootstrap-server peter-kafka01.foo.bar:9094 --topic peter-test10 --producer.config kerberos.config
172 | /usr/local/kafka/bin/kafka-console-producer.sh --bootstrap-server peter-kafka01.foo.bar:9094 --topic peter-test09 --producer.config kerberos.config
173 | kinit -kt /usr/local/kafka/keytabs/admin.user.keytab admin
174 | /usr/local/kafka/bin/kafka-console-producer.sh --bootstrap-server peter-kafka01.foo.bar:9094 --topic peter-test09 --producer.config kerberos.config
175 | /usr/local/kafka/bin/kafka-console-producer.sh --bootstrap-server peter-kafka01.foo.bar:9094 --topic peter-test10 --producer.config kerberos.config
176 |
177 | P325
178 | /usr/local/kafka/bin/kafka-console-consumer.sh --bootstrap-server peter-kafka01.foo.bar:9094 --topic peter-test09 --from-beginning --consumer.config kerberos.config
179 | /usr/local/kafka/bin/kafka-console-consumer.sh --bootstrap-server peter-kafka01.foo.bar:9094 --topic peter-test10 --from-beginning --consumer.config kerberos.config
180 |
--------------------------------------------------------------------------------
/chapter9/예제/예제 9-1:
--------------------------------------------------------------------------------
1 | ssl.truststore.location=/usr/local/kafka/ssl/kafka.server.truststore.jks
2 | ssl.truststore.password=peterpass
3 | ssl.keystore.location=/usr/local/kafka/ssl/kafka.server.keystore.jks
4 | ssl.keystore.password=peterpass
5 | ssl.key.password=peterpass
6 | security.inter.broker.protocol=SSL
--------------------------------------------------------------------------------
/chapter9/예제/예제 9-2:
--------------------------------------------------------------------------------
1 | security.protocol=SSL
2 | ssl.truststore.location=/usr/local/kafka/ssl/kafka.client.truststore.jks
3 | ssl.truststore.password=peterpass
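
The file above is handed to the console tools via --producer.config / --consumer.config. For an application using the Java client, the same three settings can be put straight into the producer Properties — a minimal hedged sketch; the class name and message are illustrative, the topic matches the peter-test07 example on P303-304:

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;

public class SslProducerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Connect to the broker's SSL listener (9093), not the PLAINTEXT one.
        props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "peter-kafka01.foo.bar:9093");
        props.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // Same settings as the ssl.config file above, passed programmatically.
        props.setProperty("security.protocol", "SSL");
        props.setProperty("ssl.truststore.location", "/usr/local/kafka/ssl/kafka.client.truststore.jks");
        props.setProperty("ssl.truststore.password", "peterpass");

        Producer<String, String> producer = new KafkaProducer<>(props);
        producer.send(new ProducerRecord<>("peter-test07", "hello over TLS"));
        producer.flush();
        producer.close();
    }
}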
--------------------------------------------------------------------------------
/chapter9/예제/예제 9-3:
--------------------------------------------------------------------------------
1 | listeners=PLAINTEXT://0.0.0.0:9092,SSL://0.0.0.0:9093,SASL_PLAINTEXT://0.0.0.0:9094
2 | advertised.listeners=PLAINTEXT://peter-kafka01.foo.bar:9092,SSL://peter-kafka01.foo.bar:9093,SASL_PLAINTEXT://peter-kafka01.foo.bar:9094
3 |
4 | security.inter.broker.protocol=SASL_PLAINTEXT
5 | sasl.mechanism.inter.broker.protocol=GSSAPI
6 | sasl.enabled.mechanisms=GSSAPI
7 | sasl.kerberos.service.name=kafka
--------------------------------------------------------------------------------
/chapter9/예제/예제 9-4:
--------------------------------------------------------------------------------
1 | KafkaServer {
2 | com.sun.security.auth.module.Krb5LoginModule required
3 | useKeyTab=true
4 | storeKey=true
5 | keyTab="/usr/local/kafka/keytabs/peter-kafka01.service.keytab"
6 | principal="kafka/peter-kafka01.foo.bar@FOO.BAR";
7 | };
--------------------------------------------------------------------------------
/chapter9/예제/예제 9-5:
--------------------------------------------------------------------------------
1 | KAFKA_OPTS="-Djava.security.auth.login.config=/usr/local/kafka/config/kafka_server_jaas.conf"
--------------------------------------------------------------------------------
/chapter9/예제/예제 9-6:
--------------------------------------------------------------------------------
1 | KafkaClient {
2 | com.sun.security.auth.module.Krb5LoginModule required
3 | useTicketCache=true;
4 | };
--------------------------------------------------------------------------------
/chapter9/예제/예제 9-7:
--------------------------------------------------------------------------------
1 | sasl.mechanism=GSSAPI
2 | security.protocol=SASL_PLAINTEXT
3 | sasl.kerberos.service.name=kafka
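
For a Java client, the same SASL settings can be set in code, and the sasl.jaas.config property can stand in for the external kafka_client_jaas.conf file that KAFKA_OPTS points to. A hedged sketch; the property values mirror 예제 9-6 and 예제 9-7, the class name is illustrative, and the topic matches the peter-test08 example on P314:

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.util.Arrays;
import java.util.Properties;

public class KerberosConsumerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Connect to the broker's SASL_PLAINTEXT listener (9094).
        props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "peter-kafka01.foo.bar:9094");
        props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "peter-consumer01");
        props.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // Same settings as the kerberos.config file above.
        props.setProperty("security.protocol", "SASL_PLAINTEXT");
        props.setProperty("sasl.mechanism", "GSSAPI");
        props.setProperty("sasl.kerberos.service.name", "kafka");
        // Inline JAAS entry equivalent to 예제 9-6: use the ticket cache populated by kinit.
        props.setProperty("sasl.jaas.config",
                "com.sun.security.auth.module.Krb5LoginModule required useTicketCache=true;");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Arrays.asList("peter-test08"));
        consumer.close();
    }
}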
--------------------------------------------------------------------------------
/chapter9/예제/예제 9-8:
--------------------------------------------------------------------------------
1 | security.inter.broker.protocol=SASL_PLAINTEXT
2 | sasl.mechanism.inter.broker.protocol=GSSAPI
3 | sasl.enabled.mechanisms=GSSAPI
4 | sasl.kerberos.service.name=kafka
5 | # Add the lines below
6 | authorizer.class.name=kafka.security.authorizer.AclAuthorizer
7 | super.users=User:admin;User:kafka
--------------------------------------------------------------------------------