├── LICENSE ├── README.md ├── config.js ├── consumer.app.js ├── package.json ├── pm2.json └── producer.app.js /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 thatcoder 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Kafka single node setup 4 | 5 | On a single machine, a **3 broker** kafka instance is at best the minimum, for a hassle-free working. Also, **replication factor is set to 2**. 6 | 7 | Say X,Y and Z are our kafka brokers. With replication factor 2, the data in X will be copied to both Y & Z, the data in Y will be copied to X & Z and the data of Z is copied to X & Y. 8 | 9 | ### Prerequisites 10 | - have java >= 1.8 installed. 
- get **binary** distribution of Kafka from [here](https://kafka.apache.org/downloads).

### Setup
Extract the contents of the kafka archive to a convenient place and `cd` into it. Use a terminal multiplexer to run the components that make the kafka ecosystem.

#### Zookeeper
- Edit the config file `config/zookeeper.properties` and change the `dataDir` entry to some place that does not get wiped after a reboot.
Ex:`dataDir=/home/user/tmp/zookeeper`
- Start the zookeeper instance with
`$ bin/zookeeper-server-start.sh config/zookeeper.properties`

#### Kafka brokers
- In the `config` folder there would be a `server.properties` file. This is the kafka server's config file. We need 3 instances of kafka brokers.
- Make a copy. `$ cp config/server.properties config/server.b1.properties`
- In the copy make the following changes
```
broker.id=1 #unique id for our broker instance
port=9092 #port where it listens
delete.topic.enable=true #if we want to delete kafka topic stored in broker
log.dirs=/home/thatcoder/kafka-logs/01 #to a place that's not volatile
advertised.host.name=10.0.0.81 #prevents leader not found error when connecting from remote machine
```

- Make 2 more copies of this file and change the fields `broker.id`, `port` and `log.dirs` for each file.
- Run the individual brokers like
```
$ bin/kafka-server-start.sh config/server.b1.properties
$ bin/kafka-server-start.sh config/server.b2.properties
$ bin/kafka-server-start.sh config/server.b3.properties
```

**Tip:** Executing a `$ jps` on the shell would give all JVM instances. To kill the processes `kill -9 <pid>` would do the trick.
43 | 44 | ##### Testing out the install 45 | - Create a topic with 46 | `$ bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 2 --partitions 3 --topic ` 47 | - Push data onto it 48 | `$ bin/kafka-console-producer.sh --broker-list localhost:9092,localhost:9093,localhost:9094 --sync --topic ` 49 | - Fetch data from it 50 | `$ bin/kafka-console-consumer.sh --zookeeper localhost:2181 --topic --from-beginning` 51 | 52 | # Program Setup 53 | 54 | To start the application in development mode, 55 | ``` 56 | npm run dev 57 | ``` 58 | 59 | In production mode use, 60 | 61 | ``` 62 | npm start 63 | ``` 64 | 65 | -------------------------------------------------------------------------------- /config.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | kafka_topic: 'example', 3 | kafka_server: 'localhost:2181', 4 | }; 5 | -------------------------------------------------------------------------------- /consumer.app.js: -------------------------------------------------------------------------------- 1 | 2 | const kafka = require('kafka-node'); 3 | const bp = require('body-parser'); 4 | const config = require('./config'); 5 | 6 | try { 7 | const Consumer = kafka.HighLevelConsumer; 8 | const client = new kafka.Client(config.kafka_server); 9 | let consumer = new Consumer( 10 | client, 11 | [{ topic: config.kafka_topic, partition: 0 }], 12 | { 13 | autoCommit: true, 14 | fetchMaxWaitMs: 1000, 15 | fetchMaxBytes: 1024 * 1024, 16 | encoding: 'utf8', 17 | fromOffset: false 18 | } 19 | ); 20 | consumer.on('message', async function(message) { 21 | console.log('here'); 22 | console.log( 23 | 'kafka-> ', 24 | message.value 25 | ); 26 | }) 27 | consumer.on('error', function(err) { 28 | console.log('error', err); 29 | }); 30 | } 31 | catch(e) { 32 | console.log(e); 33 | } 34 | -------------------------------------------------------------------------------- /package.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "name": "kaas", 3 | "version": "1.0.0", 4 | "description": "", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1", 8 | "start": "node producer.app.js", 9 | "dev": "nodemon producer.app.js" 10 | }, 11 | "keywords": [], 12 | "author": "", 13 | "license": "ISC", 14 | "dependencies": { 15 | "kafka-node": "^2.4.1", 16 | "nodemon": "^1.17.3" 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /pm2.json: -------------------------------------------------------------------------------- 1 | { 2 | "apps": [{ 3 | "name": "kafka-producer", 4 | "script": "npm", 5 | "args": "start", 6 | "env": { 7 | "NODE_ENV": "production" 8 | } 9 | }] 10 | } 11 | -------------------------------------------------------------------------------- /producer.app.js: -------------------------------------------------------------------------------- 1 | 2 | const kafka = require('kafka-node'); 3 | const bp = require('body-parser'); 4 | const config = require('./config'); 5 | 6 | try { 7 | const Producer = kafka.Producer; 8 | const client = new kafka.Client(config.kafka_server); 9 | const producer = new Producer(client); 10 | const kafka_topic = 'example'; 11 | console.log(kafka_topic); 12 | let payloads = [ 13 | { 14 | topic: kafka_topic, 15 | messages: config.kafka_topic 16 | } 17 | ]; 18 | 19 | producer.on('ready', async function() { 20 | let push_status = producer.send(payloads, (err, data) => { 21 | if (err) { 22 | console.log('[kafka-producer -> '+kafka_topic+']: broker update failed'); 23 | } else { 24 | console.log('[kafka-producer -> '+kafka_topic+']: broker update success'); 25 | } 26 | }); 27 | }); 28 | 29 | producer.on('error', function(err) { 30 | console.log(err); 31 | console.log('[kafka-producer -> '+kafka_topic+']: connection errored'); 32 | throw err; 33 | }); 34 | } 35 | catch(e) { 36 | console.log(e); 
37 | } 38 | --------------------------------------------------------------------------------