├── .gitignore
├── docker-compose.yaml
├── config.js
├── package.json
├── businessrules.js
├── README.md
├── utils.js
├── events.js
├── api.js
├── cli.js
└── consumer.js

/.gitignore:
--------------------------------------------------------------------------------
node_modules
.vscode

--------------------------------------------------------------------------------
/docker-compose.yaml:
--------------------------------------------------------------------------------
version: '3'
services:
  mongo:
    image: "mongo"
    ports:
      - "27017:27017"
  redis:
    image: "redis"
    ports:
      - "6379:6379"
  kafka:
    image: "landoop/fast-data-dev"
    environment:
      - ADV_HOST=127.0.0.1
    ports:
      - "2181:2181" # Zookeeper
      - "3030:3030" # Landoop web UI
      - "8081:8081" # Schema Registry
      - "8082:8082" # Kafka REST Proxy
      - "9092:9092" # Kafka broker

--------------------------------------------------------------------------------
/config.js:
--------------------------------------------------------------------------------
module.exports = {
  TOPIC: 'requests',
  TOPIC_EVENTS: 'finalevents',
  KAFKA_HOST: '127.0.0.1:9092',
  PUBSUB_TOPIC: 'responses',
  MONGO_URL: 'mongodb://localhost:27017/db1',
  MONGO_COLLECTION: 'test',
  ENDPOINT_URL: 'http://127.0.0.1:3000/produce',
  API_PORT: 3000,
  API_CON_TIMEOUT: 5000, // ms to wait for the consumer's reply
  PRODUCER_CONFIG: {
    requireAcks: 1,
    ackTimeoutMs: 100,
    partitionerType: 4 // 4 = custom partitioner (getPartition in utils.js)
  }
};

--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
{
  "name": "kafka",
  "version": "1.0.0",
  "description": "",
  "main": "api.js",
  "scripts": {
    "test": "echo \"Error: no test specified\" && exit 1"
  },
  "author": "",
  "license": "ISC",
  "dependencies": {
    "async": "^2.6.0",
    "bluebird": "^3.5.1",
    "body-parser": "^1.18.2",
    "eslint": "^4.19.1",
    "express": "^4.16.3",
    "fastest-validator": "^0.6.6",
    "inquirer": "^5.2.0",
    "kafka-node": "^2.6.1",
    "lodash": "^4.17.10",
    "mongodb": "^3.0.7",
    "prompt": "^1.0.0",
    "redis": "^2.8.0",
    "request": "^2.85.0",
    "rxjs": "^5.5.2",
    "shortid": "^2.2.8",
    "stream-to-observable": "^0.2.0"
  }
}

--------------------------------------------------------------------------------
/businessrules.js:
--------------------------------------------------------------------------------
function throwIf(val, error) {
  if (val) throw error;
}

function BusinessError(msg) {
  return new Error(msg);
}

// Each rule receives (payload, aggregate) and throws a BusinessError when violated.
const mustExist = (payload, agg) => throwIf(!agg, BusinessError('NOT_FOUND'));
const mustBeCleanerOfJob = (payload, agg) =>
  throwIf(
    agg.cleaners.indexOf(payload.cleanerId) === -1,
    BusinessError('MUST_BE_CLEANER_OF_JOB')
  );
const notYetAnswered = (payload, agg) => {
  throwIf(
    agg.answers[payload.cleanerId],
    BusinessError('YOU_ALREADY_ANSWERED')
  );
};
const jobNotYetAccepted = (payload, agg) => {
  throwIf(agg.acceptedCleaner, BusinessError('JOB_ALREADY_ACCEPTED'));
};

module.exports = {
  mustExist,
  mustBeCleanerOfJob,
  notYetAnswered,
  jobNotYetAccepted
};

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Node.js Kafka


## Setup

Make sure you have the following installed on your local machine:
> docker - 18.03.1
> docker-compose - 1.21.1
> node - 9.9.0
> npm - 5.6.0

Install the project

    npm install
    docker-compose up

Create the Kafka topics

    docker run --rm -it --net=host landoop/fast-data-dev kafka-topics --zookeeper 127.0.0.1:2181 --topic requests --replication-factor 1 --partitions 100 --create
    docker run --rm -it --net=host landoop/fast-data-dev kafka-topics --zookeeper 127.0.0.1:2181 --topic finalevents --replication-factor 1 --partitions 100 --create

Kafka UI

Visit http://127.0.0.1:3030 to inspect your Kafka broker, topics, partitions, etc.


## Usage

Terminal 1: Start a consumer (you can start several in different terminals to spread the load)

    node consumer.js

Terminal 2: Start the API

    node api.js

Terminal 3: Start the CLI

    node cli.js

Start interacting with the CLI and observe what the API and consumer do.
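
You can also exercise the API without the CLI. A minimal sketch with curl (the requestId is a made-up example value; the CLI normally generates one with shortid, and jobRequestId is filled in server-side):

    curl -X POST http://127.0.0.1:3000/produce \
      -H 'Content-Type: application/json' \
      -d '{
        "eventId": "createJobRequest",
        "requestId": "req_example_1",
        "cleaners": ["cleaner1", "cleaner2"],
        "tasks": [{ "id": "task1", "name": "task1name", "done": false }]
      }'

The call blocks until a consumer publishes the result back over Redis, or fails with REMOTE_CALL_TIMEOUT after API_CON_TIMEOUT (5s).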

--------------------------------------------------------------------------------
/utils.js:
--------------------------------------------------------------------------------
const events = require('./events');
const Validator = require('fastest-validator');
const v = new Validator();

// Simple 31-based string hash, masked to a positive 31-bit integer.
const hashCode = function(stringOrBuffer) {
  let hash = 0;
  if (stringOrBuffer) {
    const string = stringOrBuffer.toString();
    const length = string.length;

    for (let i = 0; i < length; i++) {
      hash = (hash * 31 + string.charCodeAt(i)) & 0x7fffffff;
    }
  }

  return hash === 0 ? 1 : hash;
};

// Custom kafka-node partitioner: pick a partition deterministically by key.
function getPartition(partitions, key) {
  key = key || '';

  const index = hashCode(key) % partitions.length;
  return partitions[index];
}
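
// Example (hypothetical values): the "requests" topic is created with 100
// partitions, and every message is keyed by jobRequestId, so all events for
// one job hash to the same partition and are consumed in order:
//
//   getPartition([0, 1, 2, 3], 'job_abc'); // same key, same partition, every time
//   getPartition([0, 1, 2, 3], 'job_xyz'); // (probably) a different partition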

function getEventSchema(eventId) {
  const schema = events[eventId];
  if (!schema || !schema.SCHEMA) throw new Error(`schema for eventId(${eventId}) not found`);
  return schema.SCHEMA;
}

function getEventMockFunc(eventId) {
  const schema = events[eventId];
  if (!schema || !schema.MOCK) throw new Error(`mock for eventId(${eventId}) not found`);
  return schema.MOCK;
}

function getBusinessRulesOfEvent(eventId) {
  const schema = events[eventId];
  if (!schema) throw new Error(`schema for eventId(${eventId}) not found`);
  return schema.RULES || [];
}

function isValidEvent(payload) {
  const schema = getEventSchema(payload.eventId);
  return v.validate(payload, schema) === true;
}

function throwIf(val, error) {
  if (val) throw error;
}

function getEventIds() {
  return Object.keys(events);
}

module.exports = {
  getPartition,
  isValidEvent,
  throwIf,
  getEventIds,
  getEventSchema,
  getEventMockFunc,
  getBusinessRulesOfEvent
};

--------------------------------------------------------------------------------
/events.js:
--------------------------------------------------------------------------------
const {
  mustExist,
  mustBeCleanerOfJob,
  notYetAnswered,
  jobNotYetAccepted
} = require('./businessrules');

module.exports = {
  createJobRequest: {
    SCHEMA: {
      eventId: { type: 'string', equal: 'createJobRequest' },
      cleaners: { type: 'array', items: { type: 'string' } },
      jobRequestId: { type: 'string' },
      tasks: {
        type: 'array',
        items: {
          type: 'object',
          props: {
            id: { type: 'string' },
            name: { type: 'string' },
            done: { type: 'boolean' }
          }
        }
      }
    },
    MOCK: () => ({
      eventId: 'createJobRequest',
      cleaners: ['cleaner1', 'cleaner2'],
      jobRequestId: '_WILL_BE_REPLACED_BY_API_',
      tasks: [
        {
          id: 'task1',
          name: 'task1name',
          done: false
        }
      ]
    }),
    RULES: []
  },
  acceptJobRequest: {
    SCHEMA: {
      eventId: { type: 'string', equal: 'acceptJobRequest' },
      jobRequestId: { type: 'string' },
      cleanerId: { type: 'string' }
    },
    MOCK: ({ jobRequestId, cleanerId }) => ({
      eventId: 'acceptJobRequest',
      jobRequestId,
      cleanerId
    }),
    RULES: [mustExist, mustBeCleanerOfJob, notYetAnswered, jobNotYetAccepted]
  },
  declineJobRequest: {
    SCHEMA: {
      eventId: { type: 'string', equal: 'declineJobRequest' },
      jobRequestId: { type: 'string' },
      cleanerId: { type: 'string' }
    },
    MOCK: ({ jobRequestId, cleanerId }) => ({
      eventId: 'declineJobRequest',
      jobRequestId,
      cleanerId
    }),
    RULES: [mustExist, mustBeCleanerOfJob, notYetAnswered]
  }
  // startJobRequest: {
  //   eventId: { type: 'string', equal: 'startJobRequest' },
  //   jobRequestId: { type: 'string' }
  // },
  // finishJobRequestTask: {
  //   eventId: { type: 'string', equal: 'finishJobRequestTask' },
  //   jobRequestId: { type: 'string' },
  //   taskId: { type: 'string' }
  // },
  // finishJobRequest: {
  //   eventId: { type: 'string', equal: 'finishJobRequest' },
  //   jobRequestId: { type: 'string' }
  // }
};

--------------------------------------------------------------------------------
/api.js:
--------------------------------------------------------------------------------
const express = require('express');
const bodyParser = require('body-parser');
const Bluebird = require('bluebird');
const shortid = require('shortid');
const kafka = require('kafka-node');
const redisClient = require('redis').createClient();
const {
  TOPIC,
  PRODUCER_CONFIG,
  KAFKA_HOST,
  PUBSUB_TOPIC,
  API_PORT,
  API_CON_TIMEOUT
} = require('./config');
const { getPartition, throwIf, isValidEvent } = require('./utils');

const app = express();
const client = new kafka.KafkaClient({ kafkaHost: KAFKA_HOST });
const producer = new kafka.Producer(client, PRODUCER_CONFIG, getPartition);

const produceMsg = Bluebird.promisify(producer.send.bind(producer));

app.use(bodyParser.json());

// requestId -> deferred promise of the pending HTTP call
const map = {};

function startListener(deps) {
  deps.redis.psubscribe(PUBSUB_TOPIC + ':*');
  deps.redis.on('pmessage', function(pattern, channel, message) {
    const id = channel.split(':')[1];
    if (deps.map[id]) {
      deps.map[id].resolve(JSON.parse(message));
      delete deps.map[id];
    }
    // TODO: expire stale entries; a request that times out leaves its deferred in the map forever
  });
}

function timeout(time) {
  return new Promise((_, reject) => {
    setTimeout(() => reject(new Error('REMOTE_CALL_TIMEOUT')), time);
  });
}

function enrichPayloadMaybe(payload) {
  switch (payload.eventId) {
    case 'createJobRequest': {
      // the server, not the client, assigns the aggregate id
      return Object.assign({}, payload, { jobRequestId: shortid.generate() });
    }
    default: {
      return payload;
    }
  }
}

function createRemoteCall(requestId) {
  const remoteCall = Bluebird.defer();
  map[requestId] = remoteCall;
  return remoteCall.promise;
}
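
// How a request/response round trip works (the ids below are made up):
//
//   1. POST /produce arrives with requestId 'abc'; createRemoteCall('abc')
//      parks a deferred promise in `map`.
//   2. The payload is produced to the 'requests' topic, keyed by jobRequestId.
//   3. A consumer processes it and publishes the outcome to the Redis
//      channel 'responses:abc'.
//   4. startListener's pmessage handler finds map['abc'], resolves the
//      deferred, and the HTTP response goes out.
//   5. If nothing arrives within API_CON_TIMEOUT ms, timeout() wins the
//      race and the client sees REMOTE_CALL_TIMEOUT.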

async function produceRouteHandler(req, res) {
  try {
    const payload = req.body;
    const enrichedPayload = enrichPayloadMaybe(payload);
    throwIf(!isValidEvent(enrichedPayload), new Error('EVENT_NOT_VALID'));
    console.log('request -> ', enrichedPayload);
    const remoteCall = createRemoteCall(payload.requestId);
    const [kafkaCallResult, remoteCallResult] = await Bluebird.all([
      produceMsg([
        {
          topic: TOPIC,
          messages: [JSON.stringify(enrichedPayload)],
          key: enrichedPayload.jobRequestId
        }
      ]),
      Bluebird.race([remoteCall, timeout(API_CON_TIMEOUT)])
    ]);
    if (remoteCallResult.res === 'FAIL') {
      throw new Error(remoteCallResult.error); // bubble up
    } else {
      res.json({ ok: true, remoteCallResult });
    }
  } catch (e) {
    res.json({ ok: false, error: e.message });
  }
}

app.post('/produce', produceRouteHandler);

app.listen(API_PORT, () => {
  console.log('API up');
  startListener({ redis: redisClient, map });
});

--------------------------------------------------------------------------------
/cli.js:
--------------------------------------------------------------------------------
const Rx = require('rxjs/Rx');
const inquirer = require('inquirer');
const request = require('request');
const shortid = require('shortid');

const { ENDPOINT_URL } = require('./config');
const {
  isValidEvent,
  getEventIds,
  getEventMockFunc,
  getEventSchema
} = require('./utils');

const askSendRequest = {
  name: 'request',
  type: 'expand',
  message: 'Send Request',
  choices: [
    { key: 'y', name: 'Yes', value: 'yes' },
    { key: 'n', name: 'No', value: 'no' }
  ]
};

const askWhichEvent = {
  name: 'eventId',
  type: 'list',
  message: 'EventId',
  choices: getEventIds()
};

const prompts = new Rx.Subject();
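
// inquirer can consume an RxJS observable of questions: each question pushed
// through prompts.next(...) is asked in turn, and the session ends ('Bye')
// only when the subject completes. The whole CLI loop below is driven by
// pushing the next question from inside the answer handlers:
// askWhichEvent -> one input question per string field -> askSendRequest.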

function reducer(state = {}) {
  return action => {
    state[action.name] = action.answer;
  };
}

function mapEventIdToData(eventId) {
  const questions = [];
  const eventResolved = getEventSchema(eventId);
  Object.keys(eventResolved).forEach(key => {
    const val = eventResolved[key];
    if (val.type === 'string') {
      questions.push({
        name: key,
        type: 'input',
        message: 'Value for ' + key
      });
    }
  });
  return { questions };
}

function mapEventIdToMockData(eventId, state) {
  const resolvedMockDataFunc = getEventMockFunc(eventId);
  return resolvedMockDataFunc(state);
}

function validate(state, cb, cbFail) {
  const payload = mapEventIdToMockData(state.eventId, state);
  if (isValidEvent(payload)) {
    cb(payload);
  } else {
    cbFail && cbFail();
  }
}

function flushState(state) {
  Object.keys(state).forEach(key => {
    delete state[key];
  });
}

function sendRequest(payload, endpoint, ui, requestId, cb) {
  const options = {
    method: 'post',
    body: Object.assign({}, payload, { requestId }),
    json: true,
    url: endpoint
  };
  let bar = '.';
  const inter = setInterval(() => {
    bar += '.';
    ui.updateBottomBar(bar);
  }, 100);
  request(options, function(err, res, body) {
    clearInterval(inter);
    if (err) {
      console.error('error posting json: ', err);
      return cb();
    }
    console.log(JSON.stringify(body, null, 2));
    cb();
  });
}

function handleSendRequest(deps, ans) {
  if (ans.answer === askSendRequest.choices[0].value) {
    validate(deps.state, payload => {
      sendRequest(payload, ENDPOINT_URL, deps.ui, deps.requestId, () => {
        deps.ui.updateBottomBar('');
        deps.prompts.next(askSendRequest);
      });
    });
  }
  if (ans.answer === askSendRequest.choices[1].value) {
    flushState(deps.state);
    delete deps.requestId;
    deps.prompts.next(askWhichEvent);
  }
}

function filterQuestions(eventId) {
  return e => {
    if (eventId === 'createJobRequest') {
      return e.name !== 'eventId' && e.name !== 'jobRequestId'; // jobRequestId is generated server-side
    } else {
      return e.name !== 'eventId';
    }
  };
}

function handleEventChoose(deps, ans) {
  deps.reduce(ans);
  deps.requestId = shortid.generate();
  const { questions } = mapEventIdToData(deps.state.eventId);
  const filteredQuestions = questions.filter(
    filterQuestions(deps.state.eventId)
  ); // drop fields already set by the first question or filled server-side
  filteredQuestions.forEach(e => deps.prompts.next(e));
  if (filteredQuestions.length === 0) {
    validate(deps.state, () => deps.prompts.next(askSendRequest));
  }
}

function handleDefault(deps, ans) {
  deps.reduce(ans);
  validate(deps.state, () => deps.prompts.next(askSendRequest));
}

function handleAnswer(deps) {
  return ans => {
    switch (ans.name) {
      case askSendRequest.name:
        return handleSendRequest(deps, ans);
      case askWhichEvent.name:
        return handleEventChoose(deps, ans);
      default:
        return handleDefault(deps, ans);
    }
  };
}

const state = {};
const reduce = reducer(state);
const ui = new inquirer.ui.BottomBar();
ui.updateBottomBar('');
inquirer
  .prompt(prompts)
  .ui.process.subscribe(
    handleAnswer({ state, prompts, reduce, ui }),
    err => console.log('Error: ', err),
    () => console.log('Bye')
  );

prompts.next(askWhichEvent); // kick off the loop

--------------------------------------------------------------------------------
/consumer.js:
--------------------------------------------------------------------------------
const kafka = require('kafka-node');
const streamToObservable = require('stream-to-observable');
const Rx = require('rxjs/Rx');
const async = require('async');
const Bluebird = require('bluebird');
const _ = require('lodash');
const redisClient = require('redis').createClient();
const MongoClient = require('mongodb').MongoClient;
const {
  KAFKA_HOST,
  TOPIC,
  PRODUCER_CONFIG,
  TOPIC_EVENTS,
  PUBSUB_TOPIC,
  MONGO_URL,
  MONGO_COLLECTION
} = require('./config');
const { getPartition, getBusinessRulesOfEvent } = require('./utils');

const client = new kafka.KafkaClient({ kafkaHost: KAFKA_HOST });
const consumerGroup = new kafka.ConsumerGroupStream(
  {
    kafkaHost: KAFKA_HOST,
    groupId: 'ExampleTestGroup',
    sessionTimeout: 15000,
    protocol: ['roundrobin'],
    fromOffset: 'latest',
    asyncPush: false,
    autoCommit: false // offsets are committed manually after processing
  },
  TOPIC
);
const producer = new kafka.Producer(client, PRODUCER_CONFIG, getPartition);

class Response {
  static Pass(doc) {
    return new Response({ res: 'PASS', doc });
  }
  static Success(doc) {
    return new Response({ res: 'SUCCESS', doc });
  }
  static Fail(doc, error) {
    return new Response({ res: 'FAIL', doc, error });
  }
  constructor(obj) {
    this.res = obj.res;
    this.error = obj.error;
    this.doc = obj.doc;
  }
}
Response.errors = {
  PROCESSING_ERROR: 'PROCESSING_ERROR'
};
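
// The three outcomes a handler can return (illustrative values):
//   Response.Pass(doc)     -> duplicate requestId, aggregate already up to date
//   Response.Success(doc)  -> business rules passed, aggregate written
//   Response.Fail(doc, 'MUST_BE_CLEANER_OF_JOB') -> a business rule threw
// Whichever it is, the result is published to 'responses:<requestId>' so the
// API call that is waiting on this request can resolve.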

const reducer = (state, key) => () => {
  state[key] = state[key] + 1;
};

const debug = part => msg => {
  console.log(part + ' -> ' + new Date(), JSON.parse(msg.value).requestId);
};

// Process one Kafka message: parse it, run the handler with retries, and on
// repeated failure publish a FAIL response and commit the offset anyway.
const processMessage = deps => async message => {
  const json = JSON.parse(message.value);
  debug('process')(message);
  try {
    await Bluebird.promisify(
      async.retryable(
        {
          times: 3,
          interval: function(retryCount) {
            return 50 * Math.pow(2, retryCount); // exponential backoff
          }
        },
        processTask
      )
    ).bind(async)(deps, message, json);
  } catch (e) {
    console.log(e);
    await deps.pubsub(
      PUBSUB_TOPIC + ':' + json.requestId,
      JSON.stringify(Response.Fail(message, Response.errors.PROCESSING_ERROR))
    );
    // maybe commit to a dedicated failure topic instead
    await deps.commit(message, true); // commit offset
  }
  return message;
};

function mustBeResponse(input) {
  if (!(input instanceof Response)) {
    throw new Error('runAction must return a Response');
  }
}

async function processTask(deps, message, payload) {
  debug('run')(message);
  const res = await runAction(deps, message, payload);
  mustBeResponse(res);
  await deps.producer([
    {
      topic: TOPIC_EVENTS,
      messages: [message.value],
      key: message.key
    }
  ]); // forward the message to the final event store
  await deps.pubsub(
    PUBSUB_TOPIC + ':' + payload.requestId,
    JSON.stringify(res)
  );
  await deps.commit(message, true); // commit offset
  return message;
}

const pauseOrResume = (state, stream) => () => {
  if (state.in - state.out > 20 && !stream.isPaused()) {
    console.log('pause');
    stream.pause();
  }
  if (state.in - state.out <= 20 && stream.isPaused()) {
    console.log('unpause');
    stream.resume();
  }
};
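
// Backpressure example: `state.in` counts messages read off the stream and
// `state.out` counts messages fully processed. With flatMap concurrency 1
// (see `flow` below), a slow handler lets `in` run ahead of `out`; once more
// than 20 messages are in flight the stream is paused, and it resumes as
// soon as the backlog drains back to 20 or fewer.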

const connectMongo = function(cb) {
  MongoClient.connect(MONGO_URL, (err, client) => {
    if (err) return console.log(err);
    cb(client.db().collection(MONGO_COLLECTION));
  });
};

// Idempotency guard: has this requestId already been folded into an aggregate?
async function duplicationCheck(deps, payload) {
  return await deps.mongo.findOne({
    requestsHandled: { $in: [payload.requestId] }
  });
}

async function loadAggregate(deps, payload) {
  return await deps.mongo.findOne({
    jobRequestId: payload.jobRequestId
  });
  // could validate the aggregate here
}

// TODO: add write concerns
async function updateAggregate(deps, payload, agg) {
  // could validate the aggregate here
  const res = await deps.mongo.findOneAndUpdate(
    {
      jobRequestId: payload.jobRequestId
    },
    { $set: agg },
    { returnOriginal: false } // resolve with the updated document
  );
  if (res.ok !== 1) throw new Error('mongo error');
  return res.value;
}

// TODO: add write concerns
async function createAggregate(deps, payload, agg) {
  return await deps.mongo.insertOne(agg);
}

function reduceBr(array) {
  return (deps, payload, agg) => {
    _.each(array, f => f(payload, agg));
  };
}

async function createHandler(deps, payload, message) {
  const dup = await duplicationCheck(deps, payload);
  if (dup) return Response.Pass(dup);
  const [err] = runBusinessLogic(deps, payload, {});
  if (err) return Response.Fail(message, err);
  const newAgg = aggregateReducer(payload, {});
  await createAggregate(deps, payload, newAgg);
  return Response.Success(newAgg);
}

async function defaultHandler(deps, payload, message) {
  const dup = await duplicationCheck(deps, payload);
  if (dup) return Response.Pass(dup);
  const agg = await loadAggregate(deps, payload);
  const [err] = runBusinessLogic(deps, payload, agg);
  if (err) return Response.Fail(message, err);
  const newAgg = aggregateReducer(payload, agg);
  await updateAggregate(deps, payload, newAgg);
  return Response.Success(newAgg);
}

const eventActions = {
  createJobRequest: createHandler,
  acceptJobRequest: defaultHandler,
  declineJobRequest: defaultHandler
};

function addRequestId(requestsHandled = [], requestId) {
  if (!requestId) return requestsHandled; // skip if not defined
  return _.uniq([...requestsHandled, requestId]);
}

function aggregateReducer(payload, agg) {
  switch (payload.eventId) {
    case 'acceptJobRequest': {
      return {
        ...agg,
        acceptedCleaner: payload.cleanerId,
        answers: { ...agg.answers, [payload.cleanerId]: 'accepted' },
        requestsHandled: addRequestId(agg.requestsHandled, payload.requestId)
      };
    }
    case 'declineJobRequest': {
      // a decline only records the answer; it must not claim the job
      return {
        ...agg,
        answers: { ...agg.answers, [payload.cleanerId]: 'declined' },
        requestsHandled: addRequestId(agg.requestsHandled, payload.requestId)
      };
    }
    case 'createJobRequest': {
      return {
        cleaners: payload.cleaners,
        tasks: payload.tasks,
        jobRequestId: payload.jobRequestId,
        answers: {},
        requestsHandled: addRequestId(agg.requestsHandled, payload.requestId)
      };
    }
    default: {
      return agg;
    }
  }
}
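
// Worked example of how one aggregate evolves (values are illustrative):
//
//   createJobRequest            -> { cleaners: ['cleaner1', 'cleaner2'],
//                                    tasks: [...], jobRequestId: 'job1',
//                                    answers: {}, requestsHandled: ['r1'] }
//   acceptJobRequest (cleaner1) -> { ...same, acceptedCleaner: 'cleaner1',
//                                    answers: { cleaner1: 'accepted' },
//                                    requestsHandled: ['r1', 'r2'] }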

function runBusinessLogic(deps, payload, agg) {
  const rules = deps.getBusinessRulesOfEvent(payload.eventId);
  try {
    reduceBr(rules)(deps, payload, agg);
    return [];
  } catch (e) {
    return [e.message];
  }
}

function resolveHandler(sources = {}) {
  return (source, id) => {
    const resolvedAction = sources[source][id];
    if (!resolvedAction)
      throw new Error(
        'handler not found for id:' + id + ' in source:' + source
      );
    return resolvedAction;
  };
}

// execute a command
async function runAction(deps, message, payload) {
  const resolvedAction = deps.getHandler('eventActions', payload.eventId);
  return await resolvedAction(deps, payload, message);
}

const state = { in: 0, out: 0 };

const flow = deps =>
  streamToObservable(deps.stream)
    .do(debug('in'))
    .do(reducer(state, 'in'))
    .do(pauseOrResume(state, consumerGroup))
    .flatMap(processMessage(deps), null, 1) // concurrency 1
    .do(reducer(state, 'out'))
    .do(pauseOrResume(state, consumerGroup))
    .do(debug('out'));

connectMongo(mongo => {
  console.log('mongo connected');
  client.once('ready', () => {
    console.log('kafka ready');
    flow({
      pubsub: Bluebird.promisify(redisClient.publish).bind(redisClient),
      commit: Bluebird.promisify(consumerGroup.commit.bind(consumerGroup)),
      producer: Bluebird.promisify(producer.send.bind(producer)),
      mongo,
      stream: consumerGroup,
      getBusinessRulesOfEvent,
      getHandler: resolveHandler({
        eventActions
      })
    }).subscribe(_.noop);
  });
});
--------------------------------------------------------------------------------