├── base-version.txt
├── aplsource
│   ├── kafka
│   │   ├── commit.aplf
│   │   ├── new.aplf
│   │   ├── version.aplf
│   │   ├── new_topic_partition.aplf
│   │   ├── committed.aplf
│   │   ├── errormsg.aplf
│   │   ├── delivery.aplf
│   │   ├── setconf.aplf
│   │   ├── set_offset.aplf
│   │   ├── set_topic_partition.aplf
│   │   ├── produce.aplf
│   │   ├── subscribe_topic_partition.aplf
│   │   ├── consume.aplf
│   │   └── NA.aplf
│   ├── Init.aplf
│   ├── Tests
│   │   ├── ASSERT.aplf
│   │   ├── TestTopics.aplf
│   │   ├── TestCommit.aplf
│   │   ├── TestSameGroup.aplf
│   │   └── TestDiffGroup.aplf
│   ├── Samples
│   │   ├── ProducerLoop.aplf
│   │   ├── ConsumerCGroupC.aplf
│   │   ├── ConsumerBGroupAB.aplf
│   │   ├── ConsumerAGroupAB.aplf
│   │   ├── TopicConsumer.aplf
│   │   ├── SimpleConsumer.aplf
│   │   ├── ConsumerLoop.aplf
│   │   ├── ConsumerLoopCommit.aplf
│   │   ├── SimpleProducer.aplf
│   │   ├── TopicProducer.aplf
│   │   ├── DirectSubscribe.aplf
│   │   ├── Example.aplf
│   │   ├── ManualTPList.aplf
│   │   └── SAMPLES.md
│   ├── Record.aplc
│   ├── Producer.aplc
│   └── Consumer.aplc
├── kafka
│   ├── packages.config
│   ├── dllmain.cpp
│   ├── kafka.h
│   ├── kafka.vcxproj
│   └── kafka.cpp
├── .gitignore
├── .gitattributes
├── kafka.dyalogbuild
├── mk_kafka.sh
├── LICENSE
├── kafka.sln
├── CI
│   ├── publish.sh
│   └── gh-release.sh
├── kafkaBuild.bat
├── README.md
├── kafka.make
├── Jenkinsfile
└── SPEC.md
/base-version.txt:
--------------------------------------------------------------------------------
1 | 0.1
--------------------------------------------------------------------------------
/aplsource/kafka/commit.aplf:
--------------------------------------------------------------------------------
1 | r←commit(cons tplist)
2 | ⍝ Commit the offsets in a topic partition list for a consumer
3 | r←Commit cons tplist
4 |
--------------------------------------------------------------------------------
/aplsource/kafka/new.aplf:
--------------------------------------------------------------------------------
1 | r←new
2 | ⍝ create a new kafka object
3 | r←2⊃InitKafka 1
4 |
--------------------------------------------------------------------------------
/aplsource/kafka/version.aplf:
--------------------------------------------------------------------------------
1 | r←version
2 | ⍝ get the version of librdkafka
3 | r←Version 512 512
4 |
--------------------------------------------------------------------------------
/aplsource/kafka/new_topic_partition.aplf:
--------------------------------------------------------------------------------
1 | r←new_topic_partition
2 | ⍝ Create a new topic partition list
3 | r←NewTopicPartitionList 1
4 | r←2⊃r
5 |
--------------------------------------------------------------------------------
/aplsource/Init.aplf:
--------------------------------------------------------------------------------
1 | Init path
2 | ⍝ Append a directory separator unless path is empty or already ends in one
3 | path,←'/'{0=≢⍵:'' ⋄ ∨/'/\'∊¯1↑⍵:'' ⋄ ⍺}path
4 | kafka.NA path,'kafka'
5 | Producer.kafka←#.kafka
6 | Consumer.kafka←#.kafka
7 |
--------------------------------------------------------------------------------
/kafka/packages.config:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <packages>
3 |   <package id="librdkafka.redist" version="2.5.0" targetFramework="native" />
4 | </packages>
--------------------------------------------------------------------------------
/aplsource/kafka/committed.aplf:
--------------------------------------------------------------------------------
1 | r←committed(cons tplist);err;offset
2 | ⍝ Get the committed offset for a topic partition list
3 | (err offset)←2↑Committed cons tplist 1
4 | :If 0≠err
5 | r←err''
6 | :Else
7 | r←0 offset
8 | :EndIf
9 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | /.vs
3 | /x64
4 | /Debug
5 | /kafka/Debug
6 | /kafka/*.vcxproj.filters
7 | /kafka/*.vcxproj.user
8 | /packages
9 | /distribution
10 | /Win32
11 | /demo*
12 |
13 | test.bat
14 |
--------------------------------------------------------------------------------
/aplsource/kafka/errormsg.aplf:
--------------------------------------------------------------------------------
1 | r←errormsg errid;len;msg;err
2 | ⍝ get the text for an error number
3 | ⍝ errid error number
4 | (err msg len)←3↑DRMessageError errid 512 512
5 | r←err (len↑msg)
6 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Normalise all text files to LF line endings (no CRLF)
2 | * text=auto eol=lf
3 | *.py text eol=lf
4 | *.md text eol=lf
5 | *.yml text eol=lf
6 | *.jpg binary
7 | *.png binary
8 | *.gif binary
9 | *.apl? linguist-language=APL
--------------------------------------------------------------------------------
/aplsource/Tests/ASSERT.aplf:
--------------------------------------------------------------------------------
1 | ASSERT←{⍺←''
2 | failure←'>>> FAILURE <<<'
3 | success←'>>> SUCCESS <<<'
4 | ⍵:{}⎕←((~0∊⍴⍺)/(80↑⍺),': '),success
5 | {}⎕←((~0∊⍴⍺)/(80↑⍺),': '),failure
6 | }
7 |
--------------------------------------------------------------------------------
/aplsource/kafka/delivery.aplf:
--------------------------------------------------------------------------------
1 | r←delivery(prod length)
2 | ⍝ return the delivery reports for a producer
3 | ⍝ prod producer instance
4 | ⍝ length max number of delivery reports
5 | r←DeliveryReport prod length length length
6 |
--------------------------------------------------------------------------------
/aplsource/kafka/setconf.aplf:
--------------------------------------------------------------------------------
1 | r←setconf(inst opt val);err;msg;len
2 | ⍝ set an option for a kafka instance
3 | ⍝ inst the kafka instance (either a producer or a consumer)
4 | ⍝ opt option
5 | ⍝ val value
6 |
7 | (err msg len)←3↑SetKafkaConf inst opt val 512 512
8 | :If 0≠err
9 | r←err (len↑msg)
10 | :Else
11 | r←0 ''
12 | :EndIf
13 |
--------------------------------------------------------------------------------
/aplsource/kafka/set_offset.aplf:
--------------------------------------------------------------------------------
1 | r←set_offset(topic_partition_list topicname partition offset)
2 | ⍝ Set the offset for a topic and partition in a topic partition list
3 | ⍝ topic_partition_list topic partition list
4 | ⍝ topicname the topic to update
5 | ⍝ partition the partition number
6 | ⍝ offset the offset to set
7 | r←SetOffset topic_partition_list topicname partition offset
8 | :If 0=⊃r
9 | r←0 ''
10 | :EndIf
11 |
--------------------------------------------------------------------------------
/aplsource/kafka/set_topic_partition.aplf:
--------------------------------------------------------------------------------
1 | r←set_topic_partition(topic_partition_list topicname partition)
2 | ⍝ Add a topic and partition to a topic partition list
3 | ⍝ topic_partition_list topic partition list
4 | ⍝ topicname the topic to add to the list
5 | ⍝ partition the partition number
6 | r←SetTopicPartitionList topic_partition_list topicname partition
7 | :If 0=⊃r
8 | r←0 ''
9 | :EndIf
10 |
--------------------------------------------------------------------------------
/aplsource/kafka/produce.aplf:
--------------------------------------------------------------------------------
1 | r←produce(prod topic_name payload key partition);err;msgid;msg;len
2 | ⍝ Produce a message to the kafka server
3 | ⍝ prod producer instance
4 | ⍝ topic_name topic name
5 | ⍝ payload payload
6 | ⍝ key key
7 | ⍝ partition partition
8 | (err msgid msg len)←4↑Produce prod topic_name payload(≢payload)key(≢key)partition 1 512 512
9 | r←err msgid (len↑msg)
--------------------------------------------------------------------------------
/aplsource/kafka/subscribe_topic_partition.aplf:
--------------------------------------------------------------------------------
1 | r←subscribe_topic_partition(cons topic_partition_list);err;msg;len
2 | ⍝ Subscribe to a list of topics
3 | ⍝ cons instance of a Consumer
4 | ⍝ topic_partition_list list of topics
5 |
6 | (err msg len)←3↑SubscribeConsumerTPList cons topic_partition_list 512 512
7 | :If 0≠err
8 | r←err (len↑msg)
9 | :Else
10 | r←0 ''
11 | :EndIf
12 |
--------------------------------------------------------------------------------
/kafka.dyalogbuild:
--------------------------------------------------------------------------------
1 | DyalogBuild: 0.1
2 | ID : KafkaInterface, Version=1.0
3 | Description: Interface to the librdkafka API
4 | Defaults : ⎕IO←⎕ML←1
5 | TARGET : kafka.dws
6 | EXEC : 'kafka' ⎕NS ''
7 | APL : aplsource/kafka/*.aplf, Target=#.kafka
8 | APL : aplsource/*.aplc, Target=#
9 | APL : aplsource/Init.aplf, Target=#
10 | APL : aplsource/Samples/*.aplf, Target=#
11 |
--------------------------------------------------------------------------------
/kafka/dllmain.cpp:
--------------------------------------------------------------------------------
1 | // dllmain.cpp : Defines the entry point for the DLL application
2 | #define WIN32_LEAN_AND_MEAN // Exclude rarely-used stuff from Windows headers
3 | // Windows Header Files
4 | #include <windows.h>
5 |
6 | BOOL APIENTRY DllMain( HMODULE hModule,
7 | DWORD ul_reason_for_call,
8 | LPVOID lpReserved
9 | )
10 | {
11 | switch (ul_reason_for_call)
12 | {
13 | case DLL_PROCESS_ATTACH:
14 | case DLL_THREAD_ATTACH:
15 | case DLL_THREAD_DETACH:
16 | case DLL_PROCESS_DETACH:
17 | break;
18 | }
19 | return TRUE;
20 | }
21 |
22 |
--------------------------------------------------------------------------------
/aplsource/kafka/consume.aplf:
--------------------------------------------------------------------------------
1 | r←consume(cons tl pl kl);err;topic;tlen;pay;paylen;key;keylen;part;offset;msg;len
2 | ⍝ Consume the next message
3 | ⍝ cons the consumer instance
4 | ⍝ tl topic length
5 | ⍝ pl payload length
6 | ⍝ kl key length
7 |
8 | (err topic tlen pay paylen key keylen part offset msg len)←11↑Consume cons tl tl pl pl kl kl 1 1 512 512
9 | :Select err
10 | :Case 0 ⍝ we got a message
11 | r←0 (tlen↑topic) (paylen↑pay) (keylen↑key) part offset
12 | :Case 1 ⍝ no message
13 | r←err (len↑msg)
14 | :Case 2 ⍝ we need more space; retry with larger buffers
15 | r←consume cons,(10+tlen paylen keylen)
16 | :Else ⍝ Kafka error
17 | r←err (len↑msg)
18 | :EndSelect
19 |
--------------------------------------------------------------------------------
/aplsource/Samples/ProducerLoop.aplf:
--------------------------------------------------------------------------------
1 | ProducerLoop;i;config;start
2 |
3 | #.Init'.'
4 |
5 | ⍝ Producer configurations
6 | config←0 2⍴⍬
7 | config⍪←'bootstrap.servers' 'localhost:9092'
8 | config⍪←'client.id' 'producerclient'
9 | ⎕←'Init new producer with config:'
10 | config
11 | producer←⎕NEW #.Producer config
12 |
13 | ⎕←'Produce messages every 5 seconds for 120 seconds:'
14 | start←3⊃⎕AI
15 | :While 120000>(3⊃⎕AI)-start
16 | producer.produce_record ⎕NEW #.Record('topic1'('Time: ',⍕3⊃⎕AI)('key',⍕?5))
17 | ⎕DL 5
18 | producer.produce_record ⎕NEW #.Record('topic2'('Time: ',⍕3⊃⎕AI)('key',⍕?5))
19 | ⎕DL 5
20 | :EndWhile
21 | ⎕←'Outstanding messages:'
22 | ⎕DL 0.5
23 | producer.update_outstanding
24 |
--------------------------------------------------------------------------------
/aplsource/kafka/NA.aplf:
--------------------------------------------------------------------------------
1 | NA file;Describe;res;funcs;z;version;pp
2 | version←#.⎕WG'AplVersion'
3 | :Select ⊃version
4 | :CaseList 'Linux-64' 'Linux' 'AIX' 'AIX-64'
5 | file,←'.so'
6 | :Case 'Mac-64'
7 | file,←'.dylib'
8 | :Else
9 | ⍝ do nothing
10 | :EndSelect
11 | ⎕NA'I4 ',file,'|Describe >0T1 =I4'
12 | :If 0=⊃res←Describe 1024 1024
13 | funcs←⎕JSON 2⊃res ⍝ the library describes its exports as ⎕NA patterns in JSON
14 | z←('\%P'⎕R file)funcs.Patterns ⍝ substitute the library name for the %P placeholder
15 | :If 17>2⊃⎕VFI{(¯1+⍵⍳'.')↑⍵}2⊃version ⍝ interpreters before 17.0 lack the P (pointer) type
16 | :If '-64'≡¯3↑⊃version
17 | pp←' U8' ' >U8'
18 | :Else
19 | pp←' U4' ' >U4'
20 | :EndIf
21 | z←(' P' ' >P'⎕R pp)¨z
22 | :EndIf
23 | ⎕NA¨z
24 | :Else
25 | 'Shared lib failed'⎕SIGNAL 6
26 | :EndIf
27 |
--------------------------------------------------------------------------------
/mk_kafka.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # mk_kafka.sh bits
3 | #exec >$0.$PLATFORM.$BITS.log 2>&1
4 | set -x
5 | set -e
6 |
7 | Usage()
8 | {
9 | cat <<!!
10 | Usage: $0 bits
11 | Bits is 32 or 64
12 |
13 | eg
14 | $0 64
15 | !!
16 | exit 1
17 | }
18 |
19 | [ 1 -ne $# ] && Usage
20 |
21 | BITS=$1
22 |
23 | case $BITS in
24 | 32)
25 | ;;
26 | 64)
27 | ;;
28 | *) Usage
29 | ;;
30 | esac
31 |
32 |
33 |
34 | case $PLATFORM in
35 | linux|pi)
36 | ARCH=`uname -m`
37 | MAKE="make"
38 | EXT=so
39 | ;;
40 | darwin|mac|mac_arm)
41 | ARCH=`uname -m`
42 | MAKE="make"
43 | EXT=dylib
44 | ;;
45 | aix)
46 | ARCH=
47 | MAKE="gmake"
48 | EXT=so
49 | ;;
50 | *) ARCH=
51 | MAKE="make"
52 | EXT=so
53 | ;;
54 | esac
55 |
56 | if [ "$PLATFORM" = "linux" ]; then
57 | BITS=$BITS PLATFORM=$PLATFORM ARCH=$ARCH $MAKE -f kafka.make clean
58 | fi
59 |
60 |
61 | BITS=$BITS PLATFORM=$PLATFORM ARCH=$ARCH $MAKE -f kafka.make
62 |
63 |
--------------------------------------------------------------------------------
/aplsource/Samples/ConsumerCGroupC.aplf:
--------------------------------------------------------------------------------
1 | ConsumerCGroupC;start;cr;config
2 | ⍝ ConsumerC is in a different group from ConsumerA and ConsumerB, GroupC
3 | ⍝ It reads from all the partitions
4 | #.Init'.'
5 |
6 | ⍝ consumer configurations
7 | config←0 2⍴⍬
8 | config⍪←'bootstrap.servers' 'localhost:9092'
9 | config⍪←'client.id' 'consumerCclient'
10 | config⍪←'group.id' 'consumerCgroup'
11 | config⍪←'auto.offset.reset' 'earliest'
12 | ⎕←'Init new consumer with config:'
13 | config
14 | consumer←⎕NEW #.Consumer config
15 |
16 | ⍝ Subscribe consumer to topic
17 | ⎕←'Subscribe consumer on topics "topic1" and "topic2":'
18 | consumer.subscribe'topic1' 'topic2'
19 | ⎕←'Waiting for rebalance before starting consuming...'
20 | ⎕DL 5
21 |
22 | ⎕←'Consume the queue:'
23 | start←3⊃⎕AI
24 | :While 1
25 | cr←consumer.consume_record
26 | :If 1=⊃cr
27 | :AndIf 20000>(3⊃⎕AI)-start
28 | :Continue
29 | :ElseIf 0=⊃cr
30 | (2⊃cr).(Topic Payload Key Partition)
31 | start←0
32 | :Else
33 | cr
34 | :Leave
35 | :EndIf
36 | :EndWhile
37 |
38 | ⍝ ⎕EX'consumer'
39 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 Dyalog
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/aplsource/Samples/ConsumerBGroupAB.aplf:
--------------------------------------------------------------------------------
1 | ConsumerBGroupAB;config;start;cr
2 | ⍝ ConsumerB is in the same group as ConsumerA, GroupAB
3 | ⍝ they share the messages consuming from different partitions
4 |
5 | #.Init'.'
6 |
7 | ⍝ consumer configurations
8 | config←0 2⍴⍬
9 | config⍪←'bootstrap.servers' 'localhost:9092'
10 | config⍪←'client.id' 'consumerBclient'
11 | config⍪←'group.id' 'consumerABgroup'
12 | config⍪←'auto.offset.reset' 'earliest'
13 | ⎕←'Init new consumer with config:'
14 | config
15 | consumer←⎕NEW #.Consumer config
16 |
17 | ⍝ Subscribe consumer to topic
18 | ⎕←'Subscribe consumer on topics "topic1" and "topic2":'
19 | consumer.subscribe'topic1' 'topic2'
20 | ⎕←'Waiting for rebalance before starting consuming...'
21 | ⎕DL 5
22 |
23 | ⎕←'Consume the queue:'
24 | start←3⊃⎕AI
25 | :While 1
26 | cr←consumer.consume_record
27 | :If 1=⊃cr
28 | :AndIf 20000>(3⊃⎕AI)-start
29 | :Continue
30 | :ElseIf 0=⊃cr
31 | (2⊃cr).(Topic Payload Key Partition)
32 | start←0
33 | :Else
34 | cr
35 | :Leave
36 | :EndIf
37 | :EndWhile
38 |
39 | ⍝ ⎕EX'consumer'
40 |
--------------------------------------------------------------------------------
/aplsource/Samples/ConsumerAGroupAB.aplf:
--------------------------------------------------------------------------------
1 | ConsumerAGroupAB;config;start;cr
2 | ⍝ ConsumerA is in the same group as ConsumerB, GroupAB
3 | ⍝ they share the messages consuming from different partitions
4 |
5 | #.Init'.'
6 |
7 | ⍝ consumer configurations
8 | config←0 2⍴⍬
9 | config⍪←'bootstrap.servers' 'localhost:9092'
10 | config⍪←'client.id' 'consumerAclient'
11 | config⍪←'group.id' 'consumerABgroup'
12 | config⍪←'auto.offset.reset' 'earliest'
13 | ⎕←'Init new consumer with config:'
14 | config
15 | ⍝ Init consumer
16 | consumer←⎕NEW #.Consumer config
17 |
18 | ⍝ Subscribe consumer to topic
19 | ⎕←'Subscribe consumer on topics "topic1" and "topic2":'
20 | consumer.subscribe'topic1' 'topic2'
21 | ⎕←'Waiting for rebalance before starting consuming...'
22 | ⎕DL 5
23 |
24 | ⎕←'Consume the queue:'
25 | start←3⊃⎕AI
26 | :While 1
27 | cr←consumer.consume_record
28 | :If 1=⊃cr
29 | :AndIf 20000>(3⊃⎕AI)-start
30 | :Continue
31 | :ElseIf 0=⊃cr
32 | (2⊃cr).(Topic Payload Key Partition)
33 | start←0
34 | :Else
35 | cr
36 | :Leave
37 | :EndIf
38 | :EndWhile
39 |
40 | ⍝⎕EX'consumer'
41 |
--------------------------------------------------------------------------------
/aplsource/Samples/TopicConsumer.aplf:
--------------------------------------------------------------------------------
1 | TopicConsumer;config;consumer;start;cr
2 | ⍝ A consumer consumes all messages from two different topics
3 |
4 | #.Init'.'
5 |
6 | ⍝ Consumer configurations
7 | config←0 2⍴⍬
8 | config⍪←'bootstrap.servers' 'localhost:9092'
9 | config⍪←'client.id' 'consumerTclient'
10 | config⍪←'group.id' 'consumerTgroup'
11 | config⍪←'auto.offset.reset' 'earliest'
12 | ⎕←'Init new consumer with config:'
13 | config
14 | consumer←⎕NEW #.Consumer config
15 |
16 | ⍝ Subscribe consumer to topic1 and topic2
17 | ⎕←'Subscribe consumer on topic "topic1" and "topic2":'
18 | consumer.subscribe'topic1' 'topic2'
19 | ⎕←'Waiting for rebalance before starting consuming...'
20 | ⎕DL 5
21 |
22 | ⍝ Consume messages
23 | ⎕←'Consume first message:'
24 | consumer.consume
25 | ⎕←'Consume the queue:'
26 | start←3⊃⎕AI
27 | :While 1
28 | cr←consumer.consume_record
29 | :If 1=⊃cr
30 | :AndIf 20000>(3⊃⎕AI)-start
31 | :Continue
32 | :ElseIf 0=⊃cr
33 | (2⊃cr).(Topic Payload Key Partition)
34 | start←0
35 | :Else
36 | cr
37 | :Leave
38 | :EndIf
39 | :EndWhile
40 | ⎕←'Close consumer:'
41 | ⎕EX'consumer'
42 |
--------------------------------------------------------------------------------
/aplsource/Samples/SimpleConsumer.aplf:
--------------------------------------------------------------------------------
1 | SimpleConsumer;config;consumer;start;cr;err
2 | ⍝ Consumer consumes all messages from a topic
3 |
4 | #.Init'.'
5 |
6 | ⍝ Consumer configurations
7 | config←0 2⍴⍬
8 | config⍪←'bootstrap.servers' 'localhost:9092'
9 | config⍪←'client.id' 'consumerclient'
10 | config⍪←'group.id' 'consumergroup'
11 | config⍪←'auto.offset.reset' 'earliest' ⍝ Start consuming from the beginning if no offset is found
12 | ⎕←'Init new consumer with config:'
13 | config
14 | consumer←⎕NEW #.Consumer config
15 |
16 | ⍝ Subscribe consumer to topic
17 | ⎕←'Subscribe consumer to topic "topic":'
18 | consumer.subscribe'topic'
19 | ⎕←'Waiting for rebalance before starting consuming...'
20 | ⎕DL 5
21 |
22 | ⍝ Consume messages
23 | ⎕←'Consume first message:'
24 | consumer.consume
25 | ⎕←'Consume the queue:'
26 | start←3⊃⎕AI
27 | :While 1
28 | cr←consumer.consume_record
29 | :If 1=⊃cr
30 | :AndIf 20000>(3⊃⎕AI)-start
31 | :Continue
32 | :ElseIf 0=⊃cr
33 | (2⊃cr).(Topic Payload Key Partition)
34 | start←0
35 | :Else
36 | cr
37 | :Leave
38 | :EndIf
39 | :EndWhile
40 | ⎕←'Close consumer:'
41 | ⎕EX'consumer'
42 |
--------------------------------------------------------------------------------
/aplsource/Samples/ConsumerLoop.aplf:
--------------------------------------------------------------------------------
1 | ConsumerLoop;config;consumer;start;last_consumed;cr
2 | ⍝ Consumer consumes messages from two topics synchronously
3 |
4 | #.Init'.'
5 |
6 | ⍝ consumer configurations
7 | config←0 2⍴⍬
8 | config⍪←'bootstrap.servers' 'localhost:9092'
9 | config⍪←'client.id' 'consumerLclient'
10 | config⍪←'group.id' 'consumerLgroup'
11 | config⍪←'auto.offset.reset' 'earliest'
12 |
13 | ⎕←'Init new consumer with config:'
14 | config
15 | consumer←⎕NEW #.Consumer config
16 |
17 | ⍝ Subscribe consumer to topic
18 | ⎕←'Subscribe consumer on topics "topic1" and "topic2":'
19 | consumer.subscribe'topic1' 'topic2'
20 | ⎕←'Waiting for rebalance before starting consuming...'
21 | ⎕DL 5
22 |
23 | ⎕←'Start polling messages:'
24 | start←3⊃⎕AI
25 | last_consumed←3⊃⎕AI
26 | :While 60000>(3⊃⎕AI)-last_consumed ⍝ Stop listening when not consuming for more than one minute
27 | cr←consumer.consume_record
28 | :If 1=⊃cr
29 | :AndIf 20000>(3⊃⎕AI)-start
30 | :Continue
31 | :ElseIf 0=⊃cr
32 | (2⊃cr).(Topic Payload Key Partition)
33 | last_consumed←3⊃⎕AI
34 | start←0
35 | :EndIf
36 | :EndWhile
37 | ⎕←'No messages received for 60 seconds... Stop waiting.'
38 | ⎕EX'consumer'
39 |
--------------------------------------------------------------------------------
/aplsource/Record.aplc:
--------------------------------------------------------------------------------
1 | :Class Record
2 | :field Public Topic
3 | :field Public Payload
4 | :field Public Key
5 | :field Public Partition
6 | :field Public Offset
7 |
8 | ∇Clear
9 | Topic←''
10 | Payload←''
11 | Key←''
12 | Partition←¯1
13 | Offset←¯1
14 | ∇
15 |
16 | ∇Make
17 | :access public
18 | :implements constructor
19 | Clear
20 | ∇
21 |
22 | ∇Make1 topic
23 | :access public
24 | :implements constructor
25 | Clear
26 | Topic←topic
27 | ∇
28 |
29 | ∇Make2 (topic payload)
30 | :access public
31 | :implements constructor
32 | Clear
33 | Topic←topic
34 | Payload←payload
35 | ∇
36 |
37 |
38 | ∇Make3 (topic payload key)
39 | :access public
40 | :implements constructor
41 | Clear
42 | Topic←topic
43 | Payload←payload
44 | Key←key
45 | ∇
46 |
47 | ∇Make4 (topic payload key partition)
48 | :access public
49 | :implements constructor
50 | Clear ⍝ reset defaults so Offset is always defined
51 | Topic←topic
52 | Payload←payload
53 | Key←key
54 | Partition←partition
55 | ∇
56 |
57 | ∇Make5 (topic payload key partition offset)
58 | :access public
59 | :implements constructor
60 | Clear
61 | Topic←topic
62 | Payload←payload
63 | Key←key
64 | Partition←partition
65 | Offset←offset
66 | ∇
67 |
68 | ∇r←asArg
69 | :access public
70 | r←Topic Payload Key Partition
71 | ∇
72 |
73 | :EndClass
74 |
--------------------------------------------------------------------------------
/aplsource/Samples/ConsumerLoopCommit.aplf:
--------------------------------------------------------------------------------
1 | ConsumerLoopCommit;config;consumer;start;commit_count;count_msg;last_consumed;cr
2 | ⍝ Consumer consumes messages from two topics synchronously and commits offsets every 20 messages
3 |
4 | #.Init'.'
5 |
6 | ⍝ consumer configurations
7 | config←0 2⍴⍬
8 | config⍪←'bootstrap.servers' 'localhost:9092'
9 | config⍪←'client.id' 'consumerLclient'
10 | config⍪←'group.id' 'consumerLgroup'
11 | config⍪←'auto.offset.reset' 'earliest'
12 | config⍪←'enable.auto.commit' 'false'
13 | ⎕←'Init new consumer with config:'
14 | config
15 | consumer←⎕NEW #.Consumer config
16 |
17 | ⍝ Subscribe consumer to topic
18 | ⎕←'Subscribe consumer on topics "topic1" and "topic2":'
19 | consumer.subscribe'topic1' 'topic2'
20 | ⎕←'Waiting for rebalance before starting consuming...'
21 | ⎕DL 5
22 |
23 | ⎕←'Start polling messages:'
24 | start←3⊃⎕AI
25 | commit_count←20
26 | count_msg←0
27 | last_consumed←3⊃⎕AI
28 | :While 60000>(3⊃⎕AI)-last_consumed
29 | cr←consumer.consume_record
30 | :If 1=⊃cr
31 | :AndIf 20000>(3⊃⎕AI)-start
32 | :Continue
33 | :ElseIf 0=⊃cr
34 | (2⊃cr).(Topic Payload Key Partition)
35 | last_consumed←3⊃⎕AI
36 | start←0
37 | count_msg←count_msg+1
38 | :If 0=commit_count|count_msg
39 | ⎕←'Commit consumer:'
40 | consumer.commit
41 | :EndIf
42 | :EndIf
43 | :EndWhile
44 |
45 | ⎕EX'consumer'
46 |
--------------------------------------------------------------------------------
/kafka.sln:
--------------------------------------------------------------------------------
1 |
2 | Microsoft Visual Studio Solution File, Format Version 12.00
3 | # Visual Studio Version 17
4 | VisualStudioVersion = 17.11.35219.272
5 | MinimumVisualStudioVersion = 10.0.40219.1
6 | Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "kafka", "kafka\kafka.vcxproj", "{5457142E-4872-4BE3-BBCB-6A0BAAD2298C}"
7 | EndProject
8 | Global
9 | GlobalSection(SolutionConfigurationPlatforms) = preSolution
10 | Debug|x64 = Debug|x64
11 | Debug|x86 = Debug|x86
12 | Release|x64 = Release|x64
13 | Release|x86 = Release|x86
14 | EndGlobalSection
15 | GlobalSection(ProjectConfigurationPlatforms) = postSolution
16 | {5457142E-4872-4BE3-BBCB-6A0BAAD2298C}.Debug|x64.ActiveCfg = Debug|x64
17 | {5457142E-4872-4BE3-BBCB-6A0BAAD2298C}.Debug|x64.Build.0 = Debug|x64
18 | {5457142E-4872-4BE3-BBCB-6A0BAAD2298C}.Debug|x86.ActiveCfg = Debug|Win32
19 | {5457142E-4872-4BE3-BBCB-6A0BAAD2298C}.Debug|x86.Build.0 = Debug|Win32
20 | {5457142E-4872-4BE3-BBCB-6A0BAAD2298C}.Release|x64.ActiveCfg = Release|x64
21 | {5457142E-4872-4BE3-BBCB-6A0BAAD2298C}.Release|x64.Build.0 = Release|x64
22 | {5457142E-4872-4BE3-BBCB-6A0BAAD2298C}.Release|x86.ActiveCfg = Release|Win32
23 | {5457142E-4872-4BE3-BBCB-6A0BAAD2298C}.Release|x86.Build.0 = Release|Win32
24 | EndGlobalSection
25 | GlobalSection(SolutionProperties) = preSolution
26 | HideSolutionNode = FALSE
27 | EndGlobalSection
28 | GlobalSection(ExtensibilityGlobals) = postSolution
29 | SolutionGuid = {D40761FE-9AA6-4CD1-B462-27A4DA2D2E8A}
30 | EndGlobalSection
31 | EndGlobal
32 |
--------------------------------------------------------------------------------
/aplsource/Samples/SimpleProducer.aplf:
--------------------------------------------------------------------------------
1 | SimpleProducer(n p);config;producer;i
2 | ⍝ Produce n+1 msgs on a topic. The partitions are assigned
3 | ⍝ depending on the key value (p is number of keys), and messages
4 | ⍝ with same key will be written into the same partition.
5 |
6 | ⍝ Topic created with, e.g.
7 | ⍝
8 | ⍝ kafka-topics.sh \
9 | ⍝ --bootstrap-server localhost:9092 \
10 | ⍝ --create --topic "topic" \
11 | ⍝ --partitions 3
12 |
13 | :If n<1
14 | ⎕←'Please produce at least one message.'
15 | :Return
16 | :EndIf
17 |
18 | :If p<1
19 | ⎕←'Minimum number of keys required is 1.'
20 | :Return
21 | :EndIf
22 |
23 | #.Init'.'
24 |
25 | ⍝ Producer configurations
26 | config←0 2⍴⍬
27 | config⍪←'bootstrap.servers' 'localhost:9092'
28 | config⍪←'client.id' 'producerclient'
29 | ⎕←'Init new producer with config:'
30 | config
31 | producer←⎕NEW #.Producer config
32 | ⍝ Produce messages on topic
33 | ⎕←'Produce a message on topic "topic":'
34 | producer.produce'topic' 'payload0' 'key0'
35 |
36 | ⎕←'Produce messages on topic "topic":'
37 | ⍝ Produce n bundled messages on topic topic and ask for dr
38 | :For i :In ⍳n
39 | producer.produce_record ⎕NEW #.Record('topic'('payload',⍕i)('key',⍕p|i-1))
40 | :If 0=10|i
41 | ⎕←'Outstanding messages:'
42 | ⎕DL 0.5
43 | producer.update_outstanding
44 | :EndIf
45 | :EndFor
46 |
47 | ⎕←'Outstanding messages:'
48 | ⎕DL 0.5
49 | producer.update_outstanding
50 |
51 |
52 | ⎕←'Close producer:'
53 | ⎕EX'producer'
54 |
--------------------------------------------------------------------------------
/aplsource/Samples/TopicProducer.aplf:
--------------------------------------------------------------------------------
1 | TopicProducer(n p);config;producer;i
2 | ⍝ A producer producing 2×n msgs on two topics.
3 | ⍝ The partitions are assigned depending on the
4 | ⍝ key value (p is number of keys), and messages
5 | ⍝ with same key will be written into the same partition.
6 |
7 |
8 | ⍝ Topics created with, e.g.
9 | ⍝
10 | ⍝ kafka-topics.sh \
11 | ⍝ --bootstrap-server localhost:9092 \
12 | ⍝ --create --topic "topic1" \
13 | ⍝ --partitions 2
14 | ⍝
15 | ⍝ kafka-topics.sh \
16 | ⍝ --bootstrap-server localhost:9092 \
17 | ⍝ --create --topic "topic2" \
18 | ⍝ --partitions 3
19 |
20 | :If n<1
21 | ⎕←'Please produce at least one message.'
22 | :Return
23 | :EndIf
24 |
25 | :If p<1
26 | ⎕←'Minimum number of keys required is 1.'
27 | :Return
28 | :EndIf
29 |
30 | #.Init'.'
31 |
32 | ⍝ Producer configurations
33 | config←0 2⍴⍬
34 | config⍪←'bootstrap.servers' 'localhost:9092'
35 | config⍪←'client.id' 'producerclient'
36 | ⎕←'Init new producer with config:'
37 | config
38 | producer←⎕NEW #.Producer config
39 |
40 | ⍝ Produce n bundled messages on topic topic1 and ask for dr
41 | :For i :In ⍳n
42 | producer.produce_record ⎕NEW #.Record('topic1'('payload',⍕i)('key',⍕p|i-1))
43 | :If 0=10|i
44 | ⎕←'Outstanding messages:'
45 | ⎕DL 0.5
46 | producer.update_outstanding
47 | :EndIf
48 | :EndFor
49 |
50 | ⍝ Produce n bundled messages on topic topic2 and ask for dr
51 | :For i :In ⍳n
52 | producer.produce_record ⎕NEW #.Record('topic2'('payload',⍕i)('key',⍕p|i-1))
53 | :If 0=10|i
54 | ⎕←'Outstanding messages:'
55 | ⎕DL 0.5
56 | producer.update_outstanding
57 | :EndIf
58 | :EndFor
59 |
60 | ⎕←'Close producer:'
61 | ⎕EX'producer'
62 |
--------------------------------------------------------------------------------
/CI/publish.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | [ "$JOB_NAME" ]
5 | [ "$BUILD_NUMBER" ]
6 |
7 | umask 002
8 | mountpoint /devt; echo "Devt is mounted: good"
9 |
10 | [ -d distribution ] || { echo "Error: distribution directory not found."; exit 1; }
11 |
12 | BASE_VERSION=`cat base-version.txt`
13 |
14 | VERSION="${BASE_VERSION%%.0}.`git rev-list HEAD --count`" # "%%.0" strips trailing ".0"
15 | if [ "${JOB_NAME:0:6}" = "Github" ]; then
16 | JOB_NAME=${JOB_NAME#*/}
17 | fi
18 |
19 | mkdir -p /devt/builds/$JOB_NAME/$BUILD_NUMBER
20 | cp -r distribution/* /devt/builds/$JOB_NAME/$BUILD_NUMBER
21 | rm -f /devt/builds/$JOB_NAME/latest
22 | ln -s $BUILD_NUMBER /devt/builds/$JOB_NAME/latest
23 |
24 | # For each directory X found in /devt/builds/$JOB_NAME/latest, create a zip file
25 | for dir in /devt/builds/$JOB_NAME/latest/*; do
26 | if [ -d "$dir" ]; then
27 | dir_name=$(basename "$dir")
28 |
29 | if [ "$dir_name" = "mac" ]; then
30 | # Special handling for mac directory - create separate zips for each architecture
31 | pushd /devt/builds/$JOB_NAME/latest/mac >/dev/null
32 | for arch in arm64 x64; do
33 | if [ -d "$arch" ]; then
34 | zip_file="/devt/builds/$JOB_NAME/$BUILD_NUMBER/dyalog-kafka.mac-${arch}-${VERSION}.zip"
35 | echo "Creating zip file: $zip_file"
36 | zip -r "$zip_file" "$arch" || true
37 | fi
38 | done
39 | popd >/dev/null
40 | else
41 | zip_file="/devt/builds/$JOB_NAME/$BUILD_NUMBER/dyalog-kafka.${dir_name}.${VERSION}.zip"
42 | echo "Creating zip file: $zip_file"
43 |
44 | pushd /devt/builds/$JOB_NAME/latest >/dev/null
45 | zip -r "$zip_file" "$dir_name" || true
46 | popd >/dev/null
47 | fi
48 | fi
49 | done
50 |
51 | # Tidy up old builds
52 | r=/devt/builds/${JOB_NAME}
53 | ls "$r" | grep -v "latest" | sort -n | head -n-10 | while read x; do
54 | echo "deleting $r/$x"
55 | rm -rf "$r/$x" || true # Continue even if deletion fails
56 | done
--------------------------------------------------------------------------------
/aplsource/Samples/DirectSubscribe.aplf:
--------------------------------------------------------------------------------
1 | DirectSubscribe n;config;producer;topic_list;consumer;start;cr;err;i
2 | ⍝ Simple example to show how to configure and subscribe to a topic
3 | ⍝ list directly when the consumer is created.
4 |
5 | #.Init'.'
6 |
7 | ⍝ Producer config
8 | config←0 2⍴⍬
9 | config⍪←'bootstrap.servers' 'localhost:9092'
10 | config⍪←'client.id' 'bhc'
11 |
12 | ⎕←'Init new producer with config:'
13 | config
14 | producer←⎕NEW #.Producer config
15 |
16 | ⎕←'Produce two messages:'
17 | ⍝ Produce
18 | producer.produce_record ⎕NEW #.Record('cars' 'ferrari' 'sportcars')
19 | producer.produce_record ⎕NEW #.Record('plants' 'iris' 'flowers' 0)
20 |
21 | ⎕←'Outstanding messages:'
22 | ⎕DL 0.5
23 | producer.update_outstanding
24 |
25 | ⎕←'Produce in loop:'
26 | :For i :In ⍳n
27 | producer.produce_record ⎕NEW #.Record('animals'(100↑'Payload',⍕i)('key',⍕4|i))
28 | :If 0=10|i
29 | ⎕←'Outstanding messages:'
30 | ⎕DL 0.5
31 | producer.update_outstanding
32 | :EndIf
33 | :EndFor
34 |
35 | ⎕←'Init new consumer with config and topic list:'
36 | ⍝ Consumer config and topic_list
37 | config←0 2⍴⍬
38 | config⍪←'bootstrap.servers' 'localhost:9092'
39 | config⍪←'client.id' 'bhcgrs3550'
40 | config⍪←'group.id' 'dyalog'
41 | config⍪←'auto.offset.reset' 'earliest' ⍝ Start consuming from the beginning if no offset is found
42 | topic_list←'animals' 'cars' 'plants'
43 | config
44 | topic_list
45 | ⍝ Directly configure and subscribe when creating the consumer
46 | consumer←⎕NEW #.Consumer(config topic_list)
47 |
48 | ⎕←'Waiting for rebalance before starting consuming...'
49 | ⎕DL 5
50 | ⍝ Consume the queue
51 | start←3⊃⎕AI
52 | :While 1
53 | cr←consumer.consume_record
54 | :If 1=⊃cr
55 | :AndIf 20000>(3⊃⎕AI)-start
56 | ⎕DL 0.2
57 | :Continue
58 | :ElseIf 0=⊃cr
59 | (2⊃cr).(Topic Payload Key Partition)
60 | start←0
61 | :Else
62 | cr
63 | :Leave
64 | :EndIf
65 | :EndWhile
66 |
67 | ⎕←'Close producer and consumer:'
68 | ⍝ Tidy up
69 | ⎕EX'producer'
70 | ⎕EX'consumer'
71 |
--------------------------------------------------------------------------------
/kafkaBuild.bat:
--------------------------------------------------------------------------------
1 | :PREP
2 |
3 | for /f "tokens=2,3 delims=/" %%a in ("%1") do set prod=%%a&set branch=%%b
4 |
5 | echo Building %prod% from %branch%
6 | GOTO VS
7 |
8 |
9 | :VS
10 | GOTO USE_VS17
11 | IF "%branch%" == "main" (
12 | GOTO USE_VS17
13 | )
14 | IF EXIST "c:\Program Files (x86)\Microsoft Visual Studio 8" GOTO USE_VS8
15 | IF EXIST "c:\Program Files (x86)\Microsoft Visual Studio 9.0" GOTO USE_VS9
16 | @echo "unsure of VS version"
17 | GOTO Error
18 |
19 | :USE_VS8
20 | @echo Using VS8
21 | set VSDIR=c:\Program Files (x86)\Microsoft Visual Studio 8
22 | GOTO BUILD
23 |
24 | :USE_VS9
25 | @echo Using VS9
26 | set VSDIR=c:\Program Files (x86)\Microsoft Visual Studio 9.0
27 | GOTO BUILD
28 |
29 | :USE_VS14
30 | @echo Using VS14
31 | set VSDIR=c:\Program Files (x86)\Microsoft Visual Studio 14.0
32 | GOTO BUILD
33 |
34 | :USE_VS17
35 | @echo Using VS17
36 | set VSDIR=C:\Program Files\Microsoft Visual Studio\2022\Professional
37 | GOTO BUILD
38 |
39 | :BUILD
40 |
41 | set WORKSPACE=%CD%
42 | IF EXIST "%TEMP%\kafka" rmdir /q /s %TEMP%\kafka
43 |
44 | dotnet new classlib --name kafka -o %TEMP%\kafka --force
45 | dotnet add %TEMP%\kafka package librdkafka.redist --version 2.5.0
46 | dotnet publish %TEMP%\kafka
47 |
48 | xcopy %USERPROFILE%\.nuget\packages\librdkafka.redist\2.5.0 .\packages\librdkafka.redist.2.5.0\ /S /E /Y
49 |
50 | IF EXIST "%TEMP%\kafka" rmdir /q /s %TEMP%\kafka
51 |
52 | for /f "skip=1" %%d in ('wmic os get localdatetime') do if not defined mydate set mydate=%%d
53 | set BDATE=%mydate:~0,8%-%mydate:~8,4%
54 |
55 | set VS80COMNTOOLS=%VSDIR%\Common7\Tools\
56 |
57 | @echo %PATH%
58 | IF EXIST "%VSDIR%\VC\vcvarsall.bat" call "%VSDIR%\VC\vcvarsall.bat"
59 | IF EXIST "%VSDIR%\VC\Auxiliary\Build\vcvarsall.bat" call "%VSDIR%\VC\Auxiliary\Build\vcvarsall.bat" x86_amd64
60 | if %ERRORLEVEL% NEQ 0 GOTO Error
61 |
62 | for %%x in (Release,Debug) do (
63 | echo %%x
64 | for %%y in (x86,x64) do (
65 | echo %%x %%y
66 | msbuild %WORKSPACE%\kafka.sln /p:Configuration=%%x;Platform=%%y
67 | )
68 | )
69 |
70 |
71 |
72 | if %ERRORLEVEL% NEQ 0 GOTO Error
73 |
74 | set /a RESULT=0
75 | GOTO End
76 |
77 | :Error
78 | set /a RESULT=1
79 |
80 | :End
81 | IF EXIST ".\packages" rmdir /q /s .\packages
82 | EXIT /B %RESULT%
83 |
--------------------------------------------------------------------------------
/aplsource/Samples/Example.aplf:
--------------------------------------------------------------------------------
1 | Example n;i;config;producer;topic_list;consumer;cr
2 | ⍝ Produce and consume messages
3 |
4 | ⍝ Topics created with
5 | ⍝
6 | ⍝ kafka-topics.sh \
7 | ⍝ --bootstrap-server localhost:9092 \
8 | ⍝ --create --topic "animals" \
9 | ⍝ --partitions 3
10 | ⍝
11 | ⍝ kafka-topics.sh \
12 | ⍝ --bootstrap-server localhost:9092 \
13 | ⍝ --create --topic "cars" \
14 | ⍝ --partitions 3
15 | ⍝
16 | ⍝ kafka-topics.sh \
17 | ⍝ --bootstrap-server localhost:9092 \
18 | ⍝ --create --topic "plants" \
19 | ⍝ --partitions 3
20 |
21 | ⍝ Call to Init function
22 | ⍝ Init'path/to/dir/housing/kafka/shared/lib'
23 | #.Init'.'
24 |
25 | ⍝ Set up the producer
26 | config←0 2⍴⍬
27 | config⍪←'bootstrap.servers' 'localhost:9092'
28 | config⍪←'client.id' 'bhc'
29 |
30 | producer←⎕NEW #.Producer config
31 |
32 | ⍝ Produce onto the "animals" topic the message "tiger" with key "cats"
33 | producer.produce'animals' 'tiger' 'cats'
34 |
35 | ⍝ Produce n messages onto the animals topic in a loop by using the Record interface
36 | :For i :In ⍳n
37 | producer.produce_record ⎕NEW #.Record('animals'(75↑'Payload',⍕i)('key',⍕4|i))
38 | :If 0=10|i
39 | ⎕DL 0.5
40 | producer.update_outstanding ⍝ Ask for delivery report
41 | :EndIf
42 | :EndFor
43 |
44 | ⍝ Produce a few messages to the other topics, too
45 | producer.produce_record ⎕NEW #.Record('cars' 'ferrari' 'sportcars')
46 | producer.produce_record ⎕NEW #.Record('plants' 'iris' 'flowers')
47 | ⍝ Ask for delivery report
48 | ⎕DL 0.1
49 | producer.update_outstanding
50 |
51 |
52 | ⍝ Set up the consumer
53 | config←0 2⍴⍬
54 | config⍪←'bootstrap.servers' 'localhost:9092'
55 | config⍪←'client.id' 'bhcgrs3550'
56 | config⍪←'group.id' 'dyalog'
57 | config⍪←'auto.offset.reset' 'earliest' ⍝ Start consuming from the beginning if no offset is found
58 |
59 | topic_list←'animals' 'cars' 'plants'
60 | consumer←⎕NEW #.Consumer config
61 | consumer.subscribe topic_list
62 | ⎕←'Waiting for rebalance before starting consuming...'
63 | ⎕DL 5
64 |
65 | ⍝ Let's drain the topics
66 | :While 0=⊃cr←consumer.consume_record
67 | (2⊃cr).(Topic Payload Key Partition Offset)
68 | :EndWhile
69 |
70 | ⍝ Tidy up
71 | ⎕EX'producer'
72 | ⎕EX'consumer'
73 |
--------------------------------------------------------------------------------
/aplsource/Samples/ManualTPList.aplf:
--------------------------------------------------------------------------------
1 | ManualTPList;start;cr;producer;consumer;bb;so;ef;l;err
2 | ⍝ Simple example to show how to manually create a new topic partition list
3 | ⍝ and produce and consume messages from it.
4 | ⍝ This example also shows how the consumer can be configured by
5 | ⍝ using consumer.configure instead of passing the config when creating it.
6 |
7 | ⍝ Topic created with, e.g.
8 | ⍝
9 | ⍝ kafka-topics.sh \
10 | ⍝ --bootstrap-server localhost:9092 \
11 | ⍝ --create --topic "animals" \
12 | ⍝ --partitions 3
13 | ⍝
14 |
15 | #.Init'.'
16 |
17 | bb←⎕NEW #.Record('animals' 'Blackbird' 'birds')
18 | so←⎕NEW #.Record('animals' 'Sole' 'fish')
19 | ef←⎕NEW #.Record('animals' 'Elephant' 'mammal')
20 |
21 | ⎕←'Init new producer:'
22 | ⍝ Producer
23 | producer←⎕NEW #.Producer
24 | producer.configure'bootstrap.servers' 'localhost:9092'
25 | producer.configure'client.id' 'martina'
26 |
27 | ⎕←'Produce:'
28 | ⍝ Produce
29 | producer.produce_record bb
30 | producer.produce_record so
31 | producer.produce_record ef
32 |
33 | ⎕←'Ask for delivery report:'
34 | ⎕DL 0.5
35 | producer.delivery_report 10 ⍝ Ask for delivery reports
36 |
37 | ⎕←'Init new consumer and set config:'
38 | ⍝ Consumer configs
39 | consumer←⎕NEW #.Consumer
40 |
41 | consumer.configure'bootstrap.servers' 'localhost:9092'
42 | consumer.configure'client.id' 'martina'
43 | consumer.configure'group.id' 'dyalog'
44 | consumer.configure'auto.offset.reset' 'earliest' ⍝ Start consuming from the beginning if no offset is found
45 |
46 | ⎕←'Create a new topic partition list and set topic:'
47 | ⍝ Manually create a new topic partition list l
48 | l←consumer.topic_partition
49 | ⍝ Set topics (and partition, which will be ignored when subscribing to it)
50 | consumer.set_topic_partition l'animals' 0
51 | ⎕←'Subscribe consumer to list:'
52 | consumer.subscribe_topic_partition l
53 | ⎕←'Waiting for rebalance before starting consuming...'
54 | ⎕DL 5
55 |
56 | ⍝ Consume
57 | start←3⊃⎕AI
58 | :While 1
59 | cr←consumer.consume_record
60 | :If 1=⊃cr
61 | :AndIf 20000>(3⊃⎕AI)-start
62 | ⎕DL 0.2
63 | :Continue
64 | :ElseIf 0=⊃cr
65 | (2⊃cr).(Topic Payload Key Partition)
66 | start←0
67 | :Else
68 | cr
69 | :Leave
70 | :EndIf
71 | :EndWhile
72 |
73 | ⎕←'Close producer and consumer:'
74 | ⎕EX'producer'
75 | ⎕EX'consumer'
76 |
--------------------------------------------------------------------------------
/aplsource/Samples/SAMPLES.md:
--------------------------------------------------------------------------------
1 | The [Samples](../../aplsource/Samples/) directory contains code samples to show how to produce and consume messages:
2 |
3 | - [Example](../../aplsource/Samples/Example.aplf): a single function showing how to produce messages on multiple topics and consume them.
4 | - [SimpleProducer](../../aplsource/Samples/SimpleProducer.aplf): produces n+1 messages on a topic. The partitions are assigned depending on the key value (input p is the number of keys); messages with the same key will be written to the same partition.
5 | - [SimpleConsumer](../../aplsource/Samples/SimpleConsumer.aplf): consumes messages from a topic.
6 | - [TopicProducer](../../aplsource/Samples/TopicProducer.aplf): produces 2×n messages on topics "topic1" and "topic2". The partitions are assigned depending on the key value (input p is the number of keys); messages with the same key will be written to the same partition.
7 | - [TopicConsumer](../../aplsource/Samples/TopicConsumer.aplf): consumes messages from topics "topic1" and "topic2".
8 | - [ConsumerAGroupAB](../../aplsource/Samples/ConsumerAGroupAB.aplf) and [ConsumerBGroupAB](../../aplsource/Samples/ConsumerBGroupAB.aplf): two consumers belonging to the same consumer group (GroupAB) share the messages when consuming from the same topics.
9 | - [ConsumerCGroupC](../../aplsource/Samples/ConsumerCGroupC.aplf): a consumer belonging to a different consumer group (GroupC) consumes the queue again when reading topics already consumed by, for example, ConsumerA and ConsumerB.
10 | - [ConsumerLoop](../../aplsource/Samples/ConsumerLoop.aplf): consumes messages from two topics synchronously, while they are being produced, in a loop. The consumer will stop listening if no message has been consumed within 60 seconds.
11 | - [ConsumerLoopCommit](../../aplsource/Samples/ConsumerLoopCommit.aplf): consumes messages from two topics synchronously in a loop and commits offsets to the server every 20 messages.
12 | - [ProducerLoop](../../aplsource/Samples/ProducerLoop.aplf): produces messages for 120 seconds every 5 seconds on topic1 and topic2.
13 | - [DirectSubscribe](../../aplsource/Samples/DirectSubscribe.aplf): simple example to show how to configure and subscribe to a topic list directly when the consumer is created.
14 | - [ManualTPList](../../aplsource/Samples/ManualTPList.aplf): simple example to show how to manually create a new topic partition list and produce and consume messages from it. This example also shows how the consumer can be configured using the `consumer.configure` interface.
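15 |
16 | To run a sample, bring `aplsource` into the workspace with `]link` and call the function directly. A minimal sketch (assuming a broker on `localhost:9092`, the topics used by the samples already created, and the `kafka` shared library in the current directory, since the samples call `#.Init'.'`):
17 |
18 | ```apl
19 |       ]cd path/to/kafka/repo
20 |       ]link.create # aplsource
21 |       Samples.SimpleProducer 20 3   ⍝ 21 messages on "topic", 3 distinct keys
22 |       Samples.SimpleConsumer        ⍝ read them back
23 | ```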
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Dyalog-Kafka
2 |
3 | The aim of the Dyalog-Kafka project is to provide a binding to part of the [Confluent librdkafka](https://github.com/confluentinc/librdkafka) library such that we can access [Kafka](https://kafka.apache.org/) from Dyalog APL.
4 |
5 | > **Note** KAFKA is a [registered trademark](https://kafka.apache.org/trademark) of [The Apache Software Foundation](https://www.apache.org/) and has been licensed for use by [Dyalog Ltd](https://www.dyalog.com/). Dyalog Ltd has no affiliation with and is not endorsed by The Apache Software Foundation.
6 |
7 | ## Build and installation
8 |
9 | ### Precompiled Library
10 | Compiled libraries for supported platforms — AIX, Linux, macOS (arm64 and x64), and Windows — are available on the [Releases](https://github.com/Dyalog/kafka/tags) page.
11 |
12 | ### Building from Source
13 | You can also build the library from source. To do so, clone this repository and ensure you have a C++ development toolchain installed.
14 |
15 | #### Windows
16 |
17 | Open `kafka.sln` in Visual Studio 2022, and build the project.
18 |
19 | Copy all the DLLs from the build output directory (`kafka\x64\Debug` for the 64-bit debug build) into a directory and start Dyalog APL in that directory.
20 |
21 | #### Linux
22 |
23 | To build on Linux for 64-bit:
24 | ```
25 | PLATFORM=linux ./mk_kafka.sh 64
26 | ```
27 | The output files are placed in `distribution/linux/x64`.
28 |
29 | #### AIX
30 |
31 | TODO
32 |
33 | #### macOS
34 |
35 | Install `librdkafka` via Homebrew:
36 | ```
37 | brew install librdkafka
38 | ```
39 | Build the wrapper:
40 | ```
41 | cd path/to/kafka/repo
42 | mkdir -p MACbin && c++ -shared -fpic -o MACbin/kafka.dylib -DFORmac kafka/kafka.cpp -lrdkafka -L/opt/homebrew/opt/librdkafka/lib -I/opt/homebrew/opt/librdkafka/include/librdkafka
43 | ```
44 |
45 | Alternatively, build as on Linux; this requires dotnet to be installed to fetch the librdkafka package:
46 |
47 | ```
48 | PLATFORM=mac ./mk_kafka.sh 64
49 | ```
50 |
51 |
52 | ## Initialising
53 |
54 | Now start Dyalog. You need a running Kafka instance. In the session, type
55 |
56 | ```apl
57 | ]cd path/to/kafka/repo
58 | ]link.create # aplsource
59 | ```
60 |
61 | For users who cannot use `]link`, it is possible to create a `kafka.dws` workspace.
62 | The workspace can be built with the `]DBuild` tool and then saved:
63 | ```apl
64 | ]DBuild path/to/kafka.dyalogbuild
65 | )WSID
66 | )SAVE
67 | ```
68 | and then `)LOAD kafka.dws`.
69 |
70 | Finally, initialise the library, passing as the argument the path where the `kafka.[dll|so|dylib]` shared library was installed:
71 | ```apl
72 | Init 'path/to/dir/housing/shared/lib' ⍝ to load the dll
73 | ```
74 |
75 | For further instructions, see the file [SPEC.md](SPEC.md), and sample code examples in [Samples](aplsource/Samples/).
76 |
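77 | Once initialised, a minimal round trip looks like this (a sketch, assuming a broker on `localhost:9092` and an existing topic `"animals"`; see [Example](aplsource/Samples/Example.aplf) for the full version):
78 |
79 | ```apl
80 |       config←0 2⍴⍬
81 |       config⍪←'bootstrap.servers' 'localhost:9092'
82 |       producer←⎕NEW #.Producer config
83 |       producer.produce'animals' 'tiger' 'cats'   ⍝ topic payload key
84 |
85 |       config⍪←'group.id' 'dyalog'
86 |       config⍪←'auto.offset.reset' 'earliest'
87 |       consumer←⎕NEW #.Consumer config
88 |       consumer.subscribe'animals'
89 |       ⎕DL 5                                      ⍝ wait for the group rebalance
90 |       cr←consumer.consume_record
91 |       (2⊃cr).(Topic Payload Key Partition)       ⍝ a Record instance when 0=⊃cr
92 | ```
93 |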
--------------------------------------------------------------------------------
/kafka/kafka.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #if defined(FORmac) || defined(FORlinux) || defined(FORaix)
3 | #define LIBRARY_API __attribute__((visibility("default")))
4 | #endif
5 |
6 | #if defined(FORWIN)
7 | #ifdef KAFKA_EXPORTS
8 | # define LIBRARY_API __declspec(dllexport)
9 | #else
10 | # define LIBRARY_API __declspec(dllimport)
11 | #endif
12 | #endif
13 | extern "C"
14 | {
15 | /* Version
16 | * get the textual version and the native version number of Kafka
17 | * args
18 | * version buffer for the textual representation of the version of kafka
19 | * len size of the buffer provided
20 | *
21 | * return kafka version in native format (hex)
22 | */
23 | LIBRARY_API int Version(char* version, int len);
24 |
25 | /* Describe
26 | * Describes the functions exposed from the sharedlibrary
27 | *
28 | * args
29 | * buffer buffer for the JSON description of the exported functions
30 | * psize size of the buffer provided on entry; size of the space needed on exit
31 | */
32 | LIBRARY_API int32_t Describe(char* buffer, int32_t* psize);
33 |
34 |
35 | LIBRARY_API int InitKafka(void** kafka);
36 | //LIBRARY_API int UninitKafka(void* kafka);
37 | LIBRARY_API int UninitProducer(void* prod);
38 | LIBRARY_API int UninitConsumer(void* cons);
39 |
40 | LIBRARY_API int SetKafkaConf(void* kafka, char* key, char* val, char* errtxt, int *plen);
41 |
42 | LIBRARY_API int NewTopicConf(void** topicconf);
43 | LIBRARY_API int DelTopicConf(void* topicconf);
44 | LIBRARY_API int SetTopicConf(void* topicconf, char* key, char* val, char* errtxt, int *plen);
45 |
46 | //LIBRARY_API int CreateTopic(void** topic, void* kafka, char* topic_name, void* topic_conf);
47 | LIBRARY_API int NewTopicPartitionList(void** subscr);
48 | LIBRARY_API int SetTopicPartitionList(void* subscr, char* topic, int32_t partition);
49 | LIBRARY_API int SetOffset(void* subscr, char* topic, int32_t partition, int64_t offset);
50 |
51 | LIBRARY_API int SubscribeConsumerTPList(void* kafka, void* subscr, char* errtxt, int *plen);
52 | LIBRARY_API int Produce(void* prod, char* topic, char* payload, uint32_t paylen, char* key, uint32_t keylen, int32_t partition, uint64_t* msgid, char* errtxt, int *plen);
53 |
54 | LIBRARY_API int Consume(void* cons, char* topic, uint32_t* topiclen, char* payload, uint32_t* paylen, char* key, uint32_t* keylen, int32_t* partition, int64_t* offset, char* errtxt, int *plen);
55 | LIBRARY_API int Commit(void* cons, void* subscr);
56 | LIBRARY_API int Committed(void* cons, void* subscr, int64_t* offset);
57 |
58 | LIBRARY_API int DeliveryReport(void* prod, unsigned long long* msgid, int* err, int* plength);
59 | LIBRARY_API int DRMessageError(int* err, char* errtxt, int *plen);
60 | // Do we need these?
61 | }
62 |
--------------------------------------------------------------------------------
/aplsource/Producer.aplc:
--------------------------------------------------------------------------------
1 | :Class Producer
2 | :field public shared kafka
3 | :field private prod
4 | :field private outstanding
5 |
6 | ⍝⍝⍝⍝⍝⍝⍝⍝⍝⍝⍝⍝
7 | ⍝ Constructor
8 | ∇ make
9 | :Access Public
10 | :Implements constructor
11 | prod←kafka.new
12 | outstanding←0 2⍴0
13 | ∇
14 |
15 | ∇ make_config config;i;opt;val
16 | :Access Public
17 | :Implements constructor
18 | prod←kafka.new
19 | :If (2=⍴⍴config)∧(2=¯1↑⍴config)
20 | configure¨↓config
21 | :Else
22 | ⍝ todo error
23 | :EndIf
24 | outstanding←0 2⍴0
25 | ∇
26 |
27 | ⍝⍝⍝⍝⍝⍝⍝⍝⍝⍝⍝⍝
28 | ⍝ Destructor
29 | ∇ unmake;_
30 | :Implements destructor
31 | _←kafka.UninitProducer prod
32 | ∇
33 |
34 | ⍝⍝⍝⍝⍝⍝⍝⍝⍝⍝⍝⍝
35 | ⍝ Configuration Settings
36 | ∇ r←configure(opt val)
37 | :Access public
38 | r←kafka.setconf(prod opt val)
39 | ∇
40 |
41 | ⍝⍝⍝⍝⍝⍝⍝⍝⍝⍝⍝⍝
42 | ⍝ Producer
43 |
44 |
45 | ∇ r←produce arg;topic;payload;key;partition;z;msgid;err
46 | (topic payload key partition)←4↑ arg, (≢arg)↓ '' '' '' ¯1
47 | :Access public
48 | (z msgid err)←kafka.produce prod topic payload key partition
49 | :If z=0
50 | outstanding⍪←msgid 0
51 | r←z,msgid
52 | :Else
53 | :If 0≠⍴err
54 | r←z,msgid,'ERROR START PRODUCER: ',err
55 | :Else
56 | r←z,msgid,'ERROR: ',2⊃parse_error z
57 | :EndIf
58 | :EndIf
59 | ∇
60 |
61 |
62 | ∇ r←produce_record record;err;msgid;z
63 | :Access public
64 | r←produce record.asArg
65 | ∇
66 |
67 | ⍝⍝⍝⍝⍝⍝⍝⍝⍝⍝⍝⍝
68 | ⍝ Delivery reports
69 | ∇ r←delivery_report length;len;err;msgid;z
70 | :Access public
71 | (z msgid err len)←kafka.delivery prod length
72 | r←z((len↑msgid),[1.5](len↑err))
73 | ∇
74 |
75 | ∇ r←update_outstanding;err;del;ok;ix;outstanding_err
76 | :Access public
77 | :While 1
78 | (err del)←delivery_report 100
79 |
80 | :If 0=err
81 | :AndIf 0<≢del
82 | ok←0=del[;2] ⍝ delivery reports without error
83 | outstanding←(~outstanding[;1]∊ok/del[;1])⌿outstanding ⍝ drop msgids reported as delivered
84 | ix←(outstanding[;1]⍳(~ok)/del[;1])~1+≢outstanding ⍝ locate failed msgids still outstanding
85 | outstanding[ix;2]←(~ok)/del[;2] ⍝ record their error codes
86 | :Else
87 | :Leave
88 | :EndIf
89 | :EndWhile
90 | outstanding_err←outstanding
91 | outstanding←(0=outstanding[;2])⌿outstanding
92 | r←err outstanding_err
93 | ∇
94 |
95 | ∇ r←delivery_report_err length;len;err;msgid;z
96 | :Access public
97 | (z msgid err len)←kafka.delivery prod length
98 |
99 | r←z,((len↑msgid),[1.5](len↑err))
100 | r←r[{(,⍵)/,⍳⍴⍵}0≠r[;3];]
101 | ∇
102 |
103 |
104 | ∇ r←parse_error errid
105 | :Access public
106 | r←kafka.errormsg(errid)
107 | ∇
108 |
109 | ⍝⍝⍝⍝⍝⍝⍝⍝⍝⍝⍝⍝ Helpers
110 | ∇ r←get_producer
111 | :Access public
112 | r←prod
113 | ∇
114 |
115 | :EndClass
116 |
117 |
--------------------------------------------------------------------------------
/kafka.make:
--------------------------------------------------------------------------------
1 | KAFKA_MODS := \
2 | kafka \
3 |
4 | ifeq ($(ARCH),x86_64)
5 | ARCH=x64
6 | endif
7 |
8 | KAFKA=$(PWD)
9 |
10 | DIST=$(KAFKA)/distribution/$(PLATFORM)/$(ARCH)/$(BITS)
11 |
12 | BIN=$(KAFKA)/$(PLATFORM)$(ARCH)$(BITS)bin
13 |
14 |
15 | ifeq ($(PLATFORM),WIN)
16 |
17 | else ifeq ($(PLATFORM),linux)
18 | CC=gcc
19 | CPP=g++
20 | KAFKAINC=~/.nuget/packages/librdkafka.redist/2.5.0/build/native/include/librdkafka
21 | KAFKALIBS=~/.nuget/packages/librdkafka.redist/2.5.0/runtimes/linux-$(ARCH)/native/librdkafka.so
22 | KAFKARDLICENSE=~/.nuget/packages/librdkafka.redist/2.5.0/LICENSES.txt
23 | EXT=so
24 | KAFKABINSRC=nuget
25 | else ifeq ($(PLATFORM),aix)
26 | KAFKACFLAGS=-m$(BITS)
27 | KAFKALDFLAGS=-m$(BITS)
28 | KAFKAEXTLIBS=-lssl -lcrypto
29 | CC=ibm-clang_r
30 | CPP=ibm-clang++_r
31 | KAFKAINC=$(KAFKA)/librdkafka$(BITS)/src
32 | KAFKALIBS=$(KAFKA)/librdkafka$(BITS)/src/librdkafka.a
33 | EXT=so
34 | KAFKABINSRC=build
35 | KAFKARDLICENSE=$(KAFKA)/librdkafka$(BITS)/LICENSES.txt
36 | else ifeq ($(PLATFORM),mac)
37 | CC=cc
38 | CPP=c++
39 | KAFKAINC=~/.nuget/packages/librdkafka.redist/2.5.0/build/native/include/librdkafka
40 | KAFKALIBS=~/.nuget/packages/librdkafka.redist/2.5.0/runtimes/osx-$(ARCH)/native/librdkafka.dylib
41 | KAFKARDLICENSE=~/.nuget/packages/librdkafka.redist/2.5.0/LICENSES.txt
42 | EXT=dylib
43 | KAFKABINSRC=nuget
44 | else
45 | CC=cc
46 | CPP=c++
47 | endif
48 |
49 |
50 | KAFKA_OBJS:= $(KAFKA_MODS:%=$(BIN)/%.o)
51 |
52 | all: $(DIST)/kafka.$(EXT) $(DIST)/librdkafka.$(EXT) $(DIST)/LICENSES.librdkafka
53 |
54 |
55 | $(BIN)/kafka.$(EXT): $(KAFKA_OBJS) $(KAFKALIBS)
56 | $(CPP) $(KAFKALDFLAGS) -shared -o $@ $(KAFKA_OBJS) $(KAFKALIBS) $(KAFKAEXTLIBS)
57 |
58 | $(BIN)/%.o: kafka/%.cpp $(BIN) $(KAFKAINC)
59 | $(CPP) $(KAFKACFLAGS) -c -o $@ -DFOR$(PLATFORM) -I $(KAFKAINC) -fpic $<
60 |
61 | $(BIN):
62 | mkdir -p $@
63 |
64 | $(DIST):
65 | mkdir -p $@
66 |
67 | $(DIST)/kafka.$(EXT): $(DIST) $(BIN)/kafka.$(EXT)
68 | cp $(BIN)/kafka.$(EXT) $@
69 |
70 | $(DIST)/librdkafka.$(EXT): $(KAFKALIBS)
71 | cp $< $@
72 | cp $(KAFKARDLICENSE) $(DIST)/LICENSES.librdkafka
73 |
74 | $(DIST)/LICENSES.librdkafka: $(KAFKARDLICENSE)
75 | cp $< $@
76 |
77 |
78 | $(KAFKAINC): $(KAFKALIBS)
79 |
80 |
81 | $(KAFKALIBS): $(KAFKA)/$(KAFKABINSRC).pseudo
82 |
83 | $(KAFKA)/librdkafka$(BITS):
84 | git clone -b dyalog-build git@github.com:Dyalog/librdkafka $(KAFKA)/librdkafka$(BITS)
85 |
86 |
87 | $(KAFKA)/build.pseudo: $(KAFKA)/librdkafka$(BITS)
88 | cd $(KAFKA)/librdkafka$(BITS) && ./configure --prefix=$(KAFKA)/kafkalib$(BITS) --install-deps --cc=ibm-clang_r --cxx=ibm-clang++_r --CFLAGS="-D__aix" --mbits=$(BITS) --ARFLAGS=-X$(BITS) --LDFLAGS=" -lssl -lcrypto"
89 | cd $(KAFKA)/librdkafka$(BITS) && make libs
90 | touch $(KAFKA)/build.pseudo
91 |
92 | $(KAFKA)/nuget.pseudo:
93 | cd $(BIN) && dotnet new classlib --name kafka -o . --force
94 | cd $(BIN) && dotnet add package librdkafka.redist --version 2.5.0
95 | cd $(BIN) && dotnet publish
96 | touch $(KAFKA)/nuget.pseudo
97 |
98 | $(BIN)/librdkafka.$(EXT) : $(KAFKALIBS)
99 | cp $< $@
100 |
101 | clean:
102 | rm -rf $(BIN)
103 | rm -rf $(DIST)
104 | rm -rf $(KAFKA)/librdkafka$(BITS)
105 | rm -rf $(KAFKA)/build.pseudo $(KAFKA)/nuget.pseudo
106 |
--------------------------------------------------------------------------------
/aplsource/Consumer.aplc:
--------------------------------------------------------------------------------
1 | :Class Consumer
2 | :field public shared kafka
3 | :field private cons
4 | :field private topic_partition_list
5 |
6 | ⍝⍝⍝⍝⍝⍝⍝⍝⍝⍝⍝⍝
7 | ⍝ Constructor
8 |
9 | ∇ make
10 | :Access Public
11 | :Implements constructor
12 | cons←kafka.new
13 | ∇
14 |
15 | ∇ make_config config;i;opt;val
16 | :Access Public
17 | :Implements constructor
18 | cons←kafka.new
19 | :If (2=⍴⍴config)∧(2=¯1↑⍴config)
20 | configure¨↓config
21 | :Else
22 | ⍝ todo error
23 | :EndIf
24 | ∇
25 |
26 | ∇ make_config_topic(config topic_list);i;opt;val
27 | :Access Public
28 | :Implements constructor
29 | cons←kafka.new
30 | :If (2=⍴⍴config)∧(2=¯1↑⍴config)
31 | configure¨↓config
32 | :Else
33 | ⍝ todo error
34 | :EndIf
35 | subscribe topic_list
36 | ∇
37 |
38 | ⍝⍝⍝⍝⍝⍝⍝⍝⍝⍝⍝⍝
39 | ⍝ Destructor
40 |
41 | ∇ unmake;_
42 | :Implements destructor
43 | _←kafka.UninitConsumer cons
44 | ∇
45 |
46 | ⍝⍝⍝⍝⍝⍝⍝⍝⍝⍝⍝⍝
47 | ⍝ Configuration Settings
48 |
49 | ∇ r←configure(opt val)
50 | :Access public
51 | r←kafka.setconf cons opt val
52 | ∇
53 |
54 | ⍝⍝⍝⍝⍝⍝⍝⍝⍝⍝⍝⍝
55 | ⍝ Set topic and partition on a topic-partition list.
56 | ⍝ NB: it takes a topic name, not a topic handle
57 |
58 | ∇ r←topic_partition
59 | :Access public
60 | r←kafka.new_topic_partition
61 | ∇
62 |
63 | ∇ r←set_topic_partition(topic_partition_list topicname partition)
64 | :Access public
65 | r←kafka.set_topic_partition topic_partition_list topicname partition
66 | ∇
67 |
68 | ∇ r←set_offset(topic_partition_list topicname partition offset)
69 | :Access public
70 | r←kafka.set_offset topic_partition_list topicname partition offset
71 | ∇
72 |
73 | ∇ r←subscribe_topic_partition topic_partition_list
74 | :Access public
75 | r←kafka.subscribe_topic_partition cons topic_partition_list
76 | ∇
77 |
78 | ∇ r←subscribe topic_list;topic;topic_partition_list;i
79 | :Access public
80 |
81 | :If 0≠≢topic_list
82 | topic_partition_list←topic_partition
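   | ⍝ Build the topic-partition list from the non-empty topic names,
   | ⍝ using partition 0 as a placeholder: subscribe only looks at the
   | ⍝ topic names, and the broker assigns partitions on rebalance.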
83 | set_topic_partition¨((topic_partition_list,¨⊂¨(,⊆topic_list)~⊂''),¨⊂0)
84 | r←subscribe_topic_partition topic_partition_list
85 | :Else
86 | ⍝ TODO: signal an error for an empty topic list
87 | :EndIf
88 | ∇
89 |
90 | ⍝⍝⍝⍝⍝⍝⍝⍝⍝⍝⍝⍝
91 | ⍝ Consumer
92 | ∇ r←consume
93 | :Access public
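   | ⍝ 200 2048 200 are the initial buffer sizes for topic, payload
   | ⍝ and key passed to the underlying Consume call.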
94 | r←kafka.consume cons 200 2048 200
95 | ∇
96 |
97 | ∇ r←consume_record
98 | :Access public
99 | r←kafka.consume cons 200 2048 200
100 | :If 0=⊃r
101 | r←0(⎕NEW #.Record(r[2 3 4 5 6]))
102 | :EndIf
103 | ∇
104 |
105 | ∇ r←commit
106 | :Access public
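   | ⍝ A 0 (null) topic-partition list commits the offsets of the
   | ⍝ current partition assignment; the commit is synchronous.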
107 | r←kafka.commit cons 0
108 | ∇
109 |
110 | ∇ r←committed(topic partition);topic_partition_list
111 | :Access public
112 | topic_partition_list←topic_partition
113 | r←set_topic_partition(topic_partition_list topic partition)
114 | r←kafka.committed cons topic_partition_list
115 | ∇
116 | ⍝⍝⍝⍝⍝⍝⍝⍝⍝⍝⍝⍝
117 | ⍝ Helpers
118 | ∇ r←get_consumer
119 | :Access public
120 | r←cons
121 | ∇
122 |
123 | :EndClass
124 |
125 |
126 |
--------------------------------------------------------------------------------
/aplsource/Tests/TestTopics.aplf:
--------------------------------------------------------------------------------
1 | TestTopics;config;consumer;start;consumed_msgs;cr;_;producer;produced_msgs;i;msgs;t_consumed_msgs;s_consumed_msgs;t_produced_msgs;s_produced_msgs
2 | ⍝ Produce 15 msgs on topic1 and 20 on topic2
3 | ⍝ Consume all the msgs in the queue and check that
4 | ⍝ the consumed msgs are the same and
5 | ⍝ in the same order within each key.
6 |
7 | #.Init'.'
8 |
9 | ⍝ Consumer configurations
10 | config←0 2⍴⍬
11 | config⍪←'bootstrap.servers' 'localhost:9092'
12 | config⍪←'client.id' 'consumerclient'
13 | config⍪←'group.id' 'consumergroup'
14 | config⍪←'auto.offset.reset' 'earliest'
15 | ⎕←'Init new consumer with config:'
16 | config
17 | consumer←⎕NEW #.Consumer config
18 |
19 | ⍝ Subscribe consumer to topic
20 | ⎕←'Subscribe consumer to topics "topic1" and "topic2":'
21 | consumer.subscribe'topic1' 'topic2'
22 | ⎕←'Waiting for rebalance before starting consuming...'
23 | ⎕DL 5
24 |
25 | ⎕←'Consume the queue before producing messages for testing:'
26 | start←3⊃⎕AI
27 | consumed_msgs←⍬
28 | :While 1
29 | cr←consumer.consume_record
30 | :If 1=⊃cr
31 | :AndIf 20000>(3⊃⎕AI)-start
32 | :Continue
33 | :ElseIf 0=⊃cr
34 | _←⊂(2⊃cr).(Topic Payload Key Partition)
35 | start←0
36 | :Else
37 | cr
38 | :Leave
39 | :EndIf
40 | :EndWhile
41 |
42 | ⍝ Producer configurations
43 | config←0 2⍴⍬
44 | config⍪←'bootstrap.servers' 'localhost:9092'
45 | config⍪←'client.id' 'producerclient'
46 | ⎕←'Init new producer with config:'
47 | config
48 | ⍝ Init new Producer
49 | producer←⎕NEW #.Producer config
50 | ⎕←'Produce messages on topic1:'
51 | ⍝ Produce bundled messages on topic and ask for dr
52 | produced_msgs←⍬
53 | :For i :In ⍳15
54 | msgs←'topic1'('payload',⍕i)('key',⍕5|i)
55 | producer.produce_record ⎕NEW #.Record(msgs)
56 | produced_msgs,←⊂msgs
57 | :If 0=10|i
58 | ⎕←'Outstanding messages:'
59 | ⎕DL 0.5
60 | producer.update_outstanding
61 | :EndIf
62 | :EndFor
63 | ⎕←'Outstanding messages:'
64 | ⎕DL 0.5
65 | producer.update_outstanding
66 |
67 | ⎕←'Produce messages on topic2:'
68 | :For i :In ⍳20
69 | msgs←'topic2'('payload',⍕i)('key',⍕5|i)
70 | producer.produce_record ⎕NEW #.Record(msgs)
71 | produced_msgs,←⊂msgs
72 | :If 0=10|i
73 | ⎕←'Outstanding messages:'
74 | ⎕DL 0.5
75 | producer.update_outstanding
76 | :EndIf
77 | :EndFor
78 |
79 | ⎕←'Close producer:'
80 | ⎕EX'producer'
81 |
82 | ⎕←'Now consume the testing queue:'
83 | start←3⊃⎕AI
84 | consumed_msgs←⍬
85 | :While 1
86 | cr←consumer.consume_record
87 | :If 1=⊃cr
88 | :AndIf 20000>(3⊃⎕AI)-start
89 | :Continue
90 | :ElseIf 0=⊃cr
91 | consumed_msgs,←⊂(2⊃cr).(Topic Payload Key Partition)
92 | start←0
93 | :Else
94 | cr
95 | :Leave
96 | :EndIf
97 | :EndWhile
98 |
99 | ⎕←'Close consumer:'
100 | ⎕EX'consumer'
101 |
102 | ⍝ Sort first by topic and then by key: ordering is only guaranteed within a key
103 | t_consumed_msgs←1{⍵[⍋⍵[;⍺];]}↑¯1↓¨consumed_msgs
104 | s_consumed_msgs←3{⍵[⍋⍵[;⍺];]}t_consumed_msgs
105 |
106 | t_produced_msgs←1{⍵[⍋⍵[;⍺];]}↑produced_msgs
107 | s_produced_msgs←3{⍵[⍋⍵[;⍺];]}t_produced_msgs
108 |
109 | 'Produced and consumed same messages in the correct order from two topics'ASSERT s_consumed_msgs≡s_produced_msgs
110 |
--------------------------------------------------------------------------------
/Jenkinsfile:
--------------------------------------------------------------------------------
1 | pipeline {
2 | agent none
3 | environment {
4 | SVNCREDS = credentials('83601f39-7c4c-4a13-a0d3-59fcda340753')
5 | }
6 | stages {
7 | stage('Parallel build') {
8 | parallel {
9 | stage('Build on Mac Intel') {
10 | agent {
11 | label 'mac && x86 && build && 20.0'
12 | }
13 | steps {
14 | sh '''#!/bin/bash
15 | set -e
16 | echo "====== macOS: starting build"
17 | export BITS=64
18 |
19 | PLATFORM=mac ./mk_kafka.sh $BITS
20 | echo "====== macOS: finished build"
21 | '''
22 | stash name: 'dist-mac', includes: 'distribution/mac/x64/'
23 | }
24 | }
25 | stage('Build on Mac Arm') {
26 | agent {
27 | label 'mac && arm && build && 20.0'
28 | }
29 | steps {
30 | sh '''#!/bin/bash
31 | set -e
32 | echo "====== macOS: starting build"
33 | export BITS=64
34 |
35 | PLATFORM=mac ./mk_kafka.sh $BITS
36 | echo "====== macOS: finished build"
37 | '''
38 | stash name: 'dist-mac_arm', includes: 'distribution/mac/arm64/'
39 | }
40 | }
41 | stage('Build on AIX') {
42 | agent {
43 | label 'p9-7217'
44 | }
45 | steps {
46 | sh '''#!/bin/bash
47 | set -e
48 | echo "====== AIX: starting build"
49 | export PATH=/opt/freeware/bin:$PATH
50 |
51 | PLATFORM=aix ./mk_kafka.sh 64
52 | PLATFORM=aix ./mk_kafka.sh 32
53 | echo "====== AIX: finished build"
54 | '''
55 | stash name: 'dist-aix', includes: 'distribution/aix/'
56 | }
57 | }
58 | stage('Build on Linux') {
59 | agent {
60 | docker {
61 | image 'dyalogci/ubuntu:20.04-build'
62 | registryCredentialsId '0435817a-5f0f-47e1-9dcc-800d85e5c335'
63 | args '-v /devt:/devt'
64 | }
65 | }
66 | steps {
67 | sh '''#!/bin/bash
68 | set -e
69 | echo "====== Linux: starting build"
70 | export BITS=64
71 |
72 | PLATFORM=linux ./mk_kafka.sh $BITS
73 | echo "====== Linux: finished build"
74 | '''
75 | stash name: 'dist-linux', includes: 'distribution/linux/x64/'
76 | }
77 | }
78 | stage('Build on Windows') {
79 | agent {
80 | label 'win && build && 20.0'
81 | }
82 | steps {
83 | bat 'kafkaBuild.bat'
84 | stash name: 'dist-win', includes: 'distribution/win/'
85 | }
86 | }
87 | }
88 | }
89 | stage('Publish') {
90 | agent {
91 | docker {
92 | image 'dyalogci/node:lts'
93 | registryCredentialsId '0435817a-5f0f-47e1-9dcc-800d85e5c335'
94 | args '-v /devt:/devt'
95 | }
96 | }
97 | environment {
98 | GHTOKEN = credentials('250bdc45-ee69-451a-8783-30701df16935')
99 | }
100 | steps {
101 | unstash 'dist-win'
102 | unstash 'dist-mac'
103 | unstash 'dist-mac_arm'
104 | unstash 'dist-linux'
105 | unstash 'dist-aix'
106 | sh './CI/publish.sh'
107 | sh './CI/gh-release.sh'
108 | }
109 | }
110 | }
111 | }
112 |
--------------------------------------------------------------------------------
/aplsource/Tests/TestCommit.aplf:
--------------------------------------------------------------------------------
1 | TestCommit;config;consumer;start;consumed_msgs;cr;_;committed_beforep0;committed_beforep1;producer;produced_msgs;i;msgs;commit_count;count_msg;msgs_p0;msgs_p1;committed_afterp0;committed_afterp1
2 | ⍝ This test needs a topic "topic" with exactly two partitions.
3 | ⍝ Produce 20 msgs on topic (with two partitions)
4 | ⍝ Consume all the msgs in the queue and check that
5 | ⍝ the offset has been correctly committed
6 |
7 | #.Init'.'
8 |
9 | ⍝ Consumer configurations
10 | config←0 2⍴⍬
11 | config⍪←'bootstrap.servers' 'localhost:9092'
12 | config⍪←'client.id' 'consumerclient'
13 | config⍪←'group.id' 'consumergroup'
14 | config⍪←'auto.offset.reset' 'earliest'
15 | config⍪←'enable.auto.commit' 'false'
16 | ⎕←'Init new consumer with config:'
17 | config
18 |
19 | ⍝ Init new Consumer
20 | consumer←⎕NEW #.Consumer config
21 |
22 | ⍝ Subscribe consumer to topic
23 | ⎕←'Subscribe consumer to topic "topic":'
24 | consumer.subscribe'topic'
25 |
26 | ⎕←'Consume the queue before producing messages for testing:'
27 | start←3⊃⎕AI
28 | consumed_msgs←⍬
29 | :While 1
30 | cr←consumer.consume_record
31 | :If 1=⊃cr
32 | :AndIf 20000>(3⊃⎕AI)-start
33 | :Continue
34 | :ElseIf 0=⊃cr
35 | _←⊂(2⊃cr).(Topic Payload Key Partition)
36 | start←0
37 | :Else
38 | cr
39 | :Leave
40 | :EndIf
41 | :EndWhile
42 | ⍝ Commit
43 | ⎕←'Commit offset:'
44 | consumer.commit
45 |
46 | ⍝ Get the last committed offset. ¯1001 means RD_KAFKA_OFFSET_INVALID
47 | ⍝ from the doc "in case there was no stored offset for that partition".
48 | :If 0=⊃committed_beforep0←consumer.committed'topic' 0
49 | :If ¯1001=2⊃committed_beforep0
50 | committed_beforep0[2]←0 ⍝ So we set to 0
51 | :EndIf
52 | :Else
53 | ⎕←'Error in getting the committed offset for partition 0. Error code: ',⍕⊃committed_beforep0
54 | :Return
55 | :EndIf
56 | :If 0=⊃committed_beforep1←consumer.committed'topic' 1
57 | :If ¯1001=2⊃committed_beforep1
58 | committed_beforep1[2]←0
59 | :EndIf
60 | :Else
61 | ⎕←'Error in getting the committed offset for partition 1. Error code: ',⍕⊃committed_beforep1
62 | :Return
63 | :EndIf
64 |
65 |
66 | ⍝ Producer configurations
67 | config←0 2⍴⍬
68 | config⍪←'bootstrap.servers' 'localhost:9092'
69 | config⍪←'client.id' 'producerclient'
70 | ⎕←'Init new producer with config:'
71 | config
72 |
73 | ⍝ Init new Producer
74 | producer←⎕NEW #.Producer config
75 |
76 | ⎕←'Produce messages on topic:'
77 | ⍝ Produce bundled messages on topic and ask for dr
78 | produced_msgs←⍬
79 | :For i :In ⍳20
80 | msgs←'topic'('payload',⍕i)('key',⍕5|i) ⍝ Producing on topic, on two partitions
81 | producer.produce_record ⎕NEW #.Record(msgs)
82 | produced_msgs,←⊂msgs
83 | :If 0=10|i
84 | ⎕←'Outstanding messages:'
85 | ⎕DL 0.5
86 | producer.update_outstanding
87 | :EndIf
88 | :EndFor
89 |
90 | ⎕←'Close producer:'
91 | ⎕EX'producer'
92 |
93 | ⎕←'Now consume the testing queue:'
94 | start←3⊃⎕AI
95 | commit_count←5 ⍝ commit every 5 consumed messages
96 | count_msg←0
97 | consumed_msgs←⍬
98 | :While 1
99 | cr←consumer.consume_record
100 | :If 1=⊃cr
101 | :AndIf 20000>(3⊃⎕AI)-start
102 | :Continue
103 | :ElseIf 0=⊃cr
104 | consumed_msgs,←⊂(2⊃cr).(Topic Payload Key Partition Offset)
105 | count_msg←count_msg+1
106 | :If 0=commit_count|count_msg
107 | ⎕←'Commit consumer:'
108 | consumer.commit
109 | :EndIf
110 | :Else
111 | cr
112 | :Leave
113 | :EndIf
114 | :EndWhile
115 |
116 | ⎕←'Commit consumer one last time:'
117 | consumer.commit
118 |
119 | msgs_p0←+/~4⌷[2]↑consumed_msgs
120 | msgs_p1←+/4⌷[2]↑consumed_msgs
121 |
122 | :If 0≠⊃committed_afterp0←consumer.committed'topic' 0
123 | ⎕←'Error in getting the committed offset for partition 0. Error code: ',⍕⊃committed_afterp0
124 | :Return
125 | :EndIf
126 | :If 0≠⊃committed_afterp1←consumer.committed'topic' 1
127 | ⎕←'Error in getting the committed offset for partition 1. Error code: ',⍕⊃committed_afterp1
128 | :Return
129 | :EndIf
130 |
131 | ⎕←'Close consumer:'
132 | ⎕EX'consumer'
133 |
134 | 'Manually commit offset on partition 0 and 1 of topic="topic"'ASSERT(msgs_p0=committed_afterp0[2]-committed_beforep0[2])∧msgs_p1=committed_afterp1[2]-committed_beforep1[2]
135 |
--------------------------------------------------------------------------------
/aplsource/Tests/TestSameGroup.aplf:
--------------------------------------------------------------------------------
1 | TestSameGroup;config;consumerA;consumerB;start;consumed_msgsA;cr;consumed_msgsB;_;producer;produced_msgs;msgs;i;consumed_msgsAB;t_consumed_msgsAB;s_consumed_msgsAB;t_produced_msgs;s_produced_msgs
2 | ⍝ Produce 15 msgs on topic1 and 20 on topic2
3 | ⍝ Consume all the msgs in the queue from two consumers
4 | ⍝ in the same group and check that, between them, they
5 | ⍝ consume exactly the produced msgs, in the same
6 | ⍝ order within each key.
7 |
8 | #.Init'.'
9 |
10 | ⍝ Consumer configurations
11 | config←0 2⍴⍬
12 | config⍪←'bootstrap.servers' 'localhost:9092'
13 | config⍪←'client.id' 'consumerAclient'
14 | config⍪←'group.id' 'consumerABgroup'
15 | config⍪←'auto.offset.reset' 'earliest'
16 | ⎕←'Init new consumerA with config:'
17 | config
18 |
19 | ⍝ Init new Consumer
20 | consumerA←⎕NEW #.Consumer config
21 |
22 |
23 | ⍝ Consumer configurations
24 | config←0 2⍴⍬
25 | config⍪←'bootstrap.servers' 'localhost:9092'
26 | config⍪←'client.id' 'consumerBclient'
27 | config⍪←'group.id' 'consumerABgroup'
28 | config⍪←'auto.offset.reset' 'earliest'
29 | ⎕←'Init new consumerB with config:'
30 | config
31 |
32 | ⍝ Init new Consumer
33 | consumerB←⎕NEW #.Consumer config
34 |
35 | ⍝ Subscribe consumer to topic
36 | ⎕←'Subscribe consumers to topics "topic1" and "topic2":'
37 | consumerA.subscribe'topic1' 'topic2'
38 | consumerB.subscribe'topic1' 'topic2'
39 | ⎕←'Waiting for rebalance before starting consuming...'
40 | ⎕DL 5
41 |
42 | ⎕←'Consume the queueA before producing:'
43 | start←3⊃⎕AI
44 | consumed_msgsA←⍬
45 | :While 1
46 | cr←consumerA.consume_record
47 | :If 1=⊃cr
48 | :AndIf 20000>(3⊃⎕AI)-start
49 | :Continue
50 | :ElseIf 0=⊃cr
51 | _←⊂(2⊃cr).(Topic Payload Key Partition)
52 | start←0
53 | :Else
54 | cr
55 | :Leave
56 | :EndIf
57 | :EndWhile
58 |
59 | ⎕←'Consume the queueB before producing:'
60 | start←3⊃⎕AI
61 | consumed_msgsB←⍬
62 | :While 1
63 | cr←consumerB.consume_record
64 | :If 1=⊃cr
65 | :AndIf 20000>(3⊃⎕AI)-start
66 | :Continue
67 | :ElseIf 0=⊃cr
68 | _←⊂(2⊃cr).(Topic Payload Key Partition)
69 | start←0
70 | :Else
71 | cr
72 | :Leave
73 | :EndIf
74 | :EndWhile
75 |
76 | ⍝ Producer configurations
77 | config←0 2⍴⍬
78 | config⍪←'bootstrap.servers' 'localhost:9092'
79 | config⍪←'client.id' 'producerclient'
80 | ⎕←'Init new producer with config:'
81 | config
82 |
83 | ⍝ Init new Producer
84 | producer←⎕NEW #.Producer config
85 |
86 | ⍝ Produce bundled messages on topic and ask for dr
87 | ⎕←'Produce messages on topic1:'
88 | produced_msgs←⍬
89 | :For i :In ⍳15
90 | msgs←'topic1'('payload',⍕i)('key',⍕5|i)
91 | producer.produce_record ⎕NEW #.Record(msgs)
92 | produced_msgs,←⊂msgs
93 | :If 0=10|i
94 | ⎕←'Outstanding messages:'
95 | ⎕DL 0.5
96 | producer.update_outstanding
97 | :EndIf
98 | :EndFor
99 | ⎕←'Outstanding messages:'
100 | ⎕DL 0.5
101 | producer.update_outstanding
102 |
103 | ⎕←'Produce messages on topic2:'
104 | :For i :In ⍳20
105 | msgs←'topic2'('payload',⍕i)('key',⍕5|i)
106 | producer.produce_record ⎕NEW #.Record(msgs)
107 | produced_msgs,←⊂msgs
108 | :If 0=10|i
109 | ⎕←'Outstanding messages:'
110 | ⎕DL 0.5
111 | producer.update_outstanding
112 | :EndIf
113 | :EndFor
114 |
115 | ⎕←'Close producer:'
116 | ⎕EX'producer'
117 |
118 | ⎕←'ConsumerA consumes the testing queue:'
119 | start←3⊃⎕AI
120 | consumed_msgsA←⍬
121 | :While 1
122 | cr←consumerA.consume_record
123 | :If 1=⊃cr
124 | :AndIf 20000>(3⊃⎕AI)-start
125 | :Continue
126 | :ElseIf 0=⊃cr
127 | consumed_msgsA,←⊂(2⊃cr).(Topic Payload Key Partition)
128 | start←0
129 | :Else
130 | cr
131 | :Leave
132 | :EndIf
133 | :EndWhile
134 |
135 | ⎕←'ConsumerB consumes the testing queue:'
136 | start←3⊃⎕AI
137 | consumed_msgsB←⍬
138 | :While 1
139 | cr←consumerB.consume_record
140 | :If 1=⊃cr
141 | :AndIf 20000>(3⊃⎕AI)-start
142 | :Continue
143 | :ElseIf 0=⊃cr
144 | consumed_msgsB,←⊂(2⊃cr).(Topic Payload Key Partition)
145 | start←0
146 | :Else
147 | cr
148 | :Leave
149 | :EndIf
150 | :EndWhile
151 |
152 | ⎕←'Close consumerA and consumerB:'
153 | ⎕EX'consumerA'
154 | ⎕EX'consumerB'
155 |
156 | ⍝ Sort first by topic and then by key: ordering is only guaranteed within a key
157 | consumed_msgsAB←consumed_msgsA,consumed_msgsB
158 | t_consumed_msgsAB←1{⍵[⍋⍵[;⍺];]}↑¯1↓¨consumed_msgsAB
159 | s_consumed_msgsAB←3{⍵[⍋⍵[;⍺];]}t_consumed_msgsAB
160 |
161 | t_produced_msgs←1{⍵[⍋⍵[;⍺];]}↑produced_msgs
162 | s_produced_msgs←3{⍵[⍋⍵[;⍺];]}t_produced_msgs
163 |
164 | 'Produced and consumed messages from within the same consumer group'ASSERT s_consumed_msgsAB≡s_produced_msgs
165 |
--------------------------------------------------------------------------------
/aplsource/Tests/TestDiffGroup.aplf:
--------------------------------------------------------------------------------
1 | TestDiffGroup;config;consumerA;consumerB;start;consumed_msgsA;cr;consumed_msgsB;_;producer;produced_msgs;msgs;i;t_consumed_msgsA;s_consumed_msgsA;t_consumed_msgsB;s_consumed_msgsB;t_produced_msgs;s_produced_msgs
2 | ⍝ Produce 15 msgs on topic1 and 20 on topic2
3 | ⍝ Consume all the msgs in the queue from two consumers
4 | ⍝ in different groups and check that each consumer consumes
5 | ⍝ all the produced msgs, in the same
6 | ⍝ order within each key.
7 |
8 | #.Init'.'
9 |
10 | ⍝ Consumer configurations
11 | config←0 2⍴⍬
12 | config⍪←'bootstrap.servers' 'localhost:9092'
13 | config⍪←'client.id' 'consumerAclient'
14 | config⍪←'group.id' 'consumerAgroup'
15 | config⍪←'auto.offset.reset' 'earliest'
16 | ⎕←'Init new consumerA with config:'
17 | config
18 |
19 | ⍝ Init new Consumer
20 | consumerA←⎕NEW #.Consumer config
21 |
22 |
23 | ⍝ Consumer configurations
24 | config←0 2⍴⍬
25 | config⍪←'bootstrap.servers' 'localhost:9092'
26 | config⍪←'client.id' 'consumerBclient'
27 | config⍪←'group.id' 'consumerBgroup'
28 | config⍪←'auto.offset.reset' 'earliest'
29 | ⎕←'Init new consumerB with config:'
30 | config
31 |
32 | ⍝ Init new Consumer
33 | consumerB←⎕NEW #.Consumer config
34 |
35 | ⍝ Subscribe consumer to topic
36 | ⎕←'Subscribe consumers to topics "topic1" and "topic2":'
37 | consumerA.subscribe'topic1' 'topic2'
38 | consumerB.subscribe'topic1' 'topic2'
39 | ⎕←'Waiting for rebalance before starting consuming...'
40 | ⎕DL 5
41 |
42 | ⎕←'Consume the queueA before producing:'
43 | start←3⊃⎕AI
44 | consumed_msgsA←⍬
45 | :While 1
46 | cr←consumerA.consume_record
47 | :If 1=⊃cr
48 | :AndIf 20000>(3⊃⎕AI)-start
49 | :Continue
50 | :ElseIf 0=⊃cr
51 | _←⊂(2⊃cr).(Topic Payload Key Partition)
52 | start←0
53 | :Else
54 | cr
55 | :Leave
56 | :EndIf
57 | :EndWhile
58 |
59 | ⎕←'Consume the queueB before producing:'
60 | start←3⊃⎕AI
61 | consumed_msgsB←⍬
62 | :While 1
63 | cr←consumerB.consume_record
64 | :If 1=⊃cr
65 | :AndIf 20000>(3⊃⎕AI)-start
66 | :Continue
67 | :ElseIf 0=⊃cr
68 | _←⊂(2⊃cr).(Topic Payload Key Partition)
69 | start←0
70 | :Else
71 | cr
72 | :Leave
73 | :EndIf
74 | :EndWhile
75 |
76 | ⍝ Producer configurations
77 | config←0 2⍴⍬
78 | config⍪←'bootstrap.servers' 'localhost:9092'
79 | config⍪←'client.id' 'producerclient'
80 | ⎕←'Init new producer with config:'
81 | config
82 |
83 | ⍝ Init new Producer
84 | producer←⎕NEW #.Producer config
85 |
86 | ⍝ Produce bundled messages on topic and ask for dr
87 | ⎕←'Produce messages on topic1:'
88 | produced_msgs←⍬
89 | :For i :In ⍳15
90 | msgs←'topic1'('payload',⍕i)('key',⍕5|i)
91 | producer.produce_record ⎕NEW #.Record(msgs)
92 | produced_msgs,←⊂msgs
93 | :If 0=10|i
94 | ⎕←'Outstanding messages:'
95 | ⎕DL 0.5
96 | producer.update_outstanding
97 | :EndIf
98 | :EndFor
99 | ⎕←'Outstanding messages:'
100 | ⎕DL 0.5
101 | producer.update_outstanding
102 |
103 | ⎕←'Produce messages on topic2:'
104 | :For i :In ⍳20
105 | msgs←'topic2'('payload',⍕i)('key',⍕5|i)
106 | producer.produce_record ⎕NEW #.Record(msgs)
107 | produced_msgs,←⊂msgs
108 | :If 0=10|i
109 | ⎕←'Outstanding messages:'
110 | ⎕DL 0.5
111 | producer.update_outstanding
112 | :EndIf
113 | :EndFor
114 |
115 | ⎕←'Close producer:'
116 | ⎕EX'producer'
117 |
118 | ⎕←'ConsumerA consumes the testing queue:'
119 | start←3⊃⎕AI
120 | consumed_msgsA←⍬
121 | :While 1
122 | cr←consumerA.consume_record
123 | :If 1=⊃cr
124 | :AndIf 20000>(3⊃⎕AI)-start
125 | :Continue
126 | :ElseIf 0=⊃cr
127 | consumed_msgsA,←⊂(2⊃cr).(Topic Payload Key Partition)
128 | start←0
129 | :Else
130 | cr
131 | :Leave
132 | :EndIf
133 | :EndWhile
134 |
135 | ⎕←'ConsumerB consumes the testing queue:'
136 | start←3⊃⎕AI
137 | consumed_msgsB←⍬
138 | :While 1
139 | cr←consumerB.consume_record
140 | :If 1=⊃cr
141 | :AndIf 20000>(3⊃⎕AI)-start
142 | :Continue
143 | :ElseIf 0=⊃cr
144 | consumed_msgsB,←⊂(2⊃cr).(Topic Payload Key Partition)
145 | start←0
146 | :Else
147 | cr
148 | :Leave
149 | :EndIf
150 | :EndWhile
151 |
152 |
153 | ⎕←'Close consumerA and consumerB:'
154 | ⎕EX'consumerA'
155 | ⎕EX'consumerB'
156 |
157 | ⍝ Sort first by topic and then by key: ordering is only guaranteed within a key
158 | t_consumed_msgsA←1{⍵[⍋⍵[;⍺];]}↑¯1↓¨consumed_msgsA
159 | s_consumed_msgsA←3{⍵[⍋⍵[;⍺];]}t_consumed_msgsA
160 | t_consumed_msgsB←1{⍵[⍋⍵[;⍺];]}↑¯1↓¨consumed_msgsB
161 | s_consumed_msgsB←3{⍵[⍋⍵[;⍺];]}t_consumed_msgsB
162 |
163 |
164 | t_produced_msgs←1{⍵[⍋⍵[;⍺];]}↑produced_msgs
165 | s_produced_msgs←3{⍵[⍋⍵[;⍺];]}t_produced_msgs
166 |
167 |
168 | 'Produced and consumed messages from two different consumer groups'ASSERT(s_consumed_msgsB≡s_produced_msgs)∧s_consumed_msgsA≡s_produced_msgs
169 |
--------------------------------------------------------------------------------
/CI/gh-release.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | GIT_BRANCH=${JOB_NAME#*/*/}
5 | GIT_COMMIT=$(git rev-parse HEAD)
6 |
7 | case $GIT_BRANCH in
8 | main|kafka[0-9]\.[0-9])
9 | echo "creating ${GIT_BRANCH} release"
10 | ;;
11 | *)
12 | echo "skipping creating release for ${GIT_BRANCH}"
13 | exit 0
14 | ;;
15 | esac
16 |
17 |
18 | # create JSON
19 | TMP_JSON=/tmp/gh-publish.$$.json
20 | GH_RELEASES=/tmp/gh-releases.$$.json
21 | BASE_VERSION=`cat base-version.txt`
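   | # Full version = base version, with the commit count on this branch as the patch number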
22 | VERSION="${BASE_VERSION}.`git rev-list HEAD --count`"
23 |
24 | if ! [ "$GHTOKEN" ]; then
25 | echo 'Please put your GitHub API Token in an environment variable named GHTOKEN'
26 | exit 1
27 | fi
28 |
29 | # Delete all the old draft releases, otherwise this gets filled up pretty fast as we create for every commit:
30 | # but only if jq is available
31 | if which jq >/dev/null 2>&1; then
32 | DRAFT=true
33 | C=0
34 |
35 | # Get the json from Github API
36 | curl -o $GH_RELEASES \
37 | --silent -H "Authorization: token $GHTOKEN" \
38 | https://api.github.com/repos/Dyalog/kafka/releases
39 |
40 | RELEASE_COUNT=`cat $GH_RELEASES | jq ". | length"`
41 | GH_VERSION_ND_LAST=0
42 |
43 | while [ $C -lt $RELEASE_COUNT ] ; do
44 | DRAFT=`cat $GH_RELEASES | jq ".[$C].draft"`
45 | ID=`cat $GH_RELEASES | jq ".[$C].id"`
46 | GH_VERSION=$(cat $GH_RELEASES | jq ".[$C].name" | sed 's/"//g;s/^v//')
47 | GH_VERSION_ND=$(cat $GH_RELEASES | jq ".[$C].name" | sed 's/"//g;s/^v//;s/\.//g')
48 | GH_VERSION_AB=${GH_VERSION%.*}
49 |
50 | if [ "${GH_VERSION_AB}" = "${BASE_VERSION}" ]; then
51 | if [ "$DRAFT" = "true" ]; then
52 | echo -e -n "*** $(cat $GH_RELEASES | jq ".[$C].name" | sed 's/"//g') with id: $(cat $GH_RELEASES | jq ".[$C].id") is a draft - Deleting.\n"
53 | curl -X "DELETE" -H "Authorization: token $GHTOKEN" https://api.github.com/repos/Dyalog/kafka/releases/${ID}
54 | else
55 | if [ $GH_VERSION_ND -gt $GH_VERSION_ND_LAST ]; then
56 | COMMIT_SHA=`cat $GH_RELEASES | jq -r ".[$C].target_commitish"`
57 | GH_VERSION_ND_LAST=$GH_VERSION_ND
58 | PRERELEASE=`cat $GH_RELEASES | jq -r ".[$C].prerelease"`
59 | fi
60 | fi
61 | fi
62 |
63 | let C=$C+1
64 | done
65 | rm -f $GH_RELEASES
66 |
67 | else
68 | echo jq not found, not removing draft releases
69 | fi
70 |
71 | if [ "${GH_VERSION_ND_LAST:-0}" = 0 ]; then
72 | echo "No releases of $BASE_VERSION found, not populating changelog"
73 | JSON_BODY=$(echo -e "Pre-Release of Dyalog-Kafka $BASE_VERSION\n\nWARNING: This is a pre-release version of Dyalog-Kafka $BASE_VERSION: it is possible that functionality may be added, removed or altered; we do not recommend using pre-release versions of Dyalog-Kafka in production environments." | python -c 'import json,sys; print(json.dumps(sys.stdin.read()))')
74 | PRERELEASE=true
75 | else
76 | echo using log from $COMMIT_SHA from $GH_VERSION_ND_LAST
77 | echo "Is Prerelease: ${PRERELEASE}"
78 | if [ "${PRERELEASE}" = "false" ]; then
79 | MSG_TEXT="Release Dyalog-Kafka ${BASE_VERSION}\n\n"
80 | else
81 | MSG_TEXT="Pre-Release of Dyalog-Kafka $BASE_VERSION\n\nWARNING: This is a pre-release version of Dyalog-Kafka $BASE_VERSION: it is possible that functionality may be added, removed or altered; we do not recommend using pre-release versions of Dyalog-Kafka in production environments.\n\n"
82 | fi
83 | JSON_BODY=$( ( echo -e "${MSG_TEXT}Changelog:"; git log --format='%s' ${COMMIT_SHA}.. ) | grep -v -i todo | python -c 'import json,sys; print(json.dumps(sys.stdin.read()))')
84 | fi
85 |
86 | cat >$TMP_JSON <<.
87 | {
88 | "tag_name": "v$VERSION",
89 | "target_commitish": "${GIT_COMMIT}",
90 | "name": "v$VERSION",
91 | "body": $JSON_BODY,
92 | "draft": true,
93 | "prerelease": ${PRERELEASE}
94 | }
95 | .
96 |
97 | cat $TMP_JSON
98 |
99 | REPO=Dyalog/kafka # ideally this should be parsed from "git ls-remote --get-url origin"
100 | TMP_RESPONSE=/tmp/gh-response.$$.json
101 | curl -o $TMP_RESPONSE --data @$TMP_JSON -H "Authorization: token $GHTOKEN" -i https://api.github.com/repos/$REPO/releases
102 |
103 | cat "$TMP_RESPONSE"
104 |
105 | RELEASE_ID=`grep '"id"' $TMP_RESPONSE | head -1 | sed 's/.*: //;s/,//'`
106 |
107 | echo "created release with id: $RELEASE_ID"
108 |
109 | for F in /devt/builds/kafka/${GIT_BRANCH}/latest/*.zip; do
110 | # Check if the file exists and is not empty
111 | if [ -f "$F" ]; then
112 | echo "Uploading $F to Github"
113 | curl -o /dev/null -H "Authorization: token $GHTOKEN" \
114 | -H 'Accept: application/vnd.github.manifold-preview' \
115 | -H 'Content-Type: application/zip' \
116 | --data-binary @"$F" \
117 | https://uploads.github.com/repos/$REPO/releases/$RELEASE_ID/assets?name=$(basename "$F")
118 | fi
119 | done
120 |
121 | rm -f $TMP_RESPONSE $TMP_JSON
122 |
--------------------------------------------------------------------------------
/SPEC.md:
--------------------------------------------------------------------------------
1 | # Dyalog-Kafka
2 |
3 | The aim of the Dyalog-Kafka project is to provide a binding to part of the [Confluent librdkafka](https://github.com/confluentinc/librdkafka) library such that we can access Kafka from Dyalog APL.
4 |
5 | ## Note
6 |
7 | The interface presented below is a work in progress, and its semantics should not be relied upon.
8 |
9 | ## Scope
10 |
11 | For the first milestone of this project, we aim to support the `Producer` and `Consumer` aspects only. This means that there will be no Dyalog APL version of the `AdminClient` API which interacts with the cluster (and topic) configuration. All topic creation must therefore be done outside Dyalog APL.
12 |
13 | Our initial aim is to provide as thin a layer as possible on top of librdkafka, upon which richer Dyalog interfaces can be based. This falls into two abstraction layers:
14 | 1. The API layer itself, mapping the librdkafka functions into APL.
15 | 2. A convenience APL layer built on top of that.
16 |
17 | The semantics of the lower layer are largely dictated by the wrapped library, with a few work-arounds required by the Dyalog FFI. We don't expect Dyalog APL application developers will want to use this layer directly.
18 |
19 | The convenience APL layer will likely change rapidly and without notice as the design progresses. It is currently provided solely as an illustration of what will be covered, and we're still investigating the exact makeup of this layer.
20 |
21 | The rest of this document deals with the second layer, and has three aspects:
22 | 1. Configuration
23 | 2. Producer
24 | 3. Consumer
25 |
26 | ## Library initialisation
27 |
28 | Use `]link.create # aplsource` to bring the code into your workspace, and then call the `Init` function with the path to the directory where the shared library resides:
29 | ```
30 | Init 'path/to/dir/housing/kafka/shared/lib'
31 | ```
32 |
33 | You should now be ready to use the library.
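   |
   | As a quick check that the shared library has loaded, you can ask the low-level binding layer for the librdkafka version (a minimal sketch, assuming the code was linked into the root namespace as above):
   |
   | ```apl
   | #.kafka.version   ⍝ numeric version and version string from librdkafka
   | ```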
34 |
35 | ## Configuration
36 |
37 | The configuration format is a table of key and value columns. The key names are the standard Kafka configuration keys. Both the `Producer` and `Consumer` take the same configuration format. Here is what a consumer configuration could look like:
38 |
39 | ```apl
40 | config ← 0 2 ⍴⍬
41 | config⍪← 'bootstrap.servers' 'localhost:9092'
42 | config⍪← 'client.id' 'bhcgrs3550'
43 | config⍪← 'group.id' 'dyalog'
44 | config
45 |
46 | bootstrap.servers localhost:9092
47 | client.id bhcgrs3550
48 | group.id dyalog
49 | ```
50 | which specifies a client with the id `bhcgrs3550`, belonging to the consumer group `dyalog`, talking to the Kafka cluster with entry point `localhost:9092`. A corresponding `Producer` configuration example:
51 |
52 | ```apl
53 | config←0 2⍴⍬
54 | config⍪←'bootstrap.servers' 'localhost:9092'
55 | config⍪←'client.id' 'bhc'
56 | config
57 |
58 | bootstrap.servers localhost:9092
59 | client.id bhc
60 | ```
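   |
   | Individual settings can also be applied to an instance after construction via its `configure` method, which takes a single (key value) pair. A sketch, based on the current `Consumer` class:
   |
   | ```apl
   | consumer.configure 'enable.auto.commit' 'false'
   | ```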
61 |
62 | ## Producer scope
63 |
64 | Assumption: you have created a configuration table as per above.
65 |
66 | 1. Create a producer
67 | ```
68 | producer←⎕NEW Producer config
69 | ```
70 |
71 | 2. Produce a message
72 |
73 | A Kafka message is a tuple specifying the topic, the payload and the message key. Use the `produce` method to produce a message from `topic`, `payload` and `key`:
74 | ```
75 | producer.produce 'animals' 'tiger' 'key01'
76 | ```
77 | Where necessary, encode the payload:
78 | ```
79 | producer.produce 'invoices' (1⎕JSON invoice) customer_id
80 | ```
81 | There is also a `Record` interface that packages up the message in a bundle:
82 | ```
83 | producer.produce_record ⎕NEW #.Record('animals' 'Blackbird' 'key01')
84 | ```
85 | To produce multiple messages, use a loop:
86 | ```apl
87 | :For i :In ⍳n
88 | producer.produce 'animals' (100↑'Payload',⍕i) ('key',⍕4|i)
89 | :If 0=10|i
90 | producer.update_outstanding ⍝ Await successful delivery
91 | :EndIf
92 | :EndFor
93 | ```
94 |
95 | 3. Delivery receipts
96 |
97 | Use `producer.delivery_report n` to see the `n` most recent delivery receipts.
98 |
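   | A minimal sketch (an assumption based on the underlying `DeliveryReport` entry point: each receipt pairs a message id with a librdkafka error code, `0` meaning successful delivery):
   |
   | ```apl
   | producer.delivery_report 10   ⍝ up to the 10 most recent receipts
   | ```
   |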
99 | We're looking at options for what a more ergonomic asynchronous API would look like.
100 |
101 | 4. Destroy the producer
102 | ```
103 | ⎕EX'producer'
104 | ```
105 |
106 | ## Consumer scope
107 |
108 | Assumption: you have created a configuration table as per above.
109 |
110 | 1. Create a consumer instance
111 | ```apl
112 | consumer←⎕NEW Consumer config
113 | ```
114 | 2. Subscribe client instance to a set of Kafka topics
115 | ```apl
116 | consumer.subscribe 'animals' 'cars' 'plants'
117 | ```
118 | 3. Consume
119 |
120 | Consume messages in a loop. Kafka parallelism is achieved through consumer groups and partitioned topics. The `Record` interface allows access by the names `Topic`, `Payload`, `Key`, `Partition` and `Offset`.
121 | ```apl
122 | :While 0=⊃rec←consumer.consume_record
123 | (2⊃rec).(Topic Payload Key Partition Offset)
124 | :EndWhile
125 | ```
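   | The first element of the result is a return code: `0` means a record was delivered, `1` means no message arrived within the poll interval (so the loop above ends at the first quiet poll), and anything else is an error. The bundled tests instead keep polling on code `1` until a time budget expires; a sketch of that pattern:
   | ```apl
   | start←3⊃⎕AI
   | :While 1
   | cr←consumer.consume_record
   | :If 1=⊃cr ⍝ nothing yet: retry for up to 20s
   | :AndIf 20000>(3⊃⎕AI)-start
   | :Continue
   | :ElseIf 0=⊃cr ⍝ got a record
   | (2⊃cr).(Topic Payload Key Partition Offset)
   | start←0 ⍝ after the first record, the next quiet poll exits
   | :Else
   | :Leave ⍝ error
   | :EndIf
   | :EndWhile
   | ```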
126 | If auto-commit is disabled (see the `enable.auto.commit` config parameter for the consumer), it is possible to manually commit offsets
127 | ```apl
128 | consumer.commit
129 | ```
130 | which will synchronously commit the offset on the current partition assignment.
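   | The last committed offset for a given topic and partition can be read back with `committed`; a sketch (the first element of the result is an error code, and an offset of `¯1001` is librdkafka's `RD_KAFKA_OFFSET_INVALID`, i.e. nothing has been committed yet for that partition):
   | ```apl
   | consumer.committed 'animals' 0   ⍝ e.g. 0 42: no error, next offset 42
   | ```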
131 | 4. Destroy consumer
132 | ```apl
133 | ⎕EX'consumer'
134 | ```
135 |
136 | ## Examples
137 |
138 | Note: the semantics are subject to change.
139 |
140 | Here is a complete example showing both a `Producer` and a `Consumer`.
141 |
142 | Create three topics:
143 | ```
144 | kafka-topics.sh \
145 | --bootstrap-server localhost:9092 \
146 | --create --topic "animals" \
147 | --partitions 3
148 |
149 | kafka-topics.sh \
150 | --bootstrap-server localhost:9092 \
151 | --create --topic "cars" \
152 | --partitions 3
153 |
154 | kafka-topics.sh \
155 | --bootstrap-server localhost:9092 \
156 | --create --topic "plants" \
157 | --partitions 3
158 | ```
159 |
160 | Now, run the following function:
161 | ```apl
162 | Example n;i
163 | ⍝ Produce and consume messages
164 |
165 | ⍝ Call to Init function
166 | ⍝ Init'path/to/dir/housing/kafka/shared/lib'
167 | #.Init'.'
168 |
169 | ⍝ Set up the producer
170 | config←0 2⍴⍬
171 | config⍪←'bootstrap.servers' 'localhost:9092'
172 | config⍪←'client.id' 'bhc'
173 |
174 | producer←⎕NEW Producer config
175 |
176 | ⍝ Produce onto the "animals" topic the message "tiger" with key "cats"
177 | producer.produce'animals' 'tiger' 'cats'
178 |
179 | ⍝ Produce n messages onto the animals topic in a loop by using the Record interface
180 | :For i :In ⍳n
181 | producer.produce_record ⎕NEW #.Record('animals'(75↑'Payload',⍕i)('key',⍕4|i))
182 | :If 0=10|i
183 | producer.update_outstanding ⍝ Ask for delivery report
184 | :EndIf
185 | :EndFor
186 |
187 | ⍝ Produce a few messages to the other topics, too
188 | producer.produce_record ⎕NEW #.Record('cars' 'ferrari' 'sportcars')
189 | producer.produce_record ⎕NEW #.Record('plants' 'iris' 'flowers')
190 | ⍝ Ask for delivery report
191 | producer.update_outstanding
192 |
193 |
194 | ⍝ Set up the consumer
195 | config←0 2⍴⍬
196 | config⍪←'bootstrap.servers' 'localhost:9092'
197 | config⍪←'client.id' 'bhcgrs3550'
198 | config⍪←'group.id' 'dyalog'
199 | config⍪←'auto.offset.reset' 'earliest' ⍝ Start consuming from the beginning if no offset is found
200 |
201 | topic_list←'animals' 'cars' 'plants'
202 | consumer←⎕NEW Consumer config
203 | consumer.subscribe topic_list
204 |
205 | ⎕DL 5
206 |
207 | ⍝ Let's drain the topics
208 | :While 0=⊃cr←consumer.consume_record
209 | (2⊃cr).(Topic Payload Key Partition Offset)
210 | :EndWhile
211 |
212 | ⍝ Tidy up
213 | ⎕EX'producer'
214 | ⎕EX'consumer'
215 |
216 | ```
217 |
218 |
--------------------------------------------------------------------------------
/kafka/kafka.vcxproj:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | Debug
6 | Win32
7 |
8 |
9 | Release
10 | Win32
11 |
12 |
13 | Debug
14 | x64
15 |
16 |
17 | Release
18 | x64
19 |
20 |
21 |
22 | 17.0
23 | Win32Proj
24 | {5457142e-4872-4be3-bbcb-6a0baad2298c}
25 | kafka
26 | 10.0
27 |
28 |
29 |
30 | DynamicLibrary
31 | true
32 | v143
33 | Unicode
34 |
35 |
36 | DynamicLibrary
37 | false
38 | v143
39 | true
40 | Unicode
41 |
42 |
43 | DynamicLibrary
44 | true
45 | v143
46 | Unicode
47 |
48 |
49 | DynamicLibrary
50 | false
51 | v143
52 | true
53 | Unicode
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 |
73 |
74 | $(SolutionDir)$(Platform)\int\$(Configuration)\
75 |
76 |
77 | $(SolutionDir)$(Platform)\int\$(Configuration)\
78 |
79 |
80 | $(SolutionDir)$(Platform)\$(Configuration)\
81 | $(SolutionDir)$(Platform)\int\$(Configuration)\
82 |
83 |
84 | $(SolutionDir)$(Platform)\$(Configuration)\
85 | $(SolutionDir)$(Platform)\int\$(Configuration)\
86 |
87 |
88 |
89 | Level3
90 | true
91 | FORWIN=1;WIN32;_DEBUG;KAFKA_EXPORTS;_WINDOWS;_USRDLL;%(PreprocessorDefinitions)
92 | true
93 | NotUsing
94 | pch.h
95 | $(SolutionDir)packages\librdkafka.redist.2.5.0\build\native\include\librdkafka
96 |
97 |
98 | Windows
99 | true
100 | false
101 | $(SolutionDir)packages\librdkafka.redist.2.5.0\build\native\lib\win\$(PlatformShortName)\win-$(PlatformShortName)-Release\v142
102 | librdkafka.lib;librdkafkacpp.lib
103 |
104 |
105 | xcopy $(OutDir)*.dll $(SolutionDir)distribution\win\$(Configuration)\$(PlatformShortName)\ /Y
106 |
107 |
108 |
109 |
110 | Level3
111 | true
112 | true
113 | true
114 | FORWIN=1;WIN32;NDEBUG;KAFKA_EXPORTS;_WINDOWS;_USRDLL;%(PreprocessorDefinitions)
115 | true
116 | NotUsing
117 | pch.h
118 | $(SolutionDir)packages\librdkafka.redist.2.5.0\build\native\include\librdkafka
119 |
120 |
121 | Windows
122 | true
123 | true
124 | true
125 | false
126 | $(SolutionDir)packages\librdkafka.redist.2.5.0\build\native\lib\win\$(PlatformShortName)\win-$(PlatformShortName)-Release\v142
127 | librdkafka.lib;librdkafkacpp.lib
128 |
129 |
130 | xcopy $(OutDir)*.dll $(SolutionDir)distribution\win\$(Configuration)\$(PlatformShortName)\ /Y
131 |
132 |
133 |
134 |
135 | Level3
136 | true
137 | FORWIN=1;_DEBUG;KAFKA_EXPORTS;_WINDOWS;_USRDLL;%(PreprocessorDefinitions)
138 | true
139 | NotUsing
140 | pch.h
141 | $(SolutionDir)packages\librdkafka.redist.2.5.0\build\native\include\librdkafka
142 |
143 |
144 | Windows
145 | true
146 | false
147 | $(SolutionDir)packages\librdkafka.redist.2.5.0\build\native\lib\win\$(PlatformShortName)\win-$(PlatformShortName)-Release\v142
148 | librdkafka.lib;librdkafkacpp.lib
149 |
150 |
151 | xcopy $(OutDir)*.dll $(SolutionDir)distribution\win\$(Configuration)\$(PlatformShortName)\ /Y
152 |
153 |
154 |
155 |
156 | Level3
157 | true
158 | true
159 | true
160 | FORWIN=1;NDEBUG;KAFKA_EXPORTS;_WINDOWS;_USRDLL;%(PreprocessorDefinitions)
161 | true
162 | NotUsing
163 | pch.h
164 | $(SolutionDir)packages\librdkafka.redist.2.5.0\build\native\include\librdkafka
165 |
166 |
167 | Windows
168 | true
169 | true
170 | true
171 | false
172 | $(SolutionDir)packages\librdkafka.redist.2.5.0\build\native\lib\win\$(PlatformShortName)\win-$(PlatformShortName)-Release\v142
173 | librdkafka.lib;librdkafkacpp.lib
174 |
175 |
176 | xcopy $(OutDir)*.dll $(SolutionDir)distribution\win\$(Configuration)\$(PlatformShortName)\ /Y
177 |
178 |
179 |
180 |
181 |
182 |
183 |
184 |
185 |
186 |
187 |
188 |
189 |
190 |
191 |
192 |
193 |
194 |
195 | This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.
196 |
197 |
198 |
199 |
--------------------------------------------------------------------------------
/kafka/kafka.cpp:
--------------------------------------------------------------------------------
1 | #if defined(FORWIN)
2 | #define WIN32_LEAN_AND_MEAN // Exclude rarely-used stuff from Windows headers
3 | // Windows Header Files
4 | #include <windows.h>
5 | #elif defined(FORmac) || defined(FORaix) || defined(FORlinux)
6 | #include <unistd.h>
7 | #else
8 | #endif
9 | #include <stdio.h>
10 | #include <stdlib.h>
11 | #include <string.h>
12 | #include "rdkafka.h"
13 | #include "kafka.h"
14 |
15 | #if defined(FORmac) || defined(FORlinux) || defined(FORaix)
16 | int strncpy_s(char* dst, size_t ds, const char* src, size_t ss)
17 | {
18 | strncpy(dst, src, ss);
19 | return 0;
20 | }
21 | #define min(a,b) ((a)<(b)?(a):(b))
22 | #endif
23 |
24 |
25 |
26 | unsigned long long global_counter=0; // monotonically increasing message id
27 |
28 | typedef struct {
29 | unsigned long long msg_id;
30 | rd_kafka_resp_err_t err;
31 | } delivery_report;
32 |
33 | typedef struct {
34 | int counter;
35 | int counter_limit;
36 | delivery_report** drs;
37 | } delivery_reports;
38 |
39 | typedef struct {
40 | rd_kafka_conf_t* conf;
41 | rd_kafka_t* rk;
42 | rd_kafka_message_t* msg;
43 | } kafka_struct;
44 |
45 |
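   | // Delivery-report callback, invoked from rd_kafka_poll(): records one
   | // (msg_id, err) pair per delivered message in the producer's opaque
   | // drs array, doubling its capacity when full.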
46 | static void on_delivery(rd_kafka_t* rk, const rd_kafka_message_t* rkmessage, void* opaque)
47 | {
48 | delivery_reports* drs = (delivery_reports*)rd_kafka_opaque(rk);
49 |
50 | if (drs->counter >= drs->counter_limit)
51 | {
52 | //extend the drs array;
53 | delivery_report** drs_tmp = (delivery_report**)calloc(2 * drs->counter_limit, sizeof(delivery_report*));
54 | memcpy(drs_tmp, drs->drs, drs->counter_limit * sizeof(delivery_report*));
55 | free(drs->drs);
56 | drs->drs = drs_tmp;
57 | drs->counter_limit = 2 * drs->counter_limit;
58 | }
59 |
60 | delivery_report* dr = (delivery_report*)calloc(1, sizeof(delivery_report));
61 |
62 | dr->msg_id = (unsigned long long)rkmessage->_private;
63 | dr->err = rkmessage->err;
64 |
65 | drs->drs[drs->counter] = dr;
66 | drs->counter++;
67 | }
68 |
69 |
70 | LIBRARY_API int Version(char* version, int len)
71 | {
72 | const char* kafkaver = rd_kafka_version_str();
73 | strncpy_s(version, len, kafkaver, 1+strlen(kafkaver));
74 |
75 | return rd_kafka_version();
76 | }
77 |
78 |
79 | LIBRARY_API int InitKafka(void** kafka)
80 | {
81 | kafka_struct* kf = (kafka_struct*)calloc(1, sizeof(kafka_struct));
82 | *kafka = (void*)kf;
83 |
84 | return 0;
85 | }
86 |
87 |
88 | LIBRARY_API int UninitProducer(void* prod)
89 | {
90 | kafka_struct* pr = (kafka_struct*)prod;
91 | int kerr = 0;
92 | if (pr->rk != NULL) {
93 | kerr = rd_kafka_flush((rd_kafka_t*)pr->rk, 500);
94 | rd_kafka_destroy((rd_kafka_t*)pr->rk);
95 | }
96 | free(pr);
97 |
98 | return kerr;
99 | }
100 |
101 | LIBRARY_API int UninitConsumer(void* cons)
102 | {
103 | kafka_struct* co = (kafka_struct*)cons;
104 | int kerr = 0;
105 | if (co->rk != NULL) {
106 | // Close consumer
107 | kerr = rd_kafka_consumer_close(co->rk);
108 | // Destroy the consumer.
109 | rd_kafka_destroy(co->rk);
110 | }
111 | free(co);
112 |
113 | return kerr;
114 | }
115 |
116 | LIBRARY_API int SetKafkaConf(void* kafka, char* key, char* val, char* errtxt, int *plen)
117 | {
118 | rd_kafka_conf_res_t res;
119 | size_t len = *plen;
120 | *errtxt = 0;
121 | kafka_struct* kf = (kafka_struct*)kafka;
122 | if (kf->conf == NULL) {
123 | rd_kafka_conf_t* config = rd_kafka_conf_new();
124 | kf->conf = config;
125 | }
126 |
127 | res=rd_kafka_conf_set((rd_kafka_conf_t*)kf->conf, key, val, errtxt, len);
128 | if (res == RD_KAFKA_CONF_OK)
129 | *plen = 0;
130 | else
131 | *plen = (int)strlen(errtxt);
132 |
133 | return (int) res;
134 | }
135 |
136 |
137 |
138 | LIBRARY_API int Produce(void* prod, char* topic, char* payload, uint32_t paylen, char* key,uint32_t keylen, int32_t partition, uint64_t* msgid, char* errtxt, int *plen)
139 | {
140 | int kerr = 0;
141 | size_t len = *plen;
142 | *plen=0;
143 | *errtxt = 0;
144 |
145 | kafka_struct* pr = (kafka_struct*)prod;
146 | *msgid = global_counter++;
147 |
148 | if (pr->rk == NULL) {
149 |
150 | // Delivery CB
151 | rd_kafka_conf_set_dr_msg_cb(pr->conf, on_delivery);
152 | delivery_reports* drs = (delivery_reports*)calloc(1, sizeof(delivery_reports));
153 | drs->counter = 0;
154 | drs->counter_limit = 100;
155 | drs->drs = (delivery_report**)calloc(100, sizeof(delivery_report*));
156 | rd_kafka_conf_set_opaque(pr->conf, (void*)drs);
157 |
158 | rd_kafka_t* rk = rd_kafka_new(RD_KAFKA_PRODUCER, pr->conf, errtxt, len);
159 | pr->rk = rk;
160 | if (NULL!=rk)
161 | *plen = 0;
162 | else {
163 | *plen = (int)strlen(errtxt);
164 | return 1; // Failed to initialize producer
165 | }
166 | //ok?
167 | pr->conf = NULL;
168 | }
169 |
170 | kerr = rd_kafka_producev(pr->rk,
171 | RD_KAFKA_V_PARTITION(partition),
172 | RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
173 | RD_KAFKA_V_VALUE(payload, paylen),
174 | RD_KAFKA_V_KEY(key, keylen),
175 | RD_KAFKA_V_TOPIC(topic),
176 | RD_KAFKA_V_OPAQUE((void*)*msgid),
177 | RD_KAFKA_V_END
178 | );
179 | return kerr;
180 | }
181 |
182 |
183 |
184 | //LIBRARY_API int CreateTopic(void** topic, void* kafka, char* topic_name, void* topic_conf)
185 | //{
186 | // kafka_struct* kf = (kafka_struct*)kafka;
187 | // rd_kafka_topic_new(kf->rk, topic_name, (rd_kafka_topic_conf_t*)topic_conf);
188 | // rd_kafka_topic_t* t = rd_kafka_topic_new(kf->rk, topic_name, (rd_kafka_topic_conf_t*)topic_conf);
189 | // *topic = (void*)t;
190 | // return 0;
191 | //}
192 | //
193 |
194 | LIBRARY_API int NewTopicPartitionList(void** subscr)
195 | {
196 | rd_kafka_topic_partition_list_t* subscription = rd_kafka_topic_partition_list_new(1);
197 | *subscr = (void*)subscription;
198 |
199 | return 0;
200 | }
201 |
202 |
203 | LIBRARY_API int SetTopicPartitionList(void* subscr, char* topic, int32_t partition)
204 | {
205 |
206 | rd_kafka_topic_partition_list_t* subscription = (rd_kafka_topic_partition_list_t*)subscr;
207 | rd_kafka_topic_partition_list_add(subscription, (const char*)topic, partition);
208 |
209 | return 0;
210 | }
211 |
212 | LIBRARY_API int SetOffset(void* subscr, char* topic, int32_t partition, int64_t offset)
213 | {
214 | rd_kafka_resp_err_t res;
215 | rd_kafka_topic_partition_list_t* subscription = (rd_kafka_topic_partition_list_t*)subscr;
216 | res = rd_kafka_topic_partition_list_set_offset(subscription, (const char*) topic, partition, offset);
217 |
218 | return (int)res;
219 | }
220 |
221 | LIBRARY_API int SubscribeConsumerTPList(void* kafka, void* subscr, char* errtxt, int *plen)
222 | {
223 | rd_kafka_resp_err_t res;
224 | size_t len = *plen;
225 | *errtxt = 0;
226 |
227 | kafka_struct* kf = (kafka_struct*)kafka;
228 | rd_kafka_topic_partition_list_t* subscription = (rd_kafka_topic_partition_list_t*)subscr;
229 |
230 | rd_kafka_t* rk = rd_kafka_new(RD_KAFKA_CONSUMER, kf->conf, errtxt, len);
231 | kf->rk = rk;
232 | if (NULL!=rk)
233 | *plen = 0;
234 | else
235 | *plen = (int)strlen(errtxt);
236 |
237 | res = rd_kafka_subscribe(kf->rk, subscription); // Only the "topic" field is used here
238 | //rd_kafka_resp_err_t err_p = rd_kafka_assign(kf->rk, subscription);
239 |
240 | rd_kafka_topic_partition_list_destroy(subscription);
241 |
242 | rd_kafka_poll_set_consumer(kf->rk);
243 | kf->conf = NULL;
244 | return (int) res;
245 | }
246 |
247 |
248 | LIBRARY_API int Consume(void* cons, char* topic,uint32_t *topiclen, char* payload,uint32_t *paylen,char* key, uint32_t *keylen,int32_t *partition, int64_t* offset, char* errtxt, int *plen)
249 | {
250 | kafka_struct* co = (kafka_struct*)cons;
251 | rd_kafka_message_t* rkmessage;
252 | size_t len = *plen;
253 | *errtxt = 0;
254 |
255 | if (NULL != co->msg)
256 | {
257 | rkmessage = co->msg;
258 | co->msg = NULL;
259 | }
260 | else
261 | rkmessage = rd_kafka_consumer_poll(co->rk, 500);
262 |
263 |
264 | if (rkmessage) {
265 |
266 | if ( 0 != rkmessage->err)
267 | {
268 | strncpy_s(errtxt, len, (char*)rkmessage->payload, rkmessage->len);
269 | *plen=(int)rkmessage->len;
270 | return rkmessage->err;
271 | }
272 |
273 | size_t tlen = strlen(rd_kafka_topic_name(rkmessage->rkt));
274 | if (*keylen < rkmessage->key_len || *paylen < rkmessage->len || *topiclen < tlen)
275 | {
276 | // not enough space to return; save message for next call
277 | co->msg = rkmessage;
278 | *topiclen = (uint32_t)tlen;
279 | *paylen = (uint32_t)rkmessage->len;
280 | *keylen = (uint32_t)rkmessage->key_len;
281 | strncpy_s(errtxt, len, "message too long", 17);
282 | return 2;
283 | }
284 |
285 | memcpy(topic, rd_kafka_topic_name(rkmessage->rkt), tlen);
286 | memcpy(payload, (char*)rkmessage->payload, rkmessage->len);
287 | memcpy(key, (char*)rkmessage->key, rkmessage->key_len);
288 | // strncpy_s(topic, *topiclen, rd_kafka_topic_name(rkmessage->rkt), tlen);
289 | // strncpy_s(payload, *paylen, (char*)rkmessage->payload, rkmessage->len);
290 | // strncpy_s(key, *keylen,(char*) rkmessage->key, rkmessage->key_len);
291 | *partition = rkmessage->partition;
292 | *offset = (int64_t)rkmessage->offset;
293 |
294 | *topiclen = (uint32_t)tlen;
295 | *paylen = (uint32_t)rkmessage->len;
296 | *keylen = (uint32_t)rkmessage->key_len;
297 |
298 | strncpy_s(errtxt, len, "", 1+strlen(""));
299 | rd_kafka_message_destroy(rkmessage);
300 | }
301 | else
302 | {
303 | strncpy_s(errtxt, len, "no msg", 1+strlen("no msg"));
304 | return 1;
305 | }
306 | return 0;
307 | }
308 |
309 | LIBRARY_API int Commit(void* cons, void* subscr)
310 | {
311 | kafka_struct* co = (kafka_struct*)cons;
312 | rd_kafka_t* rk = (rd_kafka_t*)co->rk;
313 | rd_kafka_topic_partition_list_t* offsets = (rd_kafka_topic_partition_list_t*)subscr;
314 | rd_kafka_resp_err_t res;
315 |
316 | // Allow only sync
317 | res = rd_kafka_commit(rk, offsets, 0);
318 |
319 | return (int) res;
320 | }
321 |
322 | LIBRARY_API int Committed(void* cons, void* subscr, int64_t* offset)
323 | {
324 | rd_kafka_resp_err_t res;
325 |
326 | kafka_struct* co = (kafka_struct*)cons;
327 | rd_kafka_t* rk = (rd_kafka_t*)co->rk;
328 |
329 | rd_kafka_topic_partition_list_t* subscription = (rd_kafka_topic_partition_list_t*)subscr;
330 |
331 | res = rd_kafka_committed(rk, subscription, 5000); // 5 sec max
332 |
333 | *offset = subscription->elems[0].offset;
334 |
335 | rd_kafka_topic_partition_list_destroy(subscription);
336 |
337 | return (int)res;
338 | }
339 |
340 | LIBRARY_API int DeliveryReport(void* prod, unsigned long long* msgid, int* err, int* plength)
341 | {
342 | kafka_struct* pr = (kafka_struct*)prod;
343 |
344 | // Unwrap the producer handle passed from APL
345 | rd_kafka_t* rk;
346 | rk = (rd_kafka_t*)pr->rk;
347 |
348 | if (rk == NULL)
349 | *plength = 0;
350 | else {
351 | // Retrieve the delivery reports collected for the producer
352 | delivery_reports* drs = (delivery_reports*)rd_kafka_opaque(rk);
353 |
354 | // Trigger the on_delivery function to produce the DR
355 | rd_kafka_poll(rk, 500);
356 |
357 | // Number of delivery callbacks requested
358 | // be careful, it is possible that we have sent n msgs, but
359 | // poll has triggered only m<n of them
360 |
361 | int req_drs = min(drs->counter, *plength);
362 |
363 | for (int i = 0; i < req_drs; i++)
364 | {
365 | msgid[i] = (unsigned long long) drs->drs[i]->msg_id;
366 | err[i] = (int)drs->drs[i]->err;
367 |
368 | // Free memory
369 | free(drs->drs[i]);
370 | drs->drs[i] = NULL;
371 | }
372 |
373 | // Check if the CB queue is empty
374 | if (*plength >= drs->counter) {
375 | *plength = drs->counter;
376 | drs->counter = 0;
377 | }
378 | // If not, move the queued messages at the beginning of the drs array and reset counters
379 | else {
380 | drs->counter = drs->counter - *plength;
381 | for (int i = 0; i < drs->counter; i++) {
382 | drs->drs[i] = drs->drs[i + *plength];
383 | drs->drs[i + *plength] = NULL;
384 | }
385 | }
386 | }
387 | return 0;
388 | }
389 |
390 | LIBRARY_API int DRMessageError(int* err, char* errtxt, int *plen)
391 | {
392 | size_t len = *plen;
393 | *errtxt = 0;
394 | rd_kafka_resp_err_t errorid = (rd_kafka_resp_err_t)*err;
395 | const char* DR_msgerror = rd_kafka_err2str(errorid);
396 | strncpy_s(errtxt, len, DR_msgerror, strlen(DR_msgerror));
397 | *plen =(int) strlen(DR_msgerror);
398 | return 0;
399 | }
400 |
401 |
402 | LIBRARY_API int NewTopicConf(void** topicconf)
403 | {
404 | *topicconf = (void*)rd_kafka_topic_conf_new();
405 | return 0;
406 | }
407 |
408 |
409 | LIBRARY_API int DelTopicConf(void* topicconf)
410 | {
411 | rd_kafka_topic_conf_destroy((rd_kafka_topic_conf_t*)topicconf);
412 |
413 | return 0;
414 | }
415 |
416 | LIBRARY_API int SetTopicConf(void* topicconf, char* key, char* val, char* errtxt, int *plen)
417 | {
418 | rd_kafka_conf_res_t res;
419 | size_t len = *plen;
420 | *errtxt = 0;
421 | res= rd_kafka_topic_conf_set((rd_kafka_topic_conf_t*)topicconf, key, val, errtxt, len);
422 | if (res == RD_KAFKA_CONF_OK)
423 | *plen = 0;
424 | else
425 | *plen = (int)strlen(errtxt);
426 |
427 | return (int)res;
428 | }
429 |
430 | void Add(char* buffer,const char* str, int* poff, int max)
431 | {
432 | int len =(int) strlen(str);
433 |
434 | if (buffer != NULL && max >= len + *poff)
435 | memcpy(buffer + *poff, str, len);
436 | *poff += len;
437 | }
438 |
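   | // Describe reports the exported entry points as a JSON list of
   | // Dyalog ⎕NA-style signatures, for the APL side to use when
   | // declaring the bindings.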
439 | LIBRARY_API int32_t Describe(char* buffer, int32_t* psize)
440 | {
441 | int off = 0;
442 | int ret = 0;
443 | Add(buffer, "{", &off, *psize);
444 | Add(buffer, "\"Version\":\"0.1\",\"Patterns\":[", &off, *psize);
445 | Add(buffer, "\"I4 %P|Version >0T1 U4\",", &off, *psize);
446 | Add(buffer, "\"I4 %P|InitKafka >P\",", &off, *psize);
447 | Add(buffer, "\"I4 %P|UninitProducer P\",", &off, *psize);
448 | Add(buffer, "\"I4 %P|UninitConsumer P\",", &off, *psize);
449 | Add(buffer, "\"I4 %P|SetKafkaConf P <0T1 <0T1 >0T1 =I4\",", &off, *psize);
450 | Add(buffer, "\"I4 %P|NewTopicPartitionList >P\",", &off, *psize);
451 | Add(buffer, "\"I4 %P|SetTopicPartitionList P <0T1 I4\",", &off, *psize);
452 | Add(buffer, "\"I4 %P|SetOffset P <0T1 I4 I8\",", &off, *psize);
453 | Add(buffer, "\"I4 %P|SubscribeConsumerTPList P P >0T1 =I4\",", &off, *psize);
454 | Add(buffer, "\"I4 %P|Consume P >0T1 =U4 >0T1 =U4 >0T1 =U4 >U4 >I8 >0T1 =I4\",", &off, *psize);
455 | Add(buffer, "\"I4 %P|Commit P P\",", &off, *psize);
456 | Add(buffer, "\"I4 %P|Committed P P >I8\",", &off, *psize);
457 | Add(buffer, "\"I4 %P|Produce P <0T1 <0T1 U4 <0T1 U4 I4 >U8 >0T1 =I4\",", &off, *psize);
458 | Add(buffer, "\"I4 %P|DeliveryReport P >I8[] >I4[] =I4\",", &off, *psize);
459 | Add(buffer, "\"I4 %P|DRMessageError 0T1 =I4\"", &off, *psize);
460 | Add(buffer, "]", &off, *psize);
461 | Add(buffer, "}", &off, *psize);
462 |
463 | if (buffer != NULL && off < *psize)
464 | *(buffer + off) = 0;
465 | else
466 | ret = off + 1;
467 | *psize = off + 1;
468 | return ret;
469 | }
470 |
--------------------------------------------------------------------------------