├── .gitignore ├── pony-kafka ├── .gitignore ├── utils │ └── bool_converter │ │ └── bool_converter.pony ├── custombuffered │ ├── reader.pony │ ├── codecs │ │ ├── varint_encoder.pony │ │ ├── big_endian_encoder.pony │ │ ├── little_endian_encoder.pony │ │ ├── varint_decoder.pony │ │ ├── big_endian_decoder.pony │ │ └── little_endian_decoder.pony │ ├── iso_reader.pony │ ├── writer.pony │ ├── _test.pony │ └── val_reader.pony ├── compression │ ├── xxhash.pony │ ├── _test.pony │ ├── lz4.pony │ ├── snappy.pony │ └── zlib.pony ├── customnet │ ├── custom_tcp_connection.pony │ ├── tcp_connection_handler.pony │ ├── mock_tcp_connection_handler.pony │ └── custom_tcp_connection_notify.pony ├── fsm │ └── fsm.pony ├── customlogger │ └── logger.pony └── kafka_broker_connection.pony ├── misc └── kafka │ ├── start_zookeeper.sh │ ├── start_kafka_0.sh │ ├── start_kafka_1.sh │ ├── start_kafka_2.sh │ ├── cleanup_data_files.sh │ ├── create_replicate_topic.sh │ ├── download_kafka_java.sh │ ├── zookeeper.properties │ ├── kafka-server-0.properties │ ├── kafka-server-1.properties │ └── kafka-server-2.properties ├── LICENSE ├── README.md ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md └── examples └── simple └── main.pony /.gitignore: -------------------------------------------------------------------------------- 1 | pony-kafka1 2 | -------------------------------------------------------------------------------- /pony-kafka/.gitignore: -------------------------------------------------------------------------------- 1 | pony-kafka 2 | *.dSYM 3 | -------------------------------------------------------------------------------- /misc/kafka/start_zookeeper.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 4 | 5 | cd ~ 6 | 7 | kafka/bin/zookeeper-server-start.sh \ 8 | "${DIR}/zookeeper.properties" 9 | -------------------------------------------------------------------------------- /misc/kafka/start_kafka_0.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 4 | 5 | cd ~ 6 | 7 | kafka/bin/kafka-server-start.sh \ 8 | "${DIR}/kafka-server-0.properties" 9 | 10 | -------------------------------------------------------------------------------- /misc/kafka/start_kafka_1.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 4 | 5 | cd ~ 6 | 7 | kafka/bin/kafka-server-start.sh \ 8 | "${DIR}/kafka-server-1.properties" 9 | 10 | -------------------------------------------------------------------------------- /misc/kafka/start_kafka_2.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 4 | 5 | cd ~ 6 | 7 | kafka/bin/kafka-server-start.sh \ 8 | "${DIR}/kafka-server-2.properties" 9 | 10 | -------------------------------------------------------------------------------- /pony-kafka/utils/bool_converter/bool_converter.pony: -------------------------------------------------------------------------------- 1 | primitive BoolConverter 2 | fun bool_to_u8(x: Bool): U8 => 3 | if x then 1 else 0 end 4 | 5 | fun u8_to_bool(x: U8): Bool => 6 | if x > 0 then true else false end 7 | -------------------------------------------------------------------------------- /misc/kafka/cleanup_data_files.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 4 | 5 | sudo rm -r /data/zookeeper 6 | sudo rm -r /data/kafka-logs-0 7 | sudo rm -r /data/kafka-logs-1 8 | sudo rm -r /data/kafka-logs-2 9 | 10 | -------------------------------------------------------------------------------- /misc/kafka/create_replicate_topic.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 4 | 5 | cd ~ 6 | 7 | kafka/bin/kafka-topics.sh --create \ 8 | --zookeeper localhost:2181 \ 9 | --replication-factor 3 \ 10 | --partitions 15 \ 11 | --topic test 12 | 13 | -------------------------------------------------------------------------------- /misc/kafka/download_kafka_java.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cd ~ 4 | 5 | sudo add-apt-repository -y ppa:webupd8team/java 6 | sudo apt-get -y update 7 | sudo sh -c 'echo oracle-java8-installer shared/accepted-oracle-license-v1-1 select true | /usr/bin/debconf-set-selections' 8 | sudo apt-get -y install oracle-java8-installer 9 | 10 | wget http://download.nextag.com/apache/kafka/0.10.2.1/kafka_2.11-0.10.2.1.tgz 11 | tar -xvzf kafka_2.11-0.10.2.1.tgz 12 | 13 | mv kafka_2.11-0.10.2.1 kafka 14 | 15 | sudo chmod 777 /data 16 | 17 | -------------------------------------------------------------------------------- /pony-kafka/custombuffered/reader.pony: -------------------------------------------------------------------------------- 1 | trait Reader 2 | fun size(): USize 3 | fun ref clear() 4 | fun ref skip(n: USize) ? 5 | fun ref block(len: USize): Array[U8] iso^ ? 6 | fun ref read_byte(): U8 ? 7 | fun ref read_bytes(len: USize): (Array[U8] val | Array[Array[U8] val] val | Array[U8] iso^ | Array[Array[U8] iso] iso^) ? 8 | fun ref read_contiguous_bytes(len: USize): (Array[U8] val | Array[U8] iso^) ? 9 | 10 | trait PeekableReader is Reader 11 | fun box peek_byte(offset: USize = 0): U8 ? 12 | fun box peek_bytes(len: USize, offset: USize = 0): 13 | (Array[U8] val | Array[Array[U8] val] val) ? 14 | 15 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (C) 2017, Sendence LLC 2 | 3 | NOTE: The majority of the project is under the Apache License, Version 2.0 as 4 | noted below but some files are BSD licensed. Every file has a license at the 5 | top to indicate how it is licensed. 6 | 7 | 8 | Licensed under the Apache License, Version 2.0 (the "License"); 9 | you may not use this file except in compliance with the License. 10 | You may obtain a copy of the License at 11 | 12 | http://www.apache.org/licenses/LICENSE-2.0 13 | 14 | Unless required by applicable law or agreed to in writing, software 15 | distributed under the License is distributed on an "AS IS" BASIS, 16 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | See the License for the specific language governing permissions and 18 | limitations under the License. 19 | -------------------------------------------------------------------------------- /misc/kafka/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. 
See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir=/data/zookeeper 17 | # the port at which the clients will connect 18 | clientPort=2181 19 | # disable the per-ip limit on the number of connections since this is a non-production config 20 | maxClientCnxns=0 21 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Pony-Kafka 2 | 3 | Welcome to Pony Kafka. 4 | 5 | This is a pure Kafka client written in Pony. The goal is to eventually reach feature parity with the official Kafka client. 6 | 7 | # Why 8 | 9 | This project exists because the existing alternatives would not have integrated cleanly or easily into Pony and its normal "async everything" manner of working. 10 | 11 | # Building 12 | 13 | You need [ponyc](https://github.com/ponylang/ponyc) to compile `pony-kafka`. It is currently tested against `ponyc` master. 14 | 15 | You also need the following (in addition to what is needed for Pony itself): 16 | 17 | * LZ4 18 | * Snappy 19 | * Zlib 20 | 21 | For Ubuntu 16.04 or newer you can run: 22 | 23 | ```bash 24 | sudo apt-get install libsnappy-dev liblz4-dev zlib1g-dev 25 | ``` 26 | 27 | For older Ubuntu you can run: 28 | 29 | ```bash 30 | sudo apt-get install libsnappy-dev 31 | cd /tmp 32 | wget -O liblz4-1.7.5.tar.gz https://github.com/lz4/lz4/archive/v1.7.5.tar.gz 33 | tar zxvf liblz4-1.7.5.tar.gz 34 | cd lz4-1.7.5 35 | sudo make install 36 | ``` 37 | 38 | For OSX you can run: 39 | 40 | ```bash 41 | brew install snappy lz4 42 | ``` 43 | 44 | You can then build the pony-kafka tests by running: 45 | 46 | ```bash 47 | ponyc pony-kafka 48 | ``` 49 | 50 | or the example application by running: 51 | 52 | ```bash 53 | ponyc examples/simple 54 | ``` 55 | 56 | # Current status 57 | 58 | This is currently alpha-quality software that still needs more work before it is production ready.
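To try the client against a local cluster, the helper scripts under `misc/kafka` can stand up ZooKeeper plus three Kafka brokers (ports 9092-9094) and create a replicated test topic. A rough sketch of the intended order, assuming Kafka is unpacked into `~/kafka` and `/data` is writable (both set up by `download_kafka_java.sh`):

```bash
misc/kafka/download_kafka_java.sh     # one-time: install Java, fetch Kafka into ~/kafka
misc/kafka/start_zookeeper.sh         # terminal 1: ZooKeeper on port 2181
misc/kafka/start_kafka_0.sh           # terminals 2-4: brokers 0, 1 and 2
misc/kafka/start_kafka_1.sh
misc/kafka/start_kafka_2.sh
misc/kafka/create_replicate_topic.sh  # 15-partition topic "test", replication factor 3
```

`cleanup_data_files.sh` wipes the ZooKeeper and broker data directories between runs.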
59 | 60 | A quick summary of features: 61 | 62 | Feature | Description | Status 63 | --- | --- | --- 64 | Basic Consumer | Ability to connect to Kafka brokers and consume messages | Implemented 65 | Group Consumer | Ability to do high-level consumer failover like the official Kafka client | Not Implemented 66 | Producer | Ability to connect to Kafka brokers and produce messages | Implemented 67 | Leader Failover | Ability to correctly recover from/react to Kafka leader failover | Partially Implemented 68 | Compression | Ability to use LZ4/Snappy/Zlib compression for message sets | Implemented 69 | Message Format V2 | Ability to use message set format version 2 | Not Implemented 70 | Idempotence/Transactions | Ability to use idempotence/transactions | Not Implemented 71 | Metrics | Ability to collect metrics and provide periodic metrics reports | Not Implemented 72 | Security | Ability to use SSL/SASL/etc. to secure connections to Kafka brokers | Not Implemented 73 | Message Interceptors | Ability to intercept messages before produce and after consume, to modify them or extract metadata for monitoring and other purposes | Not Implemented 74 | Producer Batching | Ability to batch produce requests for efficiency | Implemented 75 | Producer Rate Limiting | Ability to limit the number of outstanding produce requests | Implemented 76 | Throttling | Ability to tell data producers to slow down due to network congestion | Implemented 77 | Message Delivery Reports | Report back to producers once Kafka has confirmed a message has been successfully stored | Implemented 78 | Logging | Logging of activity and errors | Partially Implemented 79 | Error Handling | Ability to gracefully handle errors (retry if possible; fail fast if not) | Partially Implemented 80 | Documentation | Comprehensive documentation for developers using Pony Kafka and developers enhancing Pony Kafka | Not Implemented 81 | Testing | Comprehensive test suite to confirm everything is working as expected | Not Implemented 82 | -------------------------------------------------------------------------------- /pony-kafka/custombuffered/codecs/varint_encoder.pony: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Copyright (C) 2016-2017, Sendence LLC 4 | Copyright (C) 2016-2017, The Pony Developers 5 | Copyright (c) 2014-2015, Causality Ltd. 6 | All rights reserved. 7 | 8 | Redistribution and use in source and binary forms, with or without 9 | modification, are permitted provided that the following conditions are met: 10 | 11 | 1. Redistributions of source code must retain the above copyright notice, this 12 | list of conditions and the following disclaimer. 13 | 2. Redistributions in binary form must reproduce the above copyright notice, 14 | this list of conditions and the following disclaimer in the documentation 15 | and/or other materials provided with the distribution. 16 | 17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 18 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 19 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 20 | DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 21 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 22 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 23 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 24 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 26 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 | 28 | */ 29 | 30 | use ".." 31 | use "../../utils/bool_converter" 32 | 33 | primitive VarIntEncoder 34 | fun u8(wb: Writer, data: U8) => 35 | """ 36 | Write a byte to the buffer in base 128 varint encoding. 37 | """ 38 | _encode_varint(wb, data.u64()) 39 | 40 | fun i8(wb: Writer, data: I8) => 41 | """ 42 | Write an I8 to the buffer in zig zag base 128 varint encoding. 43 | """ 44 | _encode_svarint(wb, data.i64()) 45 | 46 | fun bool(wb: Writer, data: Bool) => 47 | """ 48 | Write a Bool to the buffer in base 128 varint encoding. 49 | """ 50 | _encode_varint(wb, BoolConverter.bool_to_u8(data).u64()) 51 | 52 | fun u16(wb: Writer, data: U16) => 53 | """ 54 | Write a U16 to the buffer in base 128 varint encoding. 55 | """ 56 | _encode_varint(wb, data.u64()) 57 | 58 | fun i16(wb: Writer, data: I16) => 59 | """ 60 | Write an I16 to the buffer in zig zag base 128 varint encoding. 61 | """ 62 | _encode_svarint(wb, data.i64()) 63 | 64 | fun u32(wb: Writer, data: U32) => 65 | """ 66 | Write a U32 to the buffer in base 128 varint encoding. 67 | """ 68 | _encode_varint(wb, data.u64()) 69 | 70 | fun i32(wb: Writer, data: I32) => 71 | """ 72 | Write an I32 to the buffer in zig zag base 128 varint encoding. 73 | """ 74 | _encode_svarint(wb, data.i64()) 75 | 76 | fun u64(wb: Writer, data: U64) => 77 | """ 78 | Write a U64 to the buffer in base 128 varint encoding. 79 | """ 80 | _encode_varint(wb, data.u64()) 81 | 82 | fun i64(wb: Writer, data: I64) => 83 | """ 84 | Write an I64 to the buffer in zig zag base 128 varint encoding. 85 | """ 86 | _encode_svarint(wb, data.i64()) 87 | 88 | fun _encode_svarint(wb: Writer, data: I64) => 89 | _encode_varint(wb, ((data << 1) xor (data >> 63)).u64()) // zig-zag: maps 0, -1, 1, -2, ... to 0, 1, 2, 3, ... so small magnitudes encode compactly 90 | 91 | fun _encode_varint(wb: Writer, data: U64) => 92 | var d = data 93 | repeat 94 | wb.write_byte((d.u8() and 0x7f) or (if (d > 0x7f) then 0x80 else 0 end)) // low 7 bits, least significant group first; the high bit marks a continuation byte 95 | d = d >> 7 96 | until (d == 0) end 97 | 98 | -------------------------------------------------------------------------------- /pony-kafka/compression/xxhash.pony: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | 15 | */ 16 | 17 | // based on https://github.com/Cyan4973/xxHash/blob/dev/xxhash.c#L263 18 | 19 | primitive XXHash 20 | // TODO: hash64 21 | // TODO: should the null terminator for strings be included, to match the 22 | // behavior of C string hashing?
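// e.g. (from compression/_test.pony): hash32 of "Hello, world!" followed by a 0x00 byte, with seed 0, yields 0x9e5e7e93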
23 | fun hash32(buffer: (String | Array[U8] box), seed: U32, buffer_offset: USize = 0, num_bytes: USize = -1): 24 | U32 ? 25 | => 26 | match buffer 27 | | let b: String => _hash32(b.array(), seed, buffer_offset, num_bytes)? 28 | | let b: Array[U8] box => _hash32(b, seed, buffer_offset, num_bytes)? 29 | end 30 | 31 | fun _hash32(buffer: Array[U8] box, seed: U32 = 0, buffer_offset: USize = 0, 32 | num_bytes: USize = -1): U32 ? 33 | => 34 | var h32: U32 = 0 35 | var offset: USize = buffer_offset 36 | var size: USize = buffer.size().min(num_bytes) 37 | var last: USize = size + offset 38 | 39 | if size > 16 then 40 | let limit = last - 16 41 | var v1: U32 = seed + prime32_1() + prime32_2() 42 | var v2: U32 = seed + prime32_2() 43 | var v3: U32 = seed + 0 44 | var v4: U32 = seed - prime32_1() 45 | 46 | repeat 47 | v1 = round32(v1, read32(buffer, offset)?) 48 | offset = offset + 4 49 | v2 = round32(v2, read32(buffer, offset)?) 50 | offset = offset + 4 51 | v3 = round32(v3, read32(buffer, offset)?) 52 | offset = offset + 4 53 | v4 = round32(v4, read32(buffer, offset)?) 54 | offset = offset + 4 55 | until offset > limit end 56 | 57 | h32 = rotl32(v1, 1) + rotl32(v2, 7) + rotl32(v3, 12) + rotl32(v4, 18) 58 | 59 | else 60 | h32 = seed + prime32_5() 61 | end 62 | 63 | h32 = h32 + size.u32() 64 | 65 | while (offset + 4) <= last do 66 | h32 = h32 + (read32(buffer, offset)? * prime32_3()) 67 | h32 = rotl32(h32, 17) * prime32_4() 68 | offset = offset + 4 69 | end 70 | 71 | while offset < last do 72 | h32 = h32 + (buffer(offset)?.u32() * prime32_5()) 73 | h32 = rotl32(h32, 11) * prime32_1() 74 | offset = offset + 1 75 | end 76 | 77 | h32 = h32 xor (h32 >> 15) 78 | h32 = h32 * prime32_2() 79 | h32 = h32 xor (h32 >> 13) 80 | h32 = h32 * prime32_3() 81 | h32 = h32 xor (h32 >> 16) 82 | 83 | h32 84 | 85 | fun read32(buffer: Array[U8] box, offset: USize): U32 ? => 86 | // TODO: figure out some way of detecting endianness; big endian needs byte 87 | // swapping 88 | (buffer(offset + 3)?.u32() << 24) or (buffer(offset + 2)?.u32() << 16) or 89 | (buffer(offset + 1)?.u32() << 8) or buffer(offset + 0)?.u32() 90 | 91 | fun round32(seed: U32, value: U32): U32 => 92 | var x = seed + (value * prime32_2()) 93 | x = rotl32(x, 13) // keep the rotation in the accumulator, as in the reference xxHash round 94 | x * prime32_1() 95 | 96 | fun rotl32(x: U32, r: U32): U32 => 97 | ((x << r) or (x >> (32 - r))) 98 | 99 | fun prime32_1(): U32 => 2654435761 100 | 101 | fun prime32_2(): U32 => 2246822519 102 | 103 | fun prime32_3(): U32 => 3266489917 104 | 105 | fun prime32_4(): U32 => 668265263 106 | 107 | fun prime32_5(): U32 => 374761393 108 | -------------------------------------------------------------------------------- /pony-kafka/customnet/custom_tcp_connection.pony: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Copyright (C) 2016-2017, Sendence LLC 4 | Copyright (C) 2016-2017, The Pony Developers 5 | Copyright (c) 2014-2015, Causality Ltd. 6 | All rights reserved. 7 | 8 | Redistribution and use in source and binary forms, with or without 9 | modification, are permitted provided that the following conditions are met: 10 | 11 | 1. Redistributions of source code must retain the above copyright notice, this 12 | list of conditions and the following disclaimer. 13 | 2. Redistributions in binary form must reproduce the above copyright notice, 14 | this list of conditions and the following disclaimer in the documentation 15 | and/or other materials provided with the distribution.
16 | 17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 18 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 19 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 20 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 21 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 22 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 23 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 24 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 26 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 | 28 | */ 29 | 30 | use "net" 31 | 32 | trait CustomTCPConnection 33 | fun ref get_handler(): TCPConnectionHandler 34 | 35 | be write(data: ByteSeq) => 36 | """ 37 | Write a single sequence of bytes. 38 | """ 39 | get_handler().write(data) 40 | 41 | be queue(data: ByteSeq) => 42 | """ 43 | Queue a single sequence of bytes on linux. 44 | Do nothing on windows. 45 | """ 46 | get_handler().queue(data) 47 | 48 | be writev(data: ByteSeqIter) => 49 | """ 50 | Write a sequence of sequences of bytes. 51 | """ 52 | get_handler().writev(data) 53 | 54 | be queuev(data: ByteSeqIter) => 55 | """ 56 | Queue a sequence of sequences of bytes on linux. 57 | Do nothing on windows. 58 | """ 59 | get_handler().queuev(data) 60 | 61 | be send_queue() => 62 | """ 63 | Write pending queue to network on linux. 64 | Do nothing on windows. 65 | """ 66 | get_handler().send_queue() 67 | 68 | be mute(d: Any tag) => 69 | """ 70 | Temporarily suspend reading off this TCPConnection until such time as 71 | `unmute` is called. 72 | """ 73 | get_handler().mute(d) 74 | 75 | be unmute(d: Any tag) => 76 | """ 77 | Start reading off this TCPConnection again after having been muted. 78 | """ 79 | get_handler().unmute(d) 80 | 81 | be set_notify(notify: CustomTCPConnectionNotify iso) => 82 | """ 83 | Change the notifier. 84 | """ 85 | get_handler().set_notify(consume notify) 86 | 87 | be dispose() => 88 | """ 89 | Close the connection gracefully once all writes are sent. 90 | """ 91 | get_handler().dispose() 92 | 93 | be _event_notify(event: AsioEventID, flags: U32, arg: U32) => 94 | """ 95 | Handle socket events. 96 | """ 97 | get_handler().event_notify(event, flags, arg) 98 | 99 | be _read_again() => 100 | """ 101 | Resume reading. 102 | """ 103 | get_handler().pending_reads() 104 | 105 | fun ref expect(qty: USize = 0) => 106 | """ 107 | A `received` call on the notifier must contain exactly `qty` bytes. If 108 | `qty` is zero, the call can contain any amount of data. This has no effect 109 | if called in the `sent` notifier callback. 110 | """ 111 | get_handler().expect(qty) 112 | 113 | be reconnect() => 114 | get_handler().reconnect() 115 | 116 | -------------------------------------------------------------------------------- /pony-kafka/customnet/tcp_connection_handler.pony: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Copyright (C) 2016-2017, Sendence LLC 4 | Copyright (C) 2016-2017, The Pony Developers 5 | Copyright (c) 2014-2015, Causality Ltd. 6 | All rights reserved. 7 | 8 | Redistribution and use in source and binary forms, with or without 9 | modification, are permitted provided that the following conditions are met: 10 | 11 | 1. 
Redistributions of source code must retain the above copyright notice, this 12 | list of conditions and the following disclaimer. 13 | 2. Redistributions in binary form must reproduce the above copyright notice, 14 | this list of conditions and the following disclaimer in the documentation 15 | and/or other materials provided with the distribution. 16 | 17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 18 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 19 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 20 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 21 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 22 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 23 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 24 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 26 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 | 28 | */ 29 | 30 | use "net" 31 | 32 | trait TCPConnectionHandler 33 | 34 | fun ref write(data: ByteSeq) 35 | """ 36 | Write a single sequence of bytes. 37 | """ 38 | 39 | fun ref queue(data: ByteSeq) 40 | """ 41 | Queue a single sequence of bytes on linux. 42 | Do nothing on windows. 43 | """ 44 | 45 | fun ref writev(data: ByteSeqIter) 46 | """ 47 | Write a sequence of sequences of bytes. 48 | """ 49 | 50 | fun ref queuev(data: ByteSeqIter) 51 | """ 52 | Queue a sequence of sequences of bytes on linux. 53 | Do nothing on windows. 54 | """ 55 | 56 | fun ref send_queue() 57 | """ 58 | Write pending queue to network on linux. 59 | Do nothing on windows. 60 | """ 61 | 62 | fun ref mute(d: Any tag) 63 | """ 64 | Temporarily suspend reading off this TCPConnection until such time as 65 | `unmute` is called. 66 | """ 67 | 68 | fun ref unmute(d: Any tag) 69 | """ 70 | Start reading off this TCPConnection again after having been muted. 71 | """ 72 | 73 | fun ref set_notify(notify: CustomTCPConnectionNotify iso) 74 | """ 75 | Change the notifier. 76 | """ 77 | 78 | fun ref dispose() 79 | """ 80 | Close the connection gracefully once all writes are sent. 81 | """ 82 | 83 | fun ref event_notify(event: AsioEventID, flags: U32, arg: U32) 84 | """ 85 | Handle socket events. 86 | """ 87 | 88 | fun ref pending_reads() 89 | """ 90 | Unless this connection is currently muted, read while data is available, 91 | guessing the next packet length as we go. If we read 4 kb of data, send 92 | ourself a resume message and stop reading, to avoid starving other actors. 93 | """ 94 | 95 | fun ref expect(qty: USize = 0) 96 | """ 97 | A `received` call on the notifier must contain exactly `qty` bytes. If 98 | `qty` is zero, the call can contain any amount of data. This has no effect 99 | if called in the `sent` notifier callback. 100 | """ 101 | 102 | fun ref reconnect() 103 | """ 104 | A `reconnect` call to ask the handler to handle a reconnection request. 105 | """ 106 | 107 | fun local_address(): NetAddress 108 | """ 109 | Return the local IP address. 110 | """ 111 | 112 | fun remote_address(): NetAddress 113 | """ 114 | Return the remote IP address. 115 | """ 116 | 117 | fun requested_address(): (String, String) 118 | """ 119 | Return the host and service that were originally provided to the 120 | @pony_os_listen_tcp method. 
121 | """ 122 | -------------------------------------------------------------------------------- /pony-kafka/custombuffered/codecs/big_endian_encoder.pony: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Copyright (C) 2016-2017, Sendence LLC 4 | Copyright (C) 2016-2017, The Pony Developers 5 | Copyright (c) 2014-2015, Causality Ltd. 6 | All rights reserved. 7 | 8 | Redistribution and use in source and binary forms, with or without 9 | modification, are permitted provided that the following conditions are met: 10 | 11 | 1. Redistributions of source code must retain the above copyright notice, this 12 | list of conditions and the following disclaimer. 13 | 2. Redistributions in binary form must reproduce the above copyright notice, 14 | this list of conditions and the following disclaimer in the documentation 15 | and/or other materials provided with the distribution. 16 | 17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 18 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 19 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 20 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 21 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 22 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 23 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 24 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 26 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 | 28 | */ 29 | 30 | use ".." 31 | use "../../utils/bool_converter" 32 | 33 | primitive BigEndianEncoder 34 | fun u8(wb: Writer, data: U8) => 35 | """ 36 | Write a byte to the buffer. 37 | """ 38 | wb.write_byte(data) 39 | 40 | fun i8(wb: Writer, data: I8) => 41 | """ 42 | Write an i8 to the buffer. 43 | """ 44 | u8(wb, data.u8()) 45 | 46 | fun bool(wb: Writer, data: Bool) => 47 | """ 48 | Write a Bool to the buffer. 49 | """ 50 | u8(wb, BoolConverter.bool_to_u8(data)) 51 | 52 | fun u16(wb: Writer, data: U16) => 53 | """ 54 | Write a U16 to the buffer in big-endian byte order. 55 | """ 56 | wb.write_two_bytes((data >> 8).u8(), data.u8()) 57 | 58 | fun i16(wb: Writer, data: I16) => 59 | """ 60 | Write an I16 to the buffer in big-endian byte order. 61 | """ 62 | u16(wb, data.u16()) 63 | 64 | fun u32(wb: Writer, data: U32) => 65 | """ 66 | Write a U32 to the buffer in big-endian byte order. 67 | """ 68 | wb.write_four_bytes((data >> 24).u8(), (data >> 16).u8(), 69 | (data >> 8).u8(), data.u8()) 70 | 71 | fun i32(wb: Writer, data: I32) => 72 | """ 73 | Write an I32 to the buffer in big-endian byte order. 74 | """ 75 | u32(wb, data.u32()) 76 | 77 | fun f32(wb: Writer, data: F32) => 78 | """ 79 | Write an F32 to the buffer in big-endian byte order. 80 | """ 81 | u32(wb, data.bits()) 82 | 83 | fun u64(wb: Writer, data: U64) => 84 | """ 85 | Write a U64 to the buffer in big-endian byte order. 86 | """ 87 | wb.write_eight_bytes((data >> 56).u8(), (data >> 48).u8(), 88 | (data >> 40).u8(), (data >> 32).u8(), (data >> 24).u8(), 89 | (data >> 16).u8(), (data >> 8).u8(), data.u8()) 90 | 91 | fun i64(wb: Writer, data: I64) => 92 | """ 93 | Write an I64 to the buffer in big-endian byte order. 
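The value is reinterpreted as a U64, so, for example, -1 is written as eight 0xFF bytes.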
94 | """ 95 | u64(wb, data.u64()) 96 | 97 | fun f64(wb: Writer, data: F64) => 98 | """ 99 | Write an F64 to the buffer in big-endian byte order. 100 | """ 101 | u64(wb, data.bits()) 102 | 103 | fun u128(wb: Writer, data: U128) => 104 | """ 105 | Write a U128 to the buffer in big-endian byte order. 106 | """ 107 | wb.write_sixteen_bytes((data >> 120).u8(), (data >> 112).u8(), 108 | (data >> 104).u8(), (data >> 96).u8(), (data >> 88).u8(), 109 | (data >> 80).u8(), (data >> 72).u8(), (data >> 64).u8(), 110 | (data >> 56).u8(), (data >> 48).u8(), (data >> 40).u8(), 111 | (data >> 32).u8(), (data >> 24).u8(), (data >> 16).u8(), 112 | (data >> 8).u8(), data.u8()) 113 | 114 | fun i128(wb: Writer, data: I128) => 115 | """ 116 | Write an I128 to the buffer in big-endian byte order. 117 | """ 118 | u128(wb, data.u128()) 119 | -------------------------------------------------------------------------------- /pony-kafka/custombuffered/codecs/little_endian_encoder.pony: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Copyright (C) 2016-2017, Sendence LLC 4 | Copyright (C) 2016-2017, The Pony Developers 5 | Copyright (c) 2014-2015, Causality Ltd. 6 | All rights reserved. 7 | 8 | Redistribution and use in source and binary forms, with or without 9 | modification, are permitted provided that the following conditions are met: 10 | 11 | 1. Redistributions of source code must retain the above copyright notice, this 12 | list of conditions and the following disclaimer. 13 | 2. Redistributions in binary form must reproduce the above copyright notice, 14 | this list of conditions and the following disclaimer in the documentation 15 | and/or other materials provided with the distribution. 16 | 17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 18 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 19 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 20 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 21 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 22 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 23 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 24 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 26 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 | 28 | */ 29 | 30 | use ".." 31 | use "../../utils/bool_converter" 32 | 33 | primitive LittleEndianEncoder 34 | fun u8(wb: Writer, data: U8) => 35 | """ 36 | Write a byte to the buffer. 37 | """ 38 | wb.write_byte(data) 39 | 40 | fun i8(wb: Writer, data: I8) => 41 | """ 42 | Write an i8 to the buffer. 43 | """ 44 | u8(wb, data.u8()) 45 | 46 | fun bool(wb: Writer, data: Bool) => 47 | """ 48 | Write a Bool to the buffer. 49 | """ 50 | u8(wb, BoolConverter.bool_to_u8(data)) 51 | 52 | fun u16(wb: Writer, data: U16) => 53 | """ 54 | Write a U16 to the buffer in little-endian byte order. 55 | """ 56 | wb.write_two_bytes(data.u8(), (data >> 8).u8()) 57 | 58 | fun i16(wb: Writer, data: I16) => 59 | """ 60 | Write an I16 to the buffer in little-endian byte order. 61 | """ 62 | u16(wb, data.u16()) 63 | 64 | fun u32(wb: Writer, data: U32) => 65 | """ 66 | Write a U32 to the buffer in little-endian byte order. 
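For example, 0x11223344 is written as the byte sequence 0x44 0x33 0x22 0x11.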
67 | """ 68 | wb.write_four_bytes(data.u8(), (data >> 8).u8(), (data >> 16).u8(), 69 | (data >> 24).u8()) 70 | 71 | fun i32(wb: Writer, data: I32) => 72 | """ 73 | Write an I32 to the buffer in little-endian byte order. 74 | """ 75 | u32(wb, data.u32()) 76 | 77 | fun f32(wb: Writer, data: F32) => 78 | """ 79 | Write an F32 to the buffer in little-endian byte order. 80 | """ 81 | u32(wb, data.bits()) 82 | 83 | fun u64(wb: Writer, data: U64) => 84 | """ 85 | Write a U64 to the buffer in little-endian byte order. 86 | """ 87 | wb.write_eight_bytes(data.u8(), (data >> 8).u8(), (data >> 16).u8(), 88 | (data >> 24).u8(), (data >> 32).u8(), (data >> 40).u8(), 89 | (data >> 48).u8(), (data >> 56).u8()) 90 | 91 | fun i64(wb: Writer, data: I64) => 92 | """ 93 | Write an I64 to the buffer in little-endian byte order. 94 | """ 95 | u64(wb, data.u64()) 96 | 97 | fun f64(wb: Writer, data: F64) => 98 | """ 99 | Write an F64 to the buffer in little-endian byte order. 100 | """ 101 | u64(wb, data.bits()) 102 | 103 | fun u128(wb: Writer, data: U128) => 104 | """ 105 | Write a U128 to the buffer in little-endian byte order. 106 | """ 107 | wb.write_sixteen_bytes(data.u8(), (data >> 8).u8(), (data >> 16).u8(), 108 | (data >> 24).u8(), (data >> 32).u8(), (data >> 40).u8(), 109 | (data >> 48).u8(), (data >> 56).u8(), (data >> 64).u8(), 110 | (data >> 72).u8(), (data >> 80).u8(), (data >> 88).u8(), 111 | (data >> 96).u8(), (data >> 104).u8(), (data >> 112).u8(), 112 | (data >> 120).u8()) 113 | 114 | fun i128(wb: Writer, data: I128) => 115 | """ 116 | Write an I128 to the buffer in little-endian byte order. 117 | """ 118 | u128(wb, data.u128()) 119 | -------------------------------------------------------------------------------- /pony-kafka/customnet/mock_tcp_connection_handler.pony: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Copyright (C) 2016-2017, Sendence LLC 4 | Copyright (C) 2016-2017, The Pony Developers 5 | Copyright (c) 2014-2015, Causality Ltd. 6 | All rights reserved. 7 | 8 | Redistribution and use in source and binary forms, with or without 9 | modification, are permitted provided that the following conditions are met: 10 | 11 | 1. Redistributions of source code must retain the above copyright notice, this 12 | list of conditions and the following disclaimer. 13 | 2. Redistributions in binary form must reproduce the above copyright notice, 14 | this list of conditions and the following disclaimer in the documentation 15 | and/or other materials provided with the distribution. 16 | 17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 18 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 19 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 20 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 21 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 22 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 23 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 24 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 26 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 | 28 | */ 29 | 30 | use "net" 31 | 32 | class MockTCPConnectionHandler is TCPConnectionHandler 33 | new create() 34 | => 35 | None 36 | 37 | fun ref write(data: ByteSeq) => 38 | """ 39 | Write a single sequence of bytes. 40 | """ 41 | None 42 | 43 | fun ref queue(data: ByteSeq) => 44 | """ 45 | Queue a single sequence of bytes on linux. 46 | Do nothing on windows. 47 | """ 48 | None 49 | 50 | fun ref writev(data: ByteSeqIter) => 51 | """ 52 | Write a sequence of sequences of bytes. 53 | """ 54 | None 55 | 56 | fun ref queuev(data: ByteSeqIter) => 57 | """ 58 | Queue a sequence of sequences of bytes on linux. 59 | Do nothing on windows. 60 | """ 61 | None 62 | 63 | fun ref send_queue() => 64 | """ 65 | Write pending queue to network on linux. 66 | Do nothing on windows. 67 | """ 68 | None 69 | 70 | fun ref mute(d: Any tag) => 71 | """ 72 | Temporarily suspend reading off this TCPConnection until such time as 73 | `unmute` is called. 74 | """ 75 | None 76 | 77 | fun ref unmute(d: Any tag) => 78 | """ 79 | Start reading off this TCPConnection again after having been muted. 80 | """ 81 | None 82 | 83 | fun ref set_notify(notify: CustomTCPConnectionNotify iso) => 84 | """ 85 | Change the notifier. 86 | """ 87 | None 88 | 89 | fun ref dispose() => 90 | """ 91 | Close the connection gracefully once all writes are sent. 92 | """ 93 | None 94 | 95 | fun ref event_notify(event: AsioEventID, flags: U32, arg: U32) => 96 | """ 97 | Handle socket events. 98 | """ 99 | None 100 | 101 | fun ref pending_reads() => 102 | """ 103 | Unless this connection is currently muted, read while data is available, 104 | guessing the next packet length as we go. If we read 4 kb of data, send 105 | ourself a resume message and stop reading, to avoid starving other actors. 106 | """ 107 | None 108 | 109 | fun ref expect(qty: USize = 0) => 110 | """ 111 | A `received` call on the notifier must contain exactly `qty` bytes. If 112 | `qty` is zero, the call can contain any amount of data. This has no effect 113 | if called in the `sent` notifier callback. 114 | """ 115 | None 116 | 117 | fun ref reconnect() => 118 | None 119 | 120 | fun local_address(): NetAddress => 121 | """ 122 | Return the local IP address. 123 | """ 124 | NetAddress 125 | 126 | fun remote_address(): NetAddress => 127 | """ 128 | Return the remote IP address. 129 | """ 130 | NetAddress 131 | 132 | fun requested_address(): (String, String) => 133 | """ 134 | Return the host and service that were originally provided to the 135 | @pony_os_listen_tcp method. 136 | """ 137 | ("", "") 138 | -------------------------------------------------------------------------------- /pony-kafka/compression/_test.pony: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | 15 | */ 16 | 17 | use "ponytest" 18 | use "../customlogger" 19 | 20 | actor Main is TestList 21 | new create(env: Env) => PonyTest(env, this) 22 | new make() => None 23 | 24 | fun tag tests(test: PonyTest) => 25 | test(_TestXXHash) 26 | test(_TestZlib) 27 | test(_TestLZ4) 28 | test(_TestSnappy) 29 | 30 | 31 | class iso _TestXXHash is UnitTest 32 | """ 33 | Test XXHash encoding. 34 | """ 35 | fun name(): String => "compression/XXHash" 36 | 37 | fun apply(h: TestHelper) ? => 38 | let d = [as U8: 1; 2; 6; 10; 42; 'H'; 'e'; 'l'; 'l'; 'o'; ','; ' '; 'w' 39 | 'o'; 'r'; 'l'; 'd'; '!'; 0; 2; 56; 99] 40 | h.assert_eq[U32](0x02cc5d05, XXHash.hash32(Array[U8], 0)?) 41 | h.assert_eq[U32](0xe0fe705f, XXHash.hash32([as U8: 42], 0)?) 42 | h.assert_eq[U32](0x9e5e7e93, XXHash.hash32([as U8: 'H'; 'e'; 'l'; 'l'; 'o' 43 | ','; ' '; 'w'; 'o'; 'r'; 'l'; 'd'; '!'; 0], 0)?) 44 | h.assert_eq[U32](0xd6bf8459, XXHash.hash32(Array[U8], 0x42c91977)?) 45 | h.assert_eq[U32](0x02cc5d05, XXHash.hash32(d, 0, 4, 0)?) 46 | h.assert_eq[U32](0xe0fe705f, XXHash.hash32(d, 0, 4, 1)?) 47 | h.assert_eq[U32](0x9e5e7e93, XXHash.hash32(d, 0, 5, 14)?) 48 | 49 | class iso _TestZlib is UnitTest 50 | """ 51 | Test zlib compression/decompression. 52 | """ 53 | fun name(): String => "compression/Zlib" 54 | 55 | fun apply(h: TestHelper) ? => 56 | let b = recover Array[U8](10000) end 57 | b.undefined(b.space()) 58 | let b': Array[U8] val = consume b 59 | 60 | let logger = StringLogger(Warn, h.env.out) 61 | 62 | let compressed_data = ZlibCompressor.compress(logger, b')? 63 | 64 | let a = ZlibDecompressor.decompress(logger, consume val compressed_data)? 65 | let a': Array[U8] val = consume val a 66 | 67 | h.assert_eq[USize](b'.size(), a'.size()) 68 | 69 | for (i, v) in b'.pairs() do 70 | h.assert_eq[U8](v, a'(i)?) 71 | end 72 | 73 | class iso _TestLZ4 is UnitTest 74 | """ 75 | Test lz4 compression/decompression. 76 | """ 77 | fun name(): String => "compression/LZ4" 78 | 79 | fun apply(h: TestHelper) ? => 80 | let b = recover Array[U8](10000) end 81 | b.undefined(b.space()) 82 | let b': Array[U8] val = consume b 83 | 84 | let logger = StringLogger(Warn, h.env.out) 85 | 86 | let compressed_data = LZ4Compressor.compress(logger, b')? 87 | 88 | let a = LZ4Decompressor.decompress(logger, consume val compressed_data)? 89 | let a': Array[U8] val = consume val a 90 | 91 | h.assert_eq[USize](b'.size(), a'.size()) 92 | 93 | for (i, v) in b'.pairs() do 94 | h.assert_eq[U8](v, a'(i)?) 95 | end 96 | 97 | class iso _TestSnappy is UnitTest 98 | """ 99 | Test snappy compression/decompression. 100 | """ 101 | fun name(): String => "compression/Snappy" 102 | 103 | fun apply(h: TestHelper) ? => 104 | let b = recover Array[U8](10000) end 105 | b.undefined(b.space()) 106 | let b': Array[U8] val = consume b 107 | 108 | let logger = StringLogger(Warn, h.env.out) 109 | 110 | let compressed_data = SnappyCompressor.compress(logger, b')? 111 | 112 | let a = SnappyDecompressor.decompress(logger, consume val compressed_data)? 113 | let a': Array[U8] val = consume val a 114 | 115 | h.assert_eq[USize](b'.size(), a'.size()) 116 | 117 | for (i, v) in b'.pairs() do 118 | h.assert_eq[U8](v, a'(i)?) 119 | end 120 | 121 | let compressed_data' = SnappyCompressor.compress_java(logger, b')? 122 | 123 | let z = SnappyDecompressor.decompress_java(logger, 124 | consume val compressed_data')? 125 | let z': Array[U8] val = consume val z 126 | 127 | h.assert_eq[USize](b'.size(), z'.size()) 128 | 129 | for (i, v) in b'.pairs() do 130 | h.assert_eq[U8](v, z'(i)?) 
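// the Java-framing roundtrip must reproduce the original input byte-for-byte, just like the raw snappy roundtrip above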
131 | end 132 | 133 | -------------------------------------------------------------------------------- /pony-kafka/customnet/custom_tcp_connection_notify.pony: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Copyright (C) 2016-2017, Sendence LLC 4 | Copyright (C) 2016-2017, The Pony Developers 5 | Copyright (c) 2014-2015, Causality Ltd. 6 | All rights reserved. 7 | 8 | Redistribution and use in source and binary forms, with or without 9 | modification, are permitted provided that the following conditions are met: 10 | 11 | 1. Redistributions of source code must retain the above copyright notice, this 12 | list of conditions and the following disclaimer. 13 | 2. Redistributions in binary form must reproduce the above copyright notice, 14 | this list of conditions and the following disclaimer in the documentation 15 | and/or other materials provided with the distribution. 16 | 17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 18 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 19 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 20 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 21 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 22 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 23 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 24 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 26 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 | 28 | */ 29 | 30 | interface CustomTCPConnectionNotify 31 | """ 32 | Notifications for TCP connections. 33 | 34 | For an example of using this class please see the documentation for the 35 | `TCPConnection` and `TCPListener` actors. 36 | """ 37 | fun ref accepted(conn: CustomTCPConnection ref) => 38 | """ 39 | Called when a TCPConnection is accepted by a TCPListener. 40 | """ 41 | None 42 | 43 | fun ref before_reconnecting(conn: CustomTCPConnection ref) => 44 | """ 45 | Called right before a reconnection attempt is made. The reconnection 46 | attempt will cause either `connecting` or `connect_failed` to get called 47 | as appropriate. 48 | """ 49 | None 50 | 51 | fun ref connecting(conn: CustomTCPConnection ref, count: U32) => 52 | """ 53 | Called if name resolution succeeded for a TCPConnection and we are now 54 | waiting for a connection to the server to succeed. The count is the number 55 | of connections we're trying. The notifier will be informed each time the 56 | count changes, until a connection is made or connect_failed() is called. 57 | """ 58 | None 59 | 60 | fun ref connected(conn: CustomTCPConnection ref) => 61 | """ 62 | Called when we have successfully connected to the server. 63 | """ 64 | None 65 | 66 | fun ref connect_failed(conn: CustomTCPConnection ref) => 67 | """ 68 | Called when we have failed to connect to all possible addresses for the 69 | server. At this point, the connection will never be established. 70 | """ 71 | None 72 | 73 | fun ref auth_failed(conn: CustomTCPConnection ref) => 74 | """ 75 | A raw TCPConnection has no authentication mechanism. However, when 76 | protocols are wrapped in other protocols, this can be used to report an 77 | authentication failure in a lower level protocol (e.g. SSL). 
78 | """ 79 | None 80 | 81 | fun ref sent(conn: CustomTCPConnection ref, data: ByteSeq): ByteSeq => 82 | """ 83 | Called when data is sent on the connection. This gives the notifier an 84 | opportunity to modify sent data before it is written. To swallow data, 85 | return an empty string. 86 | """ 87 | data 88 | 89 | fun ref sentv(conn: CustomTCPConnection ref, data: ByteSeqIter): ByteSeqIter 90 | => 91 | """ 92 | Called when multiple chunks of data are sent to the connection in a single 93 | call. This gives the notifier an opportunity to modify the sent data chunks 94 | before they are written. To swallow the send, return an empty 95 | Array[String]. 96 | """ 97 | data 98 | 99 | fun ref received(conn: CustomTCPConnection ref, data: Array[U8] iso, 100 | times: USize): Bool 101 | => 102 | """ 103 | Called when new data is received on the connection. Return true if you 104 | want to continue receiving messages without yielding until you read 105 | max_size on the TCPConnection. Return false to cause the TCPConnection 106 | to yield now. 107 | 108 | Includes the number of times during the current behavior, that received has 109 | been called. This allows the notifier to end reads on a regular basis. 110 | """ 111 | true 112 | 113 | fun ref expect(conn: CustomTCPConnection ref, qty: USize): USize => 114 | """ 115 | Called when the connection has been told to expect a certain quantity of 116 | bytes. This allows nested notifiers to change the expected quantity, which 117 | allows a lower level protocol to handle any framing (e.g. SSL). 118 | """ 119 | qty 120 | 121 | fun ref closed(conn: CustomTCPConnection ref) => 122 | """ 123 | Called when the connection is closed. 124 | """ 125 | None 126 | 127 | fun ref throttled(conn: CustomTCPConnection ref) => 128 | """ 129 | Called when the connection starts experiencing TCP backpressure. You should 130 | respond to this by pausing additional calls to `write` and `writev` until 131 | you are informed that pressure has been released. Failure to respond to 132 | the `throttled` notification will result in outgoing data queuing in the 133 | connection and increasing memory usage. 134 | """ 135 | None 136 | 137 | fun ref unthrottled(conn: CustomTCPConnection ref) => 138 | """ 139 | Called when the connection stops experiencing TCP backpressure. Upon 140 | receiving this notification, you should feel free to start making calls to 141 | `write` and `writev` again. 142 | """ 143 | None 144 | 145 | fun ref dispose() => 146 | """ 147 | Called when the parent actor's dispose is called. 148 | """ 149 | None 150 | 151 | -------------------------------------------------------------------------------- /misc/kafka/kafka-server-0.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | # see kafka.server.KafkaConfig for additional details and defaults 17 | 18 | ############################# Server Basics ############################# 19 | 20 | # The id of the broker. This must be set to a unique integer for each broker. 21 | broker.id=0 22 | port=9092 23 | 24 | # Switch to enable topic deletion or not, default value is false 25 | #delete.topic.enable=true 26 | 27 | ############################# Socket Server Settings ############################# 28 | 29 | # The address the socket server listens on. It will get the value returned from 30 | # java.net.InetAddress.getCanonicalHostName() if not configured. 31 | # FORMAT: 32 | #     listeners = listener_name://host_name:port 33 | # EXAMPLE: 34 | #     listeners = PLAINTEXT://your.host.name:9092 35 | #listeners=PLAINTEXT://:9092 36 | 37 | # Hostname and port the broker will advertise to producers and consumers. If not set, 38 | # it uses the value for "listeners" if configured.  Otherwise, it will use the value 39 | # returned from java.net.InetAddress.getCanonicalHostName(). 40 | #advertised.listeners=PLAINTEXT://your.host.name:9092 41 | 42 | # Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details 43 | #listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL 44 | 45 | # The number of threads handling network requests 46 | num.network.threads=3 47 | 48 | # The number of threads doing disk I/O 49 | num.io.threads=8 50 | 51 | # The send buffer (SO_SNDBUF) used by the socket server 52 | socket.send.buffer.bytes=102400 53 | 54 | # The receive buffer (SO_RCVBUF) used by the socket server 55 | socket.receive.buffer.bytes=102400 56 | 57 | # The maximum size of a request that the socket server will accept (protection against OOM) 58 | socket.request.max.bytes=104857600 59 | 60 | 61 | ############################# Log Basics ############################# 62 | 63 | # A comma separated list of directories under which to store log files 64 | log.dirs=/data/kafka-logs-0 65 | 66 | # The default number of log partitions per topic. More partitions allow greater 67 | # parallelism for consumption, but this will also result in more files across 68 | # the brokers. 69 | num.partitions=1 70 | 71 | # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. 72 | # This value is recommended to be increased for installations with data dirs located in RAID array. 73 | num.recovery.threads.per.data.dir=1 74 | 75 | ############################# Log Flush Policy ############################# 76 | 77 | # Messages are immediately written to the filesystem but by default we only fsync() to sync 78 | # the OS cache lazily. The following configurations control the flush of data to disk. 79 | # There are a few important trade-offs here: 80 | #    1. Durability: Unflushed data may be lost if you are not using replication. 81 | #    2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. 82 | #    3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks. 83 | # The settings below allow one to configure the flush policy to flush data after a period of time or 84 | # every N messages (or both).
This can be done globally and overridden on a per-topic basis. 85 | 86 | # The number of messages to accept before forcing a flush of data to disk 87 | #log.flush.interval.messages=10000 88 | 89 | # The maximum amount of time a message can sit in a log before we force a flush 90 | #log.flush.interval.ms=1000 91 | 92 | ############################# Log Retention Policy ############################# 93 | 94 | # The following configurations control the disposal of log segments. The policy can 95 | # be set to delete segments after a period of time, or after a given size has accumulated. 96 | # A segment will be deleted whenever *either* of these criteria are met. Deletion always happens 97 | # from the end of the log. 98 | 99 | # The minimum age of a log file to be eligible for deletion due to age 100 | log.retention.hours=168 101 | 102 | # A size-based retention policy for logs. Segments are pruned from the log as long as the remaining 103 | # segments don't drop below log.retention.bytes. Functions independently of log.retention.hours. 104 | #log.retention.bytes=1073741824 105 | 106 | # The maximum size of a log segment file. When this size is reached a new log segment will be created. 107 | log.segment.bytes=1073741824 108 | 109 | # The interval at which log segments are checked to see if they can be deleted according 110 | # to the retention policies 111 | log.retention.check.interval.ms=300000 112 | 113 | ############################# Zookeeper ############################# 114 | 115 | # Zookeeper connection string (see zookeeper docs for details). 116 | # This is a comma separated list of host:port pairs, each corresponding to a zk 117 | # server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002". 118 | # You can also append an optional chroot string to the urls to specify the 119 | # root directory for all kafka znodes. 120 | zookeeper.connect=localhost:2181 121 | 122 | # Timeout in ms for connecting to zookeeper 123 | zookeeper.connection.timeout.ms=6000 124 | 125 | 126 | -------------------------------------------------------------------------------- /misc/kafka/kafka-server-1.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | # see kafka.server.KafkaConfig for additional details and defaults 17 | 18 | ############################# Server Basics ############################# 19 | 20 | # The id of the broker. This must be set to a unique integer for each broker.
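# (these test configs run three brokers: ids 0, 1 and 2, listening on ports 9092, 9093 and 9094)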
21 | broker.id=1
22 | port=9093
23 | 
24 | # Switch to enable topic deletion or not, default value is false
25 | #delete.topic.enable=true
26 | 
27 | ############################# Socket Server Settings #############################
28 | 
29 | # The address the socket server listens on. It will get the value returned from
30 | # java.net.InetAddress.getCanonicalHostName() if not configured.
31 | # FORMAT:
32 | # listeners = listener_name://host_name:port
33 | # EXAMPLE:
34 | # listeners = PLAINTEXT://your.host.name:9092
35 | #listeners=PLAINTEXT://:9092
36 | 
37 | # Hostname and port the broker will advertise to producers and consumers. If not set,
38 | # it uses the value for "listeners" if configured. Otherwise, it will use the value
39 | # returned from java.net.InetAddress.getCanonicalHostName().
40 | #advertised.listeners=PLAINTEXT://your.host.name:9092
41 | 
42 | # Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
43 | #listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
44 | 
45 | # The number of threads handling network requests
46 | num.network.threads=3
47 | 
48 | # The number of threads doing disk I/O
49 | num.io.threads=8
50 | 
51 | # The send buffer (SO_SNDBUF) used by the socket server
52 | socket.send.buffer.bytes=102400
53 | 
54 | # The receive buffer (SO_RCVBUF) used by the socket server
55 | socket.receive.buffer.bytes=102400
56 | 
57 | # The maximum size of a request that the socket server will accept (protection against OOM)
58 | socket.request.max.bytes=104857600
59 | 
60 | 
61 | ############################# Log Basics #############################
62 | 
63 | # A comma separated list of directories under which to store log files
64 | log.dirs=/data/kafka-logs-1
65 | 
66 | # The default number of log partitions per topic. More partitions allow greater
67 | # parallelism for consumption, but this will also result in more files across
68 | # the brokers.
69 | num.partitions=1
70 | 
71 | # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
72 | # This value is recommended to be increased for installations with data dirs located in a RAID array.
73 | num.recovery.threads.per.data.dir=1
74 | 
75 | ############################# Log Flush Policy #############################
76 | 
77 | # Messages are immediately written to the filesystem but by default we only fsync() to sync
78 | # the OS cache lazily. The following configurations control the flush of data to disk.
79 | # There are a few important trade-offs here:
80 | # 1. Durability: Unflushed data may be lost if you are not using replication.
81 | # 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
82 | # 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
83 | # The settings below allow one to configure the flush policy to flush data after a period of time or
84 | # every N messages (or both). This can be done globally and overridden on a per-topic basis.
85 | 
86 | # The number of messages to accept before forcing a flush of data to disk
87 | #log.flush.interval.messages=10000
88 | 
89 | # The maximum amount of time a message can sit in a log before we force a flush
90 | #log.flush.interval.ms=1000
91 | 
92 | ############################# Log Retention Policy #############################
93 | 
94 | # The following configurations control the disposal of log segments. The policy can
95 | # be set to delete segments after a period of time, or after a given size has accumulated.
96 | # A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
97 | # from the end of the log.
98 | 
99 | # The minimum age of a log file to be eligible for deletion due to age
100 | log.retention.hours=168
101 | 
102 | # A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
103 | # segments don't drop below log.retention.bytes. Functions independently of log.retention.hours.
104 | #log.retention.bytes=1073741824
105 | 
106 | # The maximum size of a log segment file. When this size is reached a new log segment will be created.
107 | log.segment.bytes=1073741824
108 | 
109 | # The interval at which log segments are checked to see if they can be deleted according
110 | # to the retention policies
111 | log.retention.check.interval.ms=300000
112 | 
113 | ############################# Zookeeper #############################
114 | 
115 | # Zookeeper connection string (see zookeeper docs for details).
116 | # This is a comma separated list of host:port pairs, each corresponding to a zk
117 | # server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
118 | # You can also append an optional chroot string to the urls to specify the
119 | # root directory for all kafka znodes.
120 | zookeeper.connect=localhost:2181
121 | 
122 | # Timeout in ms for connecting to zookeeper
123 | zookeeper.connection.timeout.ms=6000
124 | 
125 | 
126 | 
--------------------------------------------------------------------------------
/misc/kafka/kafka-server-2.properties:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements. See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License. You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | 
16 | # see kafka.server.KafkaConfig for additional details and defaults
17 | 
18 | ############################# Server Basics #############################
19 | 
20 | # The id of the broker. This must be set to a unique integer for each broker.
21 | broker.id=2
22 | port=9094
23 | 
24 | # Switch to enable topic deletion or not, default value is false
25 | #delete.topic.enable=true
26 | 
27 | ############################# Socket Server Settings #############################
28 | 
29 | # The address the socket server listens on. It will get the value returned from
30 | # java.net.InetAddress.getCanonicalHostName() if not configured.
31 | # FORMAT:
32 | # listeners = listener_name://host_name:port
33 | # EXAMPLE:
34 | # listeners = PLAINTEXT://your.host.name:9092
35 | #listeners=PLAINTEXT://:9092
36 | 
37 | # Hostname and port the broker will advertise to producers and consumers. If not set,
38 | # it uses the value for "listeners" if configured. Otherwise, it will use the value
39 | # returned from java.net.InetAddress.getCanonicalHostName().
40 | #advertised.listeners=PLAINTEXT://your.host.name:9092
41 | 
42 | # Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
43 | #listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
44 | 
45 | # The number of threads handling network requests
46 | num.network.threads=3
47 | 
48 | # The number of threads doing disk I/O
49 | num.io.threads=8
50 | 
51 | # The send buffer (SO_SNDBUF) used by the socket server
52 | socket.send.buffer.bytes=102400
53 | 
54 | # The receive buffer (SO_RCVBUF) used by the socket server
55 | socket.receive.buffer.bytes=102400
56 | 
57 | # The maximum size of a request that the socket server will accept (protection against OOM)
58 | socket.request.max.bytes=104857600
59 | 
60 | 
61 | ############################# Log Basics #############################
62 | 
63 | # A comma separated list of directories under which to store log files
64 | log.dirs=/data/kafka-logs-2
65 | 
66 | # The default number of log partitions per topic. More partitions allow greater
67 | # parallelism for consumption, but this will also result in more files across
68 | # the brokers.
69 | num.partitions=1
70 | 
71 | # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
72 | # This value is recommended to be increased for installations with data dirs located in a RAID array.
73 | num.recovery.threads.per.data.dir=1
74 | 
75 | ############################# Log Flush Policy #############################
76 | 
77 | # Messages are immediately written to the filesystem but by default we only fsync() to sync
78 | # the OS cache lazily. The following configurations control the flush of data to disk.
79 | # There are a few important trade-offs here:
80 | # 1. Durability: Unflushed data may be lost if you are not using replication.
81 | # 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
82 | # 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
83 | # The settings below allow one to configure the flush policy to flush data after a period of time or
84 | # every N messages (or both). This can be done globally and overridden on a per-topic basis.
85 | 
86 | # The number of messages to accept before forcing a flush of data to disk
87 | #log.flush.interval.messages=10000
88 | 
89 | # The maximum amount of time a message can sit in a log before we force a flush
90 | #log.flush.interval.ms=1000
91 | 
92 | ############################# Log Retention Policy #############################
93 | 
94 | # The following configurations control the disposal of log segments. The policy can
95 | # be set to delete segments after a period of time, or after a given size has accumulated.
96 | # A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
97 | # from the end of the log.
98 | 
99 | # The minimum age of a log file to be eligible for deletion due to age
100 | log.retention.hours=168
101 | 
102 | # A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
103 | # segments don't drop below log.retention.bytes. Functions independently of log.retention.hours.
104 | #log.retention.bytes=1073741824
105 | 
106 | # The maximum size of a log segment file. When this size is reached a new log segment will be created.
107 | log.segment.bytes=1073741824
108 | 
109 | # The interval at which log segments are checked to see if they can be deleted according
110 | # to the retention policies
111 | log.retention.check.interval.ms=300000
112 | 
113 | ############################# Zookeeper #############################
114 | 
115 | # Zookeeper connection string (see zookeeper docs for details).
116 | # This is a comma separated list of host:port pairs, each corresponding to a zk
117 | # server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
118 | # You can also append an optional chroot string to the urls to specify the
119 | # root directory for all kafka znodes.
120 | zookeeper.connect=localhost:2181
121 | 
122 | # Timeout in ms for connecting to zookeeper
123 | zookeeper.connection.timeout.ms=6000
124 | 
125 | 
126 | 
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Code of Conduct
2 | 
3 | ## Our Pledge
4 | 
5 | In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to make participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
6 | 
7 | ## Our Standards
8 | 
9 | Examples of behavior that contributes to creating a positive environment include:
10 | 
11 | * Using welcoming and inclusive language
12 | * Being respectful of differing viewpoints and experiences
13 | * Gracefully accepting constructive criticism
14 | * Focusing on what is best for the community
15 | * Showing empathy towards other community members
16 | 
17 | Examples of unacceptable behavior by participants include:
18 | 
19 | * The use of sexualized language or imagery and unwelcome sexual attention or
20 | advances
21 | * Trolling, insulting/derogatory comments, and personal or political attacks
22 | * Public or private harassment
23 | * Publishing others' private information, such as a physical or electronic
24 | address, without explicit permission
25 | * Other conduct which could reasonably be considered inappropriate in a
26 | professional setting
27 | 
28 | ## Our Responsibilities
29 | 
30 | Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
31 | 
32 | Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
33 | 
34 | ## Scope
35 | 
36 | This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
37 | 
38 | ## Enforcement
39 | 
40 | Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at coc@wallaroolabs.com. All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
41 | 
42 | Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
43 | 
44 | ## Attribution
45 | 
46 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
47 | 
48 | [homepage]: http://contributor-covenant.org
49 | [version]: http://contributor-covenant.org/version/1/4/
50 | 
51 | # Social Rules
52 | 
53 | In addition to having a code of conduct as an anti-harassment policy, we have a small set of [social rules](https://www.recurse.com/manual#sub-sec-social-rules) we follow. We (the project maintainers) lifted these rules from the [Recurse Center](https://www.recurse.com). We've seen these rules in effect in other environments. We'd like the Wallaroo community to share a similar positive environment. These rules are intended to be lightweight, and to make more explicit certain social norms that are normally implicit. Most of our social rules really boil down to “don't be a jerk” or “don't be annoying.” Of course, almost nobody sets out to be a jerk or annoying, so telling people not to be jerks isn't a very productive strategy.
54 | 
55 | Unlike the anti-harassment policy, violation of the social rules will not result in expulsion from the Wallaroo community or a strong warning from project maintainers. Rather, they are designed to provide some lightweight social structure for community members to use when interacting with each other.
56 | 
57 | ## No feigning surprise
58 | 
59 | The first rule means you shouldn't act surprised when people say they don't know something. This applies to both technical things ("What?! I can't believe you don't know what the stack is!") and non-technical things ("You don't know who RMS is?!"). Feigning surprise has absolutely no social or educational benefit: When people feign surprise, it's usually to make them feel better about themselves and others feel worse. And even when that's not the intention, it's almost always the effect.
60 | 
61 | ## No well-actually's
62 | 
63 | A well-actually happens when someone says something that's almost - but not entirely - correct, and you say, "well, actually…" and then give a minor correction. This is especially annoying when the correction has no bearing on the actual conversation. This doesn't mean we aren't about truth-seeking or that we don't care about being precise. Almost all well-actually's in our experience are about grandstanding, not truth-seeking.
64 | 
65 | ## No subtle -isms
66 | 
67 | Our last social rule bans subtle racism, sexism, homophobia, transphobia, and other kinds of bias. This one is different from the rest, because it covers a class of behaviors instead of one very specific pattern.
68 | 
69 | Subtle -isms are small things that make others feel uncomfortable, things that we all sometimes do by mistake. For example, saying "It's so easy my grandmother could do it" is a subtle -ism. Like the other two social rules, this one is often accidentally broken. Like the other two, it's not a big deal to mess up – you just apologize and move on.
70 | 
71 | If you see a subtle -ism in the Wallaroo community, you can point it out to the relevant person, either publicly or privately, or you can ask one of the project maintainers to say something. After this, we ask that all further discussion move off of public channels. If you are a third party, and you don't see what could be biased about the comment that was made, feel free to talk to the project maintainers. Please don't say, "Comment X wasn't homophobic!" Similarly, please don't pile on to someone who made a mistake. The "subtle" in "subtle -isms" means that it's probably not obvious to everyone right away what was wrong with the comment.
72 | 
73 | If you have any questions about any part of the code of conduct or social rules, please feel free to reach out to any of the project maintainers.
74 | 
--------------------------------------------------------------------------------
/pony-kafka/custombuffered/codecs/varint_decoder.pony:
--------------------------------------------------------------------------------
1 | /*
2 | 
3 | Copyright (C) 2016-2017, Sendence LLC
4 | Copyright (C) 2016-2017, The Pony Developers
5 | Copyright (c) 2014-2015, Causality Ltd.
6 | All rights reserved.
7 | 
8 | Redistribution and use in source and binary forms, with or without
9 | modification, are permitted provided that the following conditions are met:
10 | 
11 | 1. Redistributions of source code must retain the above copyright notice, this
12 | list of conditions and the following disclaimer.
13 | 2. Redistributions in binary form must reproduce the above copyright notice,
14 | this list of conditions and the following disclaimer in the documentation
15 | and/or other materials provided with the distribution.
16 | 
17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
18 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
21 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
24 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
26 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 | 
28 | */
29 | 
30 | use ".."
31 | use "../../utils/bool_converter"
32 | 
33 | primitive VarIntDecoder
34 | fun u8(rb: Reader): U8 ? =>
35 | """
36 | Read a byte from the buffer in base 128 varint encoding.
37 | """
38 | _decode_varint(rb, 8)?.u8()
39 | 
40 | fun i8(rb: Reader): I8 ? =>
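// Signed decoders use zig zag encoding beneath the base 128 varint:
// 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ... so values near zero stay short.
// _decode_svarint below undoes that mapping with (d >> 1) xor -(d and 1).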
41 | """
42 | Read an I8 from the buffer in zig zag base 128 varint encoding.
43 | """
44 | _decode_svarint(rb, 8)?.i8()
45 | 
46 | fun bool(rb: Reader): Bool ? =>
47 | """
48 | Read a Bool from the buffer in base 128 varint encoding.
49 | """
50 | BoolConverter.u8_to_bool(_decode_varint(rb, 8)?.u8())
51 | 
52 | fun u16(rb: Reader): U16 ? =>
53 | """
54 | Read a U16 from the buffer in base 128 varint encoding.
55 | """
56 | _decode_varint(rb, 16)?.u16()
57 | 
58 | fun i16(rb: Reader): I16 ? =>
59 | """
60 | Read an I16 from the buffer in zig zag base 128 varint encoding.
61 | """
62 | _decode_svarint(rb, 16)?.i16()
63 | 
64 | fun u32(rb: Reader): U32 ? =>
65 | """
66 | Read a U32 from the buffer in base 128 varint encoding.
67 | """
68 | _decode_varint(rb, 32)?.u32()
69 | 
70 | fun i32(rb: Reader): I32 ? =>
71 | """
72 | Read an I32 from the buffer in zig zag base 128 varint encoding.
73 | """
74 | _decode_svarint(rb, 32)?.i32()
75 | 
76 | fun u64(rb: Reader): U64 ? =>
77 | """
78 | Read a U64 from the buffer in base 128 varint encoding.
79 | """
80 | _decode_varint(rb, 64)?
81 | 
82 | fun i64(rb: Reader): I64 ? =>
83 | """
84 | Read an I64 from the buffer in zig zag base 128 varint encoding.
85 | """
86 | _decode_svarint(rb, 64)?
87 | 
88 | fun _decode_svarint(rb: Reader, bits_to_read: U64): I64 ? =>
89 | let d = _decode_varint(rb, bits_to_read)?.i64()
90 | (d >> 1) xor -(d and 1)
91 | 
92 | fun _decode_varint(rb: Reader, bits_to_read: U64): U64 ? =>
93 | var d: U64 = 0
94 | var bits: U64 = 0
95 | var b: U64 = 0
96 | 
97 | repeat
98 | if bits > bits_to_read then
99 | error
100 | end
101 | b = rb.read_byte()?.u64()
102 | d = d or ((b and 0x7f) << bits)
103 | bits = bits + 7
104 | until (b and 0x80) == 0 end
105 | 
106 | d
107 | 
108 | fun peek_u8(rb: PeekableReader, offset: USize = 0): (U8, USize) ? =>
109 | """
110 | Peek at a U8 from the buffer in base 128 varint encoding.
111 | """
112 | (let x, let num_bytes) = _peek_varint(rb, 8, offset)?
113 | (x.u8(), num_bytes)
114 | 
115 | fun peek_i8(rb: PeekableReader, offset: USize = 0): (I8, USize) ? =>
116 | """
117 | Peek at an I8 from the buffer in zig zag base 128 varint encoding.
118 | """
119 | (let x, let num_bytes) = _peek_svarint(rb, 8, offset)?
120 | (x.i8(), num_bytes)
121 | 
122 | fun peek_bool(rb: PeekableReader, offset: USize = 0): (Bool, USize) ? =>
123 | """
124 | Peek at a Bool from the buffer in base 128 varint encoding.
125 | """
126 | (let x, let num_bytes) = _peek_varint(rb, 8, offset)?
127 | (BoolConverter.u8_to_bool(x.u8()), num_bytes)
128 | 
129 | fun peek_u16(rb: PeekableReader, offset: USize = 0): (U16, USize) ? =>
130 | """
131 | Peek at a U16 from the buffer in base 128 varint encoding.
132 | """
133 | (let x, let num_bytes) = _peek_varint(rb, 16, offset)?
134 | (x.u16(), num_bytes)
135 | 
136 | fun peek_i16(rb: PeekableReader, offset: USize = 0): (I16, USize) ? =>
137 | """
138 | Peek at an I16 from the buffer in zig zag base 128 varint encoding.
139 | """
140 | (let x, let num_bytes) = _peek_svarint(rb, 16, offset)?
141 | (x.i16(), num_bytes)
142 | 
143 | fun peek_u32(rb: PeekableReader, offset: USize = 0): (U32, USize) ? =>
144 | """
145 | Peek at a U32 from the buffer in base 128 varint encoding.
146 | """
147 | (let x, let num_bytes) = _peek_varint(rb, 32, offset)?
148 | (x.u32(), num_bytes)
149 | 
150 | fun peek_i32(rb: PeekableReader, offset: USize = 0): (I32, USize) ? =>
151 | """
152 | Peek at an I32 from the buffer in zig zag base 128 varint encoding.
153 | """
154 | (let x, let num_bytes) = _peek_svarint(rb, 32, offset)?
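// Each peek variant returns the decoded value together with the number of
// bytes the varint occupies, so callers can skip past it afterwards.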
155 | (x.i32(), num_bytes)
156 | 
157 | fun peek_u64(rb: PeekableReader, offset: USize = 0): (U64, USize) ? =>
158 | """
159 | Peek at a U64 from the buffer in base 128 varint encoding.
160 | """
161 | _peek_varint(rb, 64, offset)?
162 | 
163 | fun peek_i64(rb: PeekableReader, offset: USize = 0): (I64, USize) ? =>
164 | """
165 | Peek at an I64 from the buffer in zig zag base 128 varint encoding.
166 | """
167 | _peek_svarint(rb, 64, offset)?
168 | 
169 | fun _peek_svarint(rb: PeekableReader, bits_to_read: U64, offset: USize = 0):
170 | (I64, USize) ?
171 | =>
172 | (let d', let num_bytes) = _peek_varint(rb, bits_to_read, offset)?
173 | let d = d'.i64()
174 | ((d >> 1) xor -(d and 1), num_bytes)
175 | 
176 | fun _peek_varint(rb: PeekableReader, bits_to_read: U64, offset: USize = 0):
177 | (U64, USize) ?
178 | =>
179 | var d: U64 = 0
180 | var bits: U64 = 0
181 | var b: U64 = 0
182 | var offset' = offset
183 | 
184 | repeat
185 | if bits > bits_to_read then
186 | error
187 | end
188 | b = rb.peek_byte(offset')?.u64()
189 | d = d or ((b and 0x7f) << bits)
190 | bits = bits + 7
191 | offset' = offset' + 1
192 | until (b and 0x80) == 0 end
193 | 
194 | (d, offset' - offset)
195 | 
--------------------------------------------------------------------------------
/pony-kafka/fsm/fsm.pony:
--------------------------------------------------------------------------------
1 | /*
2 | 
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 | 
7 | http://www.apache.org/licenses/LICENSE-2.0
8 | 
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | 
15 | */
16 | 
17 | use "collections"
18 | use "../customlogger"
19 | 
20 | trait FsmState
21 | fun string(): String
22 | 
23 | primitive FsmStateAny is FsmState
24 | fun string(): String => "FsmStateAny"
25 | 
26 | primitive FsmStateNotOld is FsmState
27 | fun string(): String => "FsmStateNotOld"
28 | 
29 | class Fsm[A]
30 | let state_transitions: MapIs[(FsmState val, FsmState val), {ref(FsmState val,
31 | Fsm[A], A) ?} ref] = state_transitions.create()
32 | let states: SetIs[FsmState val] = states.create()
33 | let valid_transitions: Array[(FsmState val, FsmState val, {ref(FsmState val,
34 | Fsm[A], A) ?} ref)] = valid_transitions.create()
35 | var error_func: {ref(FsmState val, FsmState val, Fsm[A], A)} ref =
36 | {ref(old_state: FsmState val, new_state: FsmState val, state_machine:
37 | Fsm[A], data: A) => None} ref
38 | var _current_state: FsmState val = FsmStateAny
39 | let _logger: Logger[String]
40 | 
41 | 
42 | new create(logger: Logger[String]) => _logger = logger
43 | 
44 | fun ref initialize(initial_state: FsmState val,
45 | error_func': {ref(FsmState val, FsmState val, Fsm[A], A)} ref) ?
46 | =>
47 | if (initial_state is FsmStateAny) or (initial_state is FsmStateNotOld) then
48 | _logger(Error) and _logger.log(Error,
49 | "Initial state can't be FsmStateAny/FsmStateNotOld")
50 | error
51 | end
52 | 
53 | _current_state = initial_state
54 | error_func = error_func'
55 | 
56 | for (old_state, new_state, func) in valid_transitions.values() do
57 | if old_state is FsmStateAny then
58 | _add_state_transitions_old_any(new_state, func)?
59 | elseif old_state is FsmStateNotOld then 60 | _add_state_transitions_old_not_self(new_state, func)? 61 | elseif new_state is FsmStateAny then 62 | _add_state_transitions_new_any(old_state, func)? 63 | elseif new_state is FsmStateNotOld then 64 | _add_state_transitions_new_not_self(old_state, func)? 65 | else 66 | _logger(Fine) and _logger.log(Fine, 67 | "Adding valid state transition from: " + old_state.string() + " to " + 68 | new_state.string()) 69 | state_transitions.insert((old_state, new_state), func)? 70 | end 71 | end 72 | 73 | fun ref _add_state_transitions_old_any(new_state: FsmState val, 74 | func: {ref(FsmState val, Fsm[A], A) ?} ref) ? 75 | => 76 | for s in states.values() do 77 | _logger(Fine) and _logger.log(Fine, "Adding valid state transition from: " 78 | + s.string() + " to " + new_state.string()) 79 | state_transitions.insert((s, new_state), func)? 80 | end 81 | 82 | fun ref _add_state_transitions_old_not_self(new_state: FsmState val, 83 | func: {ref(FsmState val, Fsm[A], A) ?} ref) ? 84 | => 85 | for s in states.values() do 86 | if s is new_state then 87 | continue 88 | end 89 | _logger(Fine) and _logger.log(Fine, 90 | "Adding valid state transition from: " + s.string() + " to " + 91 | new_state.string()) 92 | state_transitions.insert((s, new_state), func)? 93 | end 94 | 95 | fun ref _add_state_transitions_new_any(old_state: FsmState val, 96 | func: {ref(FsmState val, Fsm[A], A) ?} ref) ? 97 | => 98 | for s in states.values() do 99 | _logger(Fine) and _logger.log(Fine, "Adding valid state transition from: " 100 | + old_state.string() + " to " + s.string()) 101 | state_transitions.insert((old_state, s), func)? 102 | end 103 | 104 | fun ref _add_state_transitions_new_not_self(old_state: FsmState val, 105 | func: {ref(FsmState val, Fsm[A], A) ?} ref) ? 106 | => 107 | for s in states.values() do 108 | if s is old_state then 109 | continue 110 | end 111 | _logger(Fine) and _logger.log(Fine, "Adding valid state transition from: " 112 | + old_state.string() + " to " + s.string()) 113 | state_transitions.insert((old_state, s), func)? 114 | end 115 | 116 | fun ref add_allowed_state(state: FsmState val) ? => 117 | if not (_current_state is FsmStateAny) then 118 | _logger(Error) and _logger.log(Error, 119 | "Can't add allowed states after initialization") 120 | error 121 | end 122 | 123 | states.set(state) 124 | 125 | fun ref valid_transition(old_state: FsmState val, new_state: FsmState val, 126 | func: {ref(FsmState val, Fsm[A], A) ?} ref) ? 127 | => 128 | if not (_current_state is FsmStateAny) then 129 | _logger(Error) and _logger.log(Error, 130 | "Can't add valid transitions after initialization") 131 | error 132 | end 133 | 134 | if ((old_state is FsmStateAny) or (old_state is FsmStateNotOld)) and 135 | ((new_state is FsmStateAny) or (new_state is FsmStateNotOld)) then 136 | _logger(Error) and _logger.log(Error, 137 | "Old state and new state can't both be FsmStateAny/FsmStateNotOld") 138 | error 139 | end 140 | if ((not ((old_state is FsmStateAny) or (old_state is FsmStateNotOld))) and 141 | (not states.contains(old_state))) or ((not ((new_state is FsmStateAny) or 142 | (new_state is FsmStateNotOld)) and (not states.contains(new_state)))) then 143 | _logger(Error) and _logger.log(Error, "State not found. Either: " + 144 | old_state.string() + " or " + new_state.string()) 145 | error 146 | end 147 | valid_transitions.push((old_state, new_state, func)) 148 | 149 | fun ref transition_to(new_state: FsmState val, data: A) ? 
=>
150 | _logger(Fine) and _logger.log(Fine, "State transition from: " +
151 | _current_state.string() + " to " + new_state.string())
152 | let func = try
153 | state_transitions((_current_state, new_state))?
154 | else
155 | _logger(Fine) and _logger.log(Fine,
156 | "State transition lookup failed. from: " + _current_state.string() +
157 | " to " + new_state.string())
158 | end
159 | 
160 | match func
161 | | let f: {ref(FsmState val, Fsm[A], A) ?} ref =>
162 | let old_state = _current_state = new_state
163 | try
164 | f(old_state, this, consume data)?
165 | else
166 | _logger(Error) and _logger.log(Error,
167 | "Error running function on state transition")
168 | error
169 | end
170 | else
171 | _logger(Error) and _logger.log(Error, "state transition not valid")
172 | error_func(_current_state, new_state, this, consume data)
173 | error
174 | end
175 | 
176 | fun current_state(): FsmState val => _current_state
177 | 
--------------------------------------------------------------------------------
/pony-kafka/custombuffered/iso_reader.pony:
--------------------------------------------------------------------------------
1 | /*
2 | 
3 | Copyright (C) 2016-2017, Sendence LLC
4 | Copyright (C) 2016-2017, The Pony Developers
5 | Copyright (c) 2014-2015, Causality Ltd.
6 | All rights reserved.
7 | 
8 | Redistribution and use in source and binary forms, with or without
9 | modification, are permitted provided that the following conditions are met:
10 | 
11 | 1. Redistributions of source code must retain the above copyright notice, this
12 | list of conditions and the following disclaimer.
13 | 2. Redistributions in binary form must reproduce the above copyright notice,
14 | this list of conditions and the following disclaimer in the documentation
15 | and/or other materials provided with the distribution.
16 | 
17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
18 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
21 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
24 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
26 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 | 
28 | */
29 | 
30 | use "collections"
31 | 
32 | class IsoReader is Reader
33 | """
34 | Store network data and provide a parsing interface.
35 | """
36 | embed _chunks: Array[Array[U8] iso] = _chunks.create()
37 | var _available: USize = 0
38 | 
39 | fun size(): USize =>
40 | """
41 | Return the number of available bytes.
42 | """
43 | _available
44 | 
45 | fun ref clear() =>
46 | """
47 | Discard all pending data.
48 | """
49 | _chunks.clear()
50 | _available = 0
51 | 
52 | 
53 | // can't handle iso strings until `iso_array()` is added to string
54 | // to be able to get the `iso` `array` from inside an `iso` string
55 | // fun ref append(data: ByteSeq iso) =>
56 | fun ref append(data: Array[U8] iso) =>
57 | """
58 | Add a chunk of data.
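The reader takes ownership of the chunk; no bytes are copied on append.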
59 | """
60 | let data_array =
61 | match consume data
62 | | let data': Array[U8] iso => consume data'
63 | // | let data': String iso => (consume data').array()
64 | end
65 | 
66 | _available = _available + data_array.size()
67 | _chunks.push(consume data_array)
68 | 
69 | /*
70 | fun ref append(data: Array[ByteSeq iso] iso) =>
71 | """
72 | Add a chunk of data.
73 | """
74 | for d in data.values() do
75 | match d
76 | | let s: String => append(s.array())
77 | | let a: Array[U8] val => append(a)
78 | end
79 | end
80 | */
81 | 
82 | fun ref skip(n: USize) ? =>
83 | """
84 | Skip n bytes.
85 | """
86 | if _available >= n then
87 | _available = _available - n
88 | var rem = n
89 | 
90 | while rem > 0 do
91 | let avail = _chunks(0)?.size()
92 | 
93 | if avail > rem then
94 | _chunks(0)?.trim_in_place(rem)
95 | break
96 | end
97 | _chunks.shift()? // this chunk is fully skipped; drop it before continuing
98 | rem = rem - avail
99 | end
100 | 
101 | else
102 | error
103 | end
104 | 
105 | fun ref block(len: USize): Array[U8] iso^ ? =>
106 | """
107 | Return a block as a contiguous chunk of memory.
108 | """
109 | (let num_bytes, let data) = _read_bytes(len)?
110 | 
111 | match consume data
112 | | let a: Array[U8] iso =>
113 | a
114 | | let arr: Array[Array[U8] iso] iso =>
115 | var out = arr.shift()?
116 | var i = out.size()
117 | out.undefined(num_bytes)
118 | while arr.size() > 0 do
119 | let a = recover val arr.shift()? end
120 | out = recover
121 | let r = consume ref out
122 | a.copy_to(r, 0, i, a.size())
123 | i = i + a.size()
124 | consume r
125 | end
126 | end
127 | out
128 | end
129 | 
130 | fun ref read_byte(): U8 ? =>
131 | """
132 | Get a single byte.
133 | """
134 | if _available < 1 then
135 | error
136 | end
137 | 
138 | _available = _available - 1
139 | let r = _chunks(0)?(0)?
140 | if _chunks(0)?.size() > 1 then
141 | _chunks(0)?.trim_in_place(1)
142 | else
143 | _chunks.shift()?
144 | end
145 | r
146 | 
147 | fun ref read_bytes(len: USize): (Array[U8] iso^ | Array[Array[U8] iso] iso^) ?
148 | =>
149 | _read_bytes(len)?._2
150 | 
151 | fun ref _read_bytes(len: USize):
152 | (USize, (Array[U8] iso^ | Array[Array[U8] iso] iso^)) ?
153 | =>
154 | """
155 | Return a number of bytes as either a contiguous array or an array of arrays
156 | """
157 | if len == 0 then
158 | return (0, recover Array[U8] end)
159 | end
160 | 
161 | if _available < len then
162 | error
163 | end
164 | 
165 | // TODO: rewrite to avoid allocation of out array if all data is in first
166 | // chunk
167 | _available = _available - len
168 | var out = recover Array[Array[U8] iso] end
169 | var i = USize(0)
170 | 
171 | while i < len do
172 | var data = _chunks.shift()?
173 | let avail = data.size()
174 | let need = len - i
175 | let copy_len = need.min(avail)
176 | 
177 | (let next_segment, data) = (consume data).chop(need)
178 | 
179 | if avail >= need then
180 | if data.size() > 0 then
181 | _chunks.unshift(consume data)
182 | end
183 | 
184 | if out.size() == 0 then
185 | return (copy_len, consume next_segment)
186 | else
187 | out.push(consume next_segment)
188 | break
189 | end
190 | else
191 | out.push(consume next_segment)
192 | end
193 | 
194 | i = i + copy_len
195 | end
196 | 
197 | (i, consume out)
198 | 
199 | fun ref read_contiguous_bytes(len: USize): Array[U8] iso^ ? =>
200 | """
201 | Return a block as a contiguous chunk of memory without copying if possible
202 | or throw an error.
203 | """
204 | // TODO: enhance to fall back to a copy if have non-contiguous data and
205 | // return an iso? Not possible because iso/val distinction doesn't exist at
206 | // runtime? Maybe need to enhance callers to be able to work with
207 | // non-contiguous memory?
208 | 
209 | if len == 0 then
210 | return recover Array[U8] end
211 | end
212 | 
213 | if _available < len then
214 | error
215 | end
216 | 
217 | let avail = _chunks(0)?.size()
218 | 
219 | // if we have enough data but not in a single contiguous chunk, call `block`
220 | // to copy chunks together
221 | if avail < len then
222 | return block(len)?
223 | end
224 | 
225 | var out = recover Array[Array[U8] iso] end
226 | 
227 | var data = _chunks.shift()?
228 | let need = len
229 | let copy_len = need.min(avail)
230 | 
231 | (let next_segment, data) = (consume data).chop(need)
232 | if data.size() > 0 then
233 | _chunks.unshift(consume data)
234 | end
235 | _available = _available - len
236 | next_segment
237 | 
--------------------------------------------------------------------------------
/pony-kafka/custombuffered/writer.pony:
--------------------------------------------------------------------------------
1 | /*
2 | 
3 | Copyright (C) 2016-2017, Sendence LLC
4 | Copyright (C) 2016-2017, The Pony Developers
5 | Copyright (c) 2014-2015, Causality Ltd.
6 | All rights reserved.
7 | 
8 | Redistribution and use in source and binary forms, with or without
9 | modification, are permitted provided that the following conditions are met:
10 | 
11 | 1. Redistributions of source code must retain the above copyright notice, this
12 | list of conditions and the following disclaimer.
13 | 2. Redistributions in binary form must reproduce the above copyright notice,
14 | this list of conditions and the following disclaimer in the documentation
15 | and/or other materials provided with the distribution.
16 | 
17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
18 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
21 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
24 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
26 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 | 
28 | */
29 | 
30 | class Writer
31 | """
32 | A buffer for building messages.
33 | 
34 | `Writer` provides a way to create byte sequences using common
35 | data encodings. The `Writer` manages the underlying arrays and
36 | sizes. It is useful for encoding data to send over a network or
37 | store in a file. Once a message has been built you can call `done()`
38 | to get the message's `ByteSeq`s, and you can then reuse the
39 | `Writer` for creating a new message.
40 | 
41 | For example, suppose we have a TCP-based network data protocol where
42 | messages consist of the following:
43 | 
44 | * `message_length` - the number of bytes in the message as a
45 | big-endian 32-bit integer
46 | * `list_size` - the number of items in the following list of items
47 | as a big-endian 32-bit integer
48 | * zero or more items of the following data:
49 | * a big-endian 32-bit floating point number
50 | * a string that starts with a big-endian 32-bit integer that
51 | specifies the length of the string, followed by a number of
52 | bytes that represent the string
53 | 
54 | A message would be something like this:
55 | 
56 | ```
57 | [message_length][list_size][float1][string1][float2][string2]...
58 | ```
59 | 
60 | The following program uses a write buffer to encode an array of
61 | tuples as a message of this type:
62 | 
63 | ```
64 | use "net"
65 | 
66 | actor Main
67 | new create(env: Env) =>
68 | let wb = Writer
69 | let messages = [[(F32(3597.82), "Anderson"), (F32(-7979.3), "Graham")],
70 | [(F32(3.14159), "Hopper"), (F32(-83.83), "Jones")]]
71 | for items in messages.values() do
72 | wb.i32_be(items.size().i32())
73 | for (f, s) in items.values() do
74 | wb.f32_be(f)
75 | wb.i32_be(s.size().i32())
76 | wb.write(s.array())
77 | end
78 | let wb_msg = Writer
79 | wb_msg.i32_be(wb.size().i32())
80 | wb_msg.writev(wb.done())
81 | env.out.writev(wb_msg.done())
82 | end
83 | ```
84 | """
85 | var _chunks: Array[ByteSeq] iso = recover Array[ByteSeq] end
86 | var _current: Array[U8] iso = recover Array[U8] end
87 | var _offset: USize = 0
88 | var _size: USize = 0
89 | 
90 | fun ref reserve_current(size': USize) =>
91 | """
92 | Reserve space for size' bytes in `_current`.
93 | """
94 | _check(size')
95 | 
96 | fun ref reserve_chunks(size': USize) =>
97 | """
98 | Reserve space for size' chunks.
99 | 
100 | This needs to be called again after every call to `done`,
101 | as `done` resets the chunks.
102 | """
103 | _chunks.reserve(size')
104 | 
105 | fun size(): USize =>
106 | _size
107 | 
108 | fun ref write_byte(data: U8) =>
109 | """
110 | Write a byte to the buffer.
111 | """
112 | _check(1)
113 | _byte(data)
114 | 
115 | fun ref write_two_bytes(data: U8, data2: U8) =>
116 | """
117 | Write two bytes to the buffer.
118 | """
119 | _check(2)
120 | _byte(data)
121 | _byte(data2)
122 | 
123 | fun ref write_four_bytes(data: U8, data2: U8, data3: U8, data4: U8)
124 | =>
125 | """
126 | Write four bytes to the buffer.
127 | """
128 | _check(4)
129 | _byte(data)
130 | _byte(data2)
131 | _byte(data3)
132 | _byte(data4)
133 | 
134 | fun ref write_eight_bytes(data: U8, data2: U8, data3: U8, data4: U8
135 | , data5: U8, data6: U8, data7: U8, data8: U8) =>
136 | """
137 | Write eight bytes to the buffer.
138 | """
139 | _check(8)
140 | _byte(data)
141 | _byte(data2)
142 | _byte(data3)
143 | _byte(data4)
144 | _byte(data5)
145 | _byte(data6)
146 | _byte(data7)
147 | _byte(data8)
148 | 
149 | fun ref write_sixteen_bytes(data: U8, data2: U8, data3: U8, data4: U8
150 | , data5: U8, data6: U8, data7: U8, data8: U8
151 | , data9: U8, data10: U8, data11: U8, data12: U8
152 | , data13: U8, data14: U8, data15: U8, data16: U8) =>
153 | """
154 | Write sixteen bytes to the buffer.
155 | """ 156 | _check(16) 157 | _byte(data) 158 | _byte(data2) 159 | _byte(data3) 160 | _byte(data4) 161 | _byte(data5) 162 | _byte(data6) 163 | _byte(data7) 164 | _byte(data8) 165 | _byte(data9) 166 | _byte(data10) 167 | _byte(data11) 168 | _byte(data12) 169 | _byte(data13) 170 | _byte(data14) 171 | _byte(data15) 172 | _byte(data16) 173 | 174 | // TODO: Ability to overwrite at a previous position (only if that position 175 | // used to be part of one of our accumulation iso's) 176 | // TODO: Copy small sized ByteSeq instead to minimize multiple small arrays 177 | // for IO calls 178 | fun ref write(data: ByteSeq) => 179 | """ 180 | Write a ByteSeq to the buffer. 181 | """ 182 | _append_current() 183 | _chunks.push(data) 184 | _size = _size + data.size() 185 | 186 | fun ref writev(data: ByteSeqIter) => 187 | """ 188 | Write ByteSeqs to the buffer. 189 | """ 190 | _append_current() 191 | for chunk in data.values() do 192 | _chunks.push(chunk) 193 | _size = _size + chunk.size() 194 | end 195 | 196 | fun ref done(): Array[ByteSeq] iso^ => 197 | """ 198 | Return an array of buffered ByteSeqs and reset the Writer's buffer. 199 | """ 200 | _append_current() 201 | _size = 0 202 | _chunks = recover Array[ByteSeq] end 203 | 204 | fun ref _append_current() => 205 | if _offset > 0 then 206 | _current.truncate(_offset) 207 | _offset = 0 208 | _chunks.push(_current = recover Array[U8] end) 209 | end 210 | 211 | fun ref _check(size': USize) => 212 | if (_current.size() - _offset) < size' then 213 | _current.undefined(_offset + size') 214 | end 215 | 216 | fun ref _byte(data: U8) => 217 | try 218 | _current(_offset)? = data 219 | _offset = _offset + 1 220 | _size = _size + 1 221 | end 222 | -------------------------------------------------------------------------------- /pony-kafka/customlogger/logger.pony: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Copyright (C) 2016-2017, Sendence LLC 4 | Copyright (C) 2016-2017, The Pony Developers 5 | Copyright (c) 2014-2015, Causality Ltd. 6 | All rights reserved. 7 | 8 | Redistribution and use in source and binary forms, with or without 9 | modification, are permitted provided that the following conditions are met: 10 | 11 | 1. Redistributions of source code must retain the above copyright notice, this 12 | list of conditions and the following disclaimer. 13 | 2. Redistributions in binary form must reproduce the above copyright notice, 14 | this list of conditions and the following disclaimer in the documentation 15 | and/or other materials provided with the distribution. 16 | 17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 18 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 19 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 20 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 21 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 22 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 23 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 24 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 26 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 | 28 | */ 29 | 30 | """ 31 | # Logger package 32 | 33 | Provides basic logging facilities. 
For most use cases, the `StringLogger` primitive
34 | will be used. On construction, it takes two parameters and a third, optional
35 | parameter:
36 | 
37 | * LogLevel below which no output will be logged
38 | * OutStream to log to
39 | * Optional LogFormatter
40 | 
41 | If you need to log arbitrary objects, take a look at `Logger[A]` which
42 | can log arbitrary objects so long as you provide it a lambda to convert from A
43 | to String.
44 | 
45 | ## API Philosophy
46 | 
47 | The API for using Logger is an attempt to abide by the Pony philosophy of first,
48 | be correct and, second, aim for performance. One of the ways that logging can
49 | slow your system down is by having to evaluate expressions to be logged
50 | whether they will be logged or not (based on the level of logging). For example:
51 | 
52 | `logger.log(Warn, name + ": " + reason)`
53 | 
54 | will construct a new String regardless of whether we will end up logging the
55 | message or not.
56 | 
57 | The Logger API uses boolean short circuiting to avoid this.
58 | 
59 | `logger(Warn) and logger.log(Warn, name + ": " + reason)`
60 | 
61 | will not evaluate the expression to be logged unless the log level Warn is at
62 | or above the overall log level for our logging. This is as close as we can get
63 | to zero cost for items that aren't going to end up being logged.
64 | 
65 | ## Example programs
66 | 
67 | ### String Logger
68 | 
69 | The following program will output 'my warn message' and 'my error message' to
70 | STDOUT in the standard default log format.
71 | 
72 | ```pony
73 | use "logger"
74 | 
75 | actor Main
76 | new create(env: Env) =>
77 | let logger = StringLogger(
78 | Warn,
79 | env.out)
80 | 
81 | logger(Fine) and logger.log(Fine, "my fine message")
82 | logger(Info) and logger.log(Info, "my info message")
83 | logger(Warn) and logger.log(Warn, "my warn message")
84 | logger(Error) and logger.log(Error, "my error message")
85 | ```
86 | 
87 | ### Logger[A]
88 | 
89 | The following program will output '42' to STDOUT in the standard default log
90 | format.
91 | 
92 | ```pony
93 | use "logger"
94 | 
95 | actor Main
96 | new create(env: Env) =>
97 | let logger = Logger[U64](Fine,
98 | env.out,
99 | {(a: U64): String => a.string() })
100 | 
101 | logger(Error) and logger.log(Error, U64(42))
102 | ```
103 | 
104 | ## Custom formatting your logs
105 | 
106 | The Logger package provides an interface for formatting logs. If you wish to
107 | override the standard formatting, you can create an object that implements:
108 | 
109 | ```pony
110 | interface val LogFormatter
111 | fun apply(
112 | level: LogLevel,
113 | msg: String,
114 | verbose: Bool,
115 | date: String, loc: SourceLoc): String
116 | ```
117 | 
118 | This can either be a class or because the interface only has a single apply
119 | method, can also be a lambda.
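For example, a formatter that keeps only the message text can be written
directly as a lambda (a minimal sketch; it ignores the verbose flag, date,
and source location):

```pony
use "logger"

actor Main
  new create(env: Env) =>
    let logger = StringLogger(Warn, env.out
      where formatter = {(level: LogLevel, msg: String, verbose: Bool,
        date: String, loc: SourceLoc): String => msg })

    logger(Error) and logger.log(Error, "my error message")
```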
120 | """ 121 | 122 | use "time" 123 | 124 | type LogLevel is 125 | ( Fine 126 | | Info 127 | | Warn 128 | | Error 129 | ) 130 | 131 | primitive Fine 132 | fun apply(): U32 => 0 133 | fun string(): String => "FINE" 134 | 135 | primitive Info 136 | fun apply(): U32 => 1 137 | fun string(): String => "INFO" 138 | 139 | primitive Warn 140 | fun apply(): U32 => 2 141 | fun string(): String => "WARN" 142 | 143 | primitive Error 144 | fun apply(): U32 => 3 145 | fun string(): String => "ERROR" 146 | 147 | class val Logger[A] 148 | let _level: LogLevel 149 | let _out: OutStream 150 | let _f: {(A): String} val 151 | let _formatter: LogFormatter 152 | let _verbose: Bool 153 | 154 | new val create(level: LogLevel, 155 | out: OutStream, 156 | f: {(A): String} val, 157 | verbose: Bool = ifdef debug then true else false end, 158 | formatter: LogFormatter = DefaultLogFormatter) 159 | => 160 | _level = level 161 | _out = out 162 | _f = f 163 | _formatter = formatter 164 | _verbose = verbose 165 | 166 | fun apply(level: LogLevel): Bool => 167 | level() >= _level() 168 | 169 | fun log(level: LogLevel, value: A, 170 | loc: SourceLoc = __loc): Bool 171 | => 172 | try 173 | let date = PosixDate(Time.seconds()).format("%Y-%m-%d %H:%M:%S")? 174 | _out.print(_formatter(level, _f(consume value), _verbose, date, loc)) 175 | true 176 | else 177 | false 178 | end 179 | 180 | primitive StringLogger 181 | fun apply(level: LogLevel, 182 | out: OutStream, 183 | verbose: Bool = ifdef debug then true else false end, 184 | formatter: LogFormatter = DefaultLogFormatter): Logger[String] 185 | => 186 | Logger[String](level, out, {(s: String): String => s }, verbose, formatter) 187 | 188 | interface val LogFormatter 189 | """ 190 | Interface required to implement custom log formatting. 191 | 192 | * `msg` is the logged message 193 | * `loc` is the location log was called from 194 | 195 | See `DefaultLogFormatter` for an example of how to implement a LogFormatter. 196 | """ 197 | fun apply(level: LogLevel, msg: String, verbose: Bool, date: String, loc: 198 | SourceLoc): String 199 | 200 | primitive DefaultLogFormatter is LogFormatter 201 | fun apply(level: LogLevel, msg: String, verbose: Bool, date: String, loc: 202 | SourceLoc): String 203 | => 204 | let file_name: String = loc.file() 205 | let file_linenum: String = loc.line().string() 206 | let file_linepos: String = loc.pos().string() 207 | let level_name: String = level.string() 208 | 209 | let output = recover String( 210 | if verbose then 211 | file_name.size() 212 | + file_linenum.size() 213 | + file_linepos.size() 214 | + 4 215 | else 216 | 0 217 | end 218 | + date.size() 219 | + level_name.size() 220 | + msg.size() 221 | + 4) end 222 | 223 | output.append(date) 224 | output.append(": ") 225 | output.append(level_name) 226 | output.append(": ") 227 | if verbose then 228 | output.append(file_name) 229 | output.append(":") 230 | output.append(file_linenum) 231 | output.append(":") 232 | output.append(file_linepos) 233 | output.append(": ") 234 | end 235 | output.append(msg) 236 | output 237 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | You want to contribute to Pony Kafka? Awesome. 4 | 5 | There are a number of ways to contribute to Pony Kafka. 
As this document is a little long, feel free to jump to the section that applies to you currently:
6 | 
7 | * [Bug report](#bug-report)
8 | * [How to contribute](#how-to-contribute)
9 | * [Pull request](#pull-request)
10 | 
11 | Additional notes regarding formatting:
12 | 
13 | * [Documentation formatting](#documentation-formatting)
14 | * [Code formatting](#code-formatting)
15 | * [File naming](#file-naming)
16 | 
17 | ## Bug report
18 | 
19 | First of all please [search existing issues](https://github.com/wallaroolabs/pony-kafka/issues) to make sure your issue hasn't already been reported. If you cannot find a suitable issue — [create a new one](https://github.com/WallarooLabs/pony-kafka/issues/new).
20 | 
21 | Provide the following details:
22 | 
23 | - short summary of what you were trying to achieve,
24 | - a code snippet causing the bug,
25 | - expected result,
26 | - actual results and
27 | - environment details: at least operating system version
28 | 
29 | If possible, try to isolate the problem and provide just enough code to demonstrate it. Add any related information which might help to fix the issue.
30 | 
31 | ## How to Contribute
32 | 
33 | We use a fairly standard GitHub pull request workflow. If you have already contributed to a project via GitHub pull request, you can skip this section and proceed to the [specific details of what we ask for in a pull request](#pull-request). If this is your first time contributing to a project via GitHub, read on.
34 | 
35 | Here is the basic GitHub workflow:
36 | 
37 | 1. Fork the Pony Kafka repo. You can do this via the GitHub website. This will result in you having your own copy of the Pony Kafka repo under your GitHub account.
38 | 2. Clone your Pony Kafka repo to your local machine
39 | 3. Make a branch for your change
40 | 4. Make your change on that branch
41 | 5. Push your change to your repo
42 | 6. Use the GitHub UI to open a PR
43 | 
44 | Some things to note that aren't immediately obvious to folks just starting out:
45 | 
46 | 1. Your fork doesn't automatically stay up to date with changes in the main repo.
47 | 2. Any changes you make on the branch you used for one PR will automatically appear in that PR, so if you want to open more than one PR, be sure to always create a different branch for each.
48 | 3. Weird things happen with commit history if you don't create your PR branches off of `master`, so always make sure you have the `master` branch checked out before creating a branch for a PR.
49 | 
50 | If you feel overwhelmed at any point, don't worry, it can be a lot to learn when you get started.
51 | 
52 | You can get help using GitHub via [the official documentation](https://help.github.com/). Some highlights include:
53 | 
54 | - [Fork A Repo](https://help.github.com/articles/fork-a-repo/)
55 | - [Creating a pull request](https://help.github.com/articles/creating-a-pull-request/)
56 | - [Syncing a fork](https://help.github.com/articles/syncing-a-fork/)
57 | 
58 | ## Pull request
59 | 
60 | Before issuing a pull request we ask that you squash all your commits into a single logical commit. While your PR is in review, we may ask for additional changes; please do not squash those commits while the review is underway. Once everything is good, we'll then ask you to further squash those commits before merging. We ask that you not squash while a review is underway as it can make it hard to follow what is going on.
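The squash itself is usually an interactive rebase. For example (assuming a hypothetical feature branch with a few commits on top of an up-to-date `master`):

```bash
git checkout my-feature-branch
git rebase --interactive master
# in the editor, keep the first commit as "pick", change the rest to
# "squash", then write a good commit message when prompted
git push --force-with-lease origin my-feature-branch
```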
Additionally, we ask that you:
61 | 
62 | * [Write a good commit message](http://chris.beams.io/posts/git-commit/)
63 | * Issue one pull request per feature. Don't lump unrelated changes together.
64 | 
65 | If you aren't sure how to squash multiple commits into one, Steve Klabnik wrote [a handy guide](http://blog.steveklabnik.com/posts/2012-11-08-how-to-squash-commits-in-a-github-pull-request) that you can refer to.
66 | 
67 | Once those conditions are met, the PR can be merged.
68 | 
69 | Please note, if your changes are purely to things like the README, CHANGELOG etc., you can add [skip ci] as the last line of your commit message and your PR won't be run through our continuous integration systems. We ask that you use [skip ci] where appropriate as it helps to get changes through CI faster and doesn't waste resources that TravisCI is kindly donating to the Open Source community.
70 | 
71 | ## Documentation formatting
72 | 
73 | When contributing to documentation, try to keep the following style guidelines in mind:
74 | 
75 | As much as possible, all documentation should be textual and in Markdown format. Diagrams are often needed to clarify a point. For any images, an original high-resolution source should be provided as well so updates can be made.
76 | 
77 | Documentation is not "source code." As such, it should not be wrapped at 80 columns. Documentation should be allowed to flow naturally until the end of a paragraph. It is expected that the reader will turn on soft wrapping as needed.
78 | 
79 | All code examples in documentation should be formatted in a fashion appropriate to the language in question.
80 | 
81 | All command line examples in documentation should be presented in a copy-and-paste friendly fashion. Assume the user is using the `bash` shell. GitHub formatting on long command lines can be unfriendly to copy-and-paste. Long command lines should be broken up using `\` so that each line is no more than 80 columns. Wrapping at 80 columns should result in a good display experience in GitHub. Additionally, continuation lines should be indented two spaces.
82 | 
83 | OK:
84 | 
85 | ```bash
86 | my_command --some-option foo --path-to-file ../../long/line/foo \
87 |   --some-other-option bar
88 | ```
89 | 
90 | Not OK:
91 | 
92 | ```bash
93 | my_command --some-option foo --path-to-file ../../long/line/foo --some-other-option bar
94 | ```
95 | 
96 | Wherever possible when writing documentation, favor full command options rather than short versions. Full flags are usually much easier to modify because the meaning is clearer.
97 | 
98 | OK:
99 | 
100 | ```bash
101 | my_command --messages 100
102 | ```
103 | 
104 | Not OK:
105 | 
106 | ```bash
107 | my_command -m 100
108 | ```
109 | 
110 | ## Code formatting
111 | 
112 | The basics:
113 | 
114 | * Indentation
115 | 
116 | We indent using spaces, not tabs. Indentation is language specific.
117 | 
118 | * Watch your whitespace!
119 | 
120 | Use an editor plugin to remove unused trailing whitespace, both at the end of a line and at the end of a file. By the same token, remember to leave a single newline at the end of each file. It makes printing files to the console much more pleasant.
121 | 
122 | * Line length
123 | 
124 | We all have different sized monitors. What might look good on yours might look awful on another. Be kind and wrap all lines at 80 columns unless you have a good reason not to.
125 | 
126 | * Reformatting code to meet standards
127 | 
128 | Try to avoid doing it.
A commit that changes the formatting for large chunks of a file makes for an ugly commit history when looking for changes. Don't commit code that doesn't conform to coding standards in the first place. If you do reformat code, make sure it is either standalone reformatting with no logic changes or confined solely to code whose logic you touched. For example, updating the indentation in a file? Do not make logic changes along with it. Editing a line that has extra whitespace at the end? Feel free to remove it. 129 | 130 | The details: 131 | 132 | All Pony sources should follow the [Pony standard library style guide](https://github.com/ponylang/ponyc/blob/master/STYLE_GUIDE.md). 133 | 134 | ## File naming 135 | 136 | Our Pony code follows the [Pony standard library file naming guidelines](https://github.com/ponylang/ponyc/blob/master/STYLE_GUIDE.md#naming). 137 | 138 | 139 | 140 | -------------------------------------------------------------------------------- /examples/simple/main.pony: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | 15 | */ 16 | 17 | use "net" 18 | use "collections" 19 | use "../../pony-kafka/customlogger" 20 | use "../../pony-kafka" 21 | use "random" 22 | 23 | actor Main is KafkaClientManager 24 | var _kc: (KafkaClient tag | None) = None 25 | let _env: Env 26 | let _logger: Logger[String] val 27 | 28 | new create(env: Env) => 29 | _env = env 30 | _logger = StringLogger(Fine, env.out) 31 | 32 | // create kafka config 33 | let kconf = 34 | recover iso 35 | let kc = KafkaConfig(_logger, "My Client" where 36 | use_snappy_java_framing' = false) 37 | kc.add_broker("127.0.0.1", 9092) 38 | // uncomment for producer only config 39 | // kc.add_topic_config("test") 40 | 41 | // uncomment for consumer only config and tell kafka to send messages to 42 | // consumers 43 | // kc.add_topic_config("test", KafkaConsumeOnly where 44 | // consumer_message_handler = recover KafkaRoundRobinConsumerMessageHandler end, 45 | // compression = KafkaGzipTopicCompression) 46 | 47 | // producer/consumer config and tell kafka to send messages to consumers 48 | kc.add_topic_config("test", KafkaProduceAndConsume where 49 | consumer_message_handler = recover 50 | KafkaRoundRobinConsumerMessageHandler end, compression = 51 | KafkaSnappyTopicCompression) 52 | 53 | kc 54 | end 55 | 56 | 57 | // create kafka client and register producer 58 | try 59 | let kc = KafkaClient(env.root as AmbientAuth, consume kconf, this) 60 | kc.register_producer(P(_logger)) 61 | _kc = kc 62 | end 63 | 64 | be receive_kafka_topics_partitions(client: KafkaClient, topic_partitions: Map[String, 65 | (KafkaTopicType, Set[KafkaPartitionId])] val) => 66 | match _kc 67 | | let kc: KafkaClient tag => 68 | let consumers = recover iso Array[KafkaConsumer tag] end 69 | consumers.push(C(_logger, "1")) 70 | consumers.push(C(_logger, "2")) 71 | consumers.push(C(_logger, "3")) 72 | 73 | kc.register_consumers("test", consume consumers) 74 | 
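    // now that the consumers are registered, resume consumption on every
    // topic and partition so the brokers start delivering messages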
kc.consumer_resume_all() 75 | end 76 | 77 | be kafka_client_error(client: KafkaClient, error_report: KafkaErrorReport) => 78 | None 79 | 80 | // kafka consumer actor 81 | actor C is KafkaConsumer 82 | let logger: Logger[String] 83 | let _name: String 84 | var _i: USize = 1 85 | 86 | 87 | new create(logger': Logger[String], name: String) => 88 | logger = logger' 89 | _name = name 90 | 91 | // behavior kafka calls for each message received that should be sent to this 92 | // actor 93 | be receive_kafka_message(client: KafkaClient, value: Array[U8] iso, key: (Array[U8] val | None), msg_metadata: KafkaMessageMetadata val, 94 | network_received_timestamp: U64) 95 | => 96 | logger(Fine) and logger.log(Fine, "Received kafka message") 97 | let m = String.from_array(consume value) 98 | 99 | logger.log(Info, "CONSUMER(" + _name + ")-MSG(" + _i.string() + 100 | "): Received Msg. topic: " + msg_metadata.get_topic() + ", partition: " + 101 | msg_metadata.get_partition_id().string() + ", offset: " + msg_metadata.get_offset().string() 102 | + ", value: " + m) 103 | 104 | _i = _i + 1 105 | 106 | 107 | // kafka producer actor 108 | actor P is KafkaProducer 109 | // variable to hold producer mapping for sending requests to broker 110 | // connections 111 | var _kafka_producer_mapping: (KafkaProducerMapping ref | None) = None 112 | 113 | let mt: Random = MT 114 | let logger: Logger[String] 115 | 116 | new create(logger': Logger[String]) => 117 | logger = logger' 118 | 119 | fun ref create_producer_mapping(client: KafkaClient, mapping: KafkaProducerMapping): 120 | (KafkaProducerMapping | None) 121 | => 122 | _kafka_producer_mapping = mapping 123 | 124 | fun ref producer_mapping(client: KafkaClient): (KafkaProducerMapping | None) => 125 | _kafka_producer_mapping 126 | 127 | fun ref _kafka_producer_throttled(client: KafkaClient, topic_partitions_throttled: Map[String, Set[KafkaPartitionId]] val) 128 | => 129 | None 130 | 131 | fun ref _kafka_producer_unthrottled(client: KafkaClient, topic_partitions_throttled: Map[String, Set[KafkaPartitionId]] val) 132 | => 133 | None 134 | 135 | be kafka_producer_ready(client: KafkaClient) => 136 | produce_data() 137 | 138 | be kafka_message_delivery_report(client: KafkaClient, delivery_report: KafkaProducerDeliveryReport) 139 | => 140 | if not (delivery_report.status is ErrorNone) then 141 | logger(Error) and logger.log(Error, "received delivery report: " + 142 | delivery_report.string()) 143 | else 144 | logger(Fine) and logger.log(Fine, "received delivery report: " + 145 | delivery_report.string()) 146 | end 147 | 148 | // example produce data function 149 | fun ref produce_data() => 150 | logger(Info) and logger.log(Info, "asked to produce data") 151 | let d = generate_data(100) 152 | let d2 = generate_data(1000) 153 | 154 | logger.log(Info, "PRODUCER: Sending messages to topic: " + "test") 155 | for (o, v, k) in d.values() do 156 | try 157 | logger.log(Info, "PRODUCER: Sending message: " + (v as String)) 158 | end 159 | end 160 | try 161 | let ret = (_kafka_producer_mapping as KafkaProducerMapping 162 | ref).send_topic_messages("test", d) 163 | if ret isnt None then error end 164 | else 165 | logger(Error) and logger.log(Error, "error sending messages to brokers") 166 | end 167 | 168 | logger.log(Info, "PRODUCER: Sending messages to topic: " + "test") 169 | for (o, v, k) in d2.values() do 170 | try 171 | logger.log(Info, "PRODUCER: Sending message: " + (v as String)) 172 | end 173 | try 174 | let ret = (_kafka_producer_mapping as KafkaProducerMapping 175 | 
ref).send_topic_message("test", o, v, k)
176 |         if ret isnt None then error end
177 |       else
178 |         logger(Error) and logger.log(Error, "error sending messages to brokers")
179 |       end
180 |     end
181 | 
182 |   // generate a batch of example data: a "begin" marker, up to ten randomly
183 |   // chosen filler messages, and a "done" marker
184 |   fun ref generate_data(start: USize = 0): Array[(Any tag, (ByteSeq |
185 |     Array[ByteSeq] val), (None | ByteSeq | Array[ByteSeq] val))]
186 |   =>
187 |     let msgs = Array[(Any tag, (ByteSeq | Array[ByteSeq] val), (None | ByteSeq |
188 |       Array[ByteSeq] val))]
189 | 
190 |     // between 0 and 10 filler messages, chosen at random
191 |     let num_msgs = mt.int(11)
192 |     var i: USize = start
193 |     var a = "(" + i.string() + ") - 2begin"
194 |     msgs.push((a, a, None))
195 |     i = i + 1
196 | 
197 |     var j: U64 = 0
198 |     while j < num_msgs do
199 |       a = "(" + i.string() + ") - 2this is " +
200 |         (if (j % 2) == 0 then "another" else "yet another" end) + " test"
201 |       msgs.push((a, a, None))
202 |       i = i + 1
203 |       j = j + 1
204 |     end
205 | 
206 |     a = "(" + i.string() + ") - 2done"
207 |     msgs.push((a, a, None))
208 |     i = i + 1
209 | 
210 |     msgs
211 | 
-------------------------------------------------------------------------------- /pony-kafka/custombuffered/codecs/big_endian_decoder.pony: --------------------------------------------------------------------------------
1 | /*
2 | 
3 | Copyright (C) 2016-2017, Sendence LLC
4 | Copyright (C) 2016-2017, The Pony Developers
5 | Copyright (c) 2014-2015, Causality Ltd.
6 | All rights reserved.
7 | 
8 | Redistribution and use in source and binary forms, with or without
9 | modification, are permitted provided that the following conditions are met:
10 | 
11 | 1. Redistributions of source code must retain the above copyright notice, this
12 | list of conditions and the following disclaimer.
13 | 2. Redistributions in binary form must reproduce the above copyright notice,
14 | this list of conditions and the following disclaimer in the documentation
15 | and/or other materials provided with the distribution.
16 | 
17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
18 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 | DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 21 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 22 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 23 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 24 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 26 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 | 28 | */ 29 | 30 | use ".." 31 | use "../../utils/bool_converter" 32 | use "itertools" 33 | 34 | primitive BigEndianDecoder 35 | fun u8(rb: Reader): U8 ? => 36 | """ 37 | Get a U8. Raise an error if there isn't enough data. 38 | """ 39 | rb.read_byte()? 40 | 41 | fun bool(rb: Reader): Bool ? => 42 | """ 43 | Get a Bool. Raise an error if there isn't enough data. 44 | """ 45 | BoolConverter.u8_to_bool(u8(rb)?) 46 | 47 | fun i8(rb: Reader): I8 ? => 48 | """ 49 | Get an I8. 50 | """ 51 | u8(rb)?.i8() 52 | 53 | fun u16(rb: Reader): U16 ? => 54 | """ 55 | Get a big-endian U16. 56 | """ 57 | let data = rb.read_bytes(2)? 58 | 59 | _decode_u16(consume data)? 60 | 61 | fun _decode_u16(data: (Array[U8] val | Array[Array[U8] val] val | Array[Array[U8] iso] val)): U16 ? => 62 | match data 63 | | let d: Array[U8] val => 64 | (d(0)?.u16() << 8) or d(1)?.u16() 65 | | let d: (Array[Array[U8] val] val | Array[Array[U8] iso] val) => 66 | _decode_u16_array(d)? 67 | end 68 | 69 | fun _decode_u16_array(data: (Array[Array[U8] val] val | Array[Array[U8] iso] val)): U16 ? => 70 | var out: U16 = 0 71 | let iters = Array[Iterator[U8]] 72 | match data 73 | | let arr: Array[Array[U8] val] val => 74 | for a in arr.values() do 75 | iters.push(a.values()) 76 | end 77 | | let arr: Array[Array[U8] iso] val => 78 | for a in arr.values() do 79 | iters.push(a.values()) 80 | end 81 | end 82 | let iter_all = Iter[U8].chain(iters.values()) 83 | while iter_all.has_next() do 84 | out = (out << 8) or iter_all.next()?.u16() 85 | end 86 | out 87 | 88 | fun i16(rb: Reader): I16 ? => 89 | """ 90 | Get a big-endian I16. 91 | """ 92 | u16(rb)?.i16() 93 | 94 | fun u32(rb: Reader): U32 ? => 95 | """ 96 | Get a big-endian U32. 97 | """ 98 | let data = rb.read_bytes(4)? 99 | 100 | _decode_u32(consume data)? 101 | 102 | fun _decode_u32(data: (Array[U8] val | Array[Array[U8] val] val | Array[Array[U8] iso] val)): U32 ? => 103 | match data 104 | | let d: Array[U8] val => 105 | (d(0)?.u32() << 24) or (d(1)?.u32() << 16) or 106 | (d(2)?.u32() << 8) or d(3)?.u32() 107 | | let d: (Array[Array[U8] val] val | Array[Array[U8] iso] val) => 108 | _decode_u32_array(d)? 109 | end 110 | 111 | fun _decode_u32_array(data: (Array[Array[U8] val] val | Array[Array[U8] iso] val)): U32 ? => 112 | var out: U32 = 0 113 | let iters = Array[Iterator[U8]] 114 | match data 115 | | let arr: Array[Array[U8] val] val => 116 | for a in arr.values() do 117 | iters.push(a.values()) 118 | end 119 | | let arr: Array[Array[U8] iso] val => 120 | for a in arr.values() do 121 | iters.push(a.values()) 122 | end 123 | end 124 | let iter_all = Iter[U8].chain(iters.values()) 125 | while iter_all.has_next() do 126 | out = (out << 8) or iter_all.next()?.u32() 127 | end 128 | out 129 | 130 | fun i32(rb: Reader): I32 ? => 131 | """ 132 | Get a big-endian I32. 133 | """ 134 | u32(rb)?.i32() 135 | 136 | fun u64(rb: Reader): U64 ? => 137 | """ 138 | Get a big-endian U64. 139 | """ 140 | let data = rb.read_bytes(8)? 141 | 142 | _decode_u64(consume data)? 
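  // Illustrative note (added commentary, not part of the wire format): the
  // _decode_* helpers below fold bytes into the result from the most
  // significant end. For example, the big-endian byte sequence
  // [0x01; 0x02; 0x03; 0x04] decodes as
  //   (0x01 << 24) or (0x02 << 16) or (0x03 << 8) or 0x04 == 0x01020304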
143 | 
144 |   fun _decode_u64(data: (Array[U8] val | Array[Array[U8] val] val | Array[Array[U8] iso] val)): U64 ? =>
145 |     match data
146 |     | let d: Array[U8] val =>
147 |       (d(0)?.u64() << 56) or (d(1)?.u64() << 48) or
148 |       (d(2)?.u64() << 40) or (d(3)?.u64() << 32) or
149 |       (d(4)?.u64() << 24) or (d(5)?.u64() << 16) or
150 |       (d(6)?.u64() << 8) or d(7)?.u64()
151 |     | let d: (Array[Array[U8] val] val | Array[Array[U8] iso] val) =>
152 |       _decode_u64_array(d)?
153 |     end
154 | 
155 |   fun _decode_u64_array(data: (Array[Array[U8] val] val | Array[Array[U8] iso] val)): U64 ? =>
156 |     var out: U64 = 0
157 |     let iters = Array[Iterator[U8]]
158 |     match data
159 |     | let arr: Array[Array[U8] val] val =>
160 |       for a in arr.values() do
161 |         iters.push(a.values())
162 |       end
163 |     | let arr: Array[Array[U8] iso] val =>
164 |       for a in arr.values() do
165 |         iters.push(a.values())
166 |       end
167 |     end
168 |     let iter_all = Iter[U8].chain(iters.values())
169 |     while iter_all.has_next() do
170 |       out = (out << 8) or iter_all.next()?.u64()
171 |     end
172 |     out
173 | 
174 |   fun i64(rb: Reader): I64 ? =>
175 |     """
176 |     Get a big-endian I64.
177 |     """
178 |     u64(rb)?.i64()
179 | 
180 | 
181 |   fun u128(rb: Reader): U128 ? =>
182 |     """
183 |     Get a big-endian U128.
184 |     """
185 |     let data = rb.read_bytes(16)?
186 | 
187 |     _decode_u128(consume data)?
188 | 
189 |   fun _decode_u128(data: (Array[U8] val | Array[Array[U8] val] val | Array[Array[U8] iso] val)): U128 ? =>
190 |     match data
191 |     | let d: Array[U8] val =>
192 |       (d(0)?.u128() << 120) or (d(1)?.u128() << 112) or
193 |       (d(2)?.u128() << 104) or (d(3)?.u128() << 96) or
194 |       (d(4)?.u128() << 88) or (d(5)?.u128() << 80) or
195 |       (d(6)?.u128() << 72) or (d(7)?.u128() << 64) or
196 |       (d(8)?.u128() << 56) or (d(9)?.u128() << 48) or
197 |       (d(10)?.u128() << 40) or (d(11)?.u128() << 32) or
198 |       (d(12)?.u128() << 24) or (d(13)?.u128() << 16) or
199 |       (d(14)?.u128() << 8) or d(15)?.u128()
200 |     | let d: (Array[Array[U8] val] val | Array[Array[U8] iso] val) =>
201 |       _decode_u128_array(d)?
202 |     end
203 | 
204 |   fun _decode_u128_array(data: (Array[Array[U8] val] val | Array[Array[U8] iso] val)): U128 ? =>
205 |     var out: U128 = 0
206 |     let iters = Array[Iterator[U8]]
207 |     match data
208 |     | let arr: Array[Array[U8] val] val =>
209 |       for a in arr.values() do
210 |         iters.push(a.values())
211 |       end
212 |     | let arr: Array[Array[U8] iso] val =>
213 |       for a in arr.values() do
214 |         iters.push(a.values())
215 |       end
216 |     end
217 |     let iter_all = Iter[U8].chain(iters.values())
218 |     while iter_all.has_next() do
219 |       out = (out << 8) or iter_all.next()?.u128()
220 |     end
221 |     out
222 | 
223 |   fun i128(rb: Reader): I128 ? =>
224 |     """
225 |     Get a big-endian I128.
226 |     """
227 |     u128(rb)?.i128()
228 | 
229 |   fun f32(rb: Reader): F32 ? =>
230 |     """
231 |     Get a big-endian F32.
232 |     """
233 |     F32.from_bits(u32(rb)?)
234 | 
235 |   fun f64(rb: Reader): F64 ? =>
236 |     """
237 |     Get a big-endian F64.
238 |     """
239 |     F64.from_bits(u64(rb)?)
240 | 
241 |   fun peek_u8(rb: PeekableReader box, offset: USize = 0): U8 ? =>
242 |     """
243 |     Peek at a U8 at the given offset. Raise an error if there isn't enough
244 |     data.
245 |     """
246 |     rb.peek_byte(offset)?
247 | 
248 |   fun peek_i8(rb: PeekableReader box, offset: USize = 0): I8 ? =>
249 |     """
250 |     Peek at an I8.
251 |     """
252 |     peek_u8(rb, offset)?.i8()
253 | 
254 |   fun peek_u16(rb: PeekableReader box, offset: USize = 0): U16 ? =>
255 |     """
256 |     Peek at a big-endian U16.
257 |     """
258 |     let data = rb.peek_bytes(2, offset)?
259 | 
260 |     _decode_u16(data)?
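  // Usage sketch (assumes a hypothetical `rb` that is both a `Reader` and a
  // `PeekableReader` with at least two buffered bytes): the peek_* variants
  // inspect bytes at an offset without consuming them, so a peek followed by
  // a read of the same width yields the same value:
  //
  //   let ahead = BigEndianDecoder.peek_u16(rb)?
  //   let value = BigEndianDecoder.u16(rb)?  // ahead == value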
261 | 
262 |   fun peek_i16(rb: PeekableReader box, offset: USize = 0): I16 ? =>
263 |     """
264 |     Peek at a big-endian I16.
265 |     """
266 |     peek_u16(rb, offset)?.i16()
267 | 
268 |   fun peek_u32(rb: PeekableReader box, offset: USize = 0): U32 ? =>
269 |     """
270 |     Peek at a big-endian U32.
271 |     """
272 |     let data = rb.peek_bytes(4, offset)?
273 | 
274 |     _decode_u32(data)?
275 | 
276 |   fun peek_i32(rb: PeekableReader box, offset: USize = 0): I32 ? =>
277 |     """
278 |     Peek at a big-endian I32.
279 |     """
280 |     peek_u32(rb, offset)?.i32()
281 | 
282 |   fun peek_u64(rb: PeekableReader box, offset: USize = 0): U64 ? =>
283 |     """
284 |     Peek at a big-endian U64.
285 |     """
286 |     let data = rb.peek_bytes(8, offset)?
287 | 
288 |     _decode_u64(data)?
289 | 
290 |   fun peek_i64(rb: PeekableReader box, offset: USize = 0): I64 ? =>
291 |     """
292 |     Peek at a big-endian I64.
293 |     """
294 |     peek_u64(rb, offset)?.i64()
295 | 
296 |   fun peek_u128(rb: PeekableReader box, offset: USize = 0): U128 ? =>
297 |     """
298 |     Peek at a big-endian U128.
299 |     """
300 |     let data = rb.peek_bytes(16, offset)?
301 | 
302 |     _decode_u128(data)?
303 | 
304 |   fun peek_i128(rb: PeekableReader box, offset: USize = 0): I128 ? =>
305 |     """
306 |     Peek at a big-endian I128.
307 |     """
308 |     peek_u128(rb, offset)?.i128()
309 | 
310 |   fun peek_f32(rb: PeekableReader box, offset: USize = 0): F32 ? =>
311 |     """
312 |     Peek at a big-endian F32.
313 |     """
314 |     F32.from_bits(peek_u32(rb, offset)?)
315 | 
316 |   fun peek_f64(rb: PeekableReader box, offset: USize = 0): F64 ? =>
317 |     """
318 |     Peek at a big-endian F64.
319 |     """
320 |     F64.from_bits(peek_u64(rb, offset)?)
321 | 
-------------------------------------------------------------------------------- /pony-kafka/custombuffered/codecs/little_endian_decoder.pony: --------------------------------------------------------------------------------
1 | /*
2 | 
3 | Copyright (C) 2016-2017, Sendence LLC
4 | Copyright (C) 2016-2017, The Pony Developers
5 | Copyright (c) 2014-2015, Causality Ltd.
6 | All rights reserved.
7 | 
8 | Redistribution and use in source and binary forms, with or without
9 | modification, are permitted provided that the following conditions are met:
10 | 
11 | 1. Redistributions of source code must retain the above copyright notice, this
12 | list of conditions and the following disclaimer.
13 | 2. Redistributions in binary form must reproduce the above copyright notice,
14 | this list of conditions and the following disclaimer in the documentation
15 | and/or other materials provided with the distribution.
16 | 
17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
18 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
21 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
24 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
26 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 | 
28 | */
29 | 
30 | use ".."
31 | use "../../utils/bool_converter"
32 | use "itertools"
33 | 
34 | primitive LittleEndianDecoder
35 |   fun u8(rb: Reader): U8 ? =>
36 |     """
37 |     Get a U8.
Raise an error if there isn't enough data. 38 | """ 39 | rb.read_byte()? 40 | 41 | fun bool(rb: Reader): Bool ? => 42 | """ 43 | Get a Bool. Raise an error if there isn't enough data. 44 | """ 45 | BoolConverter.u8_to_bool(u8(rb)?) 46 | 47 | fun i8(rb: Reader): I8 ? => 48 | """ 49 | Get an I8. 50 | """ 51 | u8(rb)?.i8() 52 | 53 | fun u16(rb: Reader): U16 ? => 54 | """ 55 | Get a little-endian U16. 56 | """ 57 | let data = rb.read_bytes(2)? 58 | 59 | _decode_u16(consume data)? 60 | 61 | fun _decode_u16(data: (Array[U8] val | Array[Array[U8] val] val | Array[Array[U8] iso] val)): U16 ? => 62 | match data 63 | | let d: Array[U8] val => 64 | (d(1)?.u16() << 8) or d(0)?.u16() 65 | | let d: (Array[Array[U8] val] val | Array[Array[U8] iso] val) => 66 | _decode_u16_array(d)? 67 | end 68 | 69 | fun _decode_u16_array(data: (Array[Array[U8] val] val | Array[Array[U8] iso] val)): U16 ? => 70 | var out: U16 = 0 71 | let iters = Array[Iterator[U8]] 72 | match data 73 | | let arr: Array[Array[U8] val] val => 74 | for a in arr.values() do 75 | iters.push(a.values()) 76 | end 77 | | let arr: Array[Array[U8] iso] val => 78 | for a in arr.values() do 79 | iters.push(a.values()) 80 | end 81 | end 82 | let iter_all = Iter[U8].chain(iters.values()) 83 | var i: U16 = 0 84 | while iter_all.has_next() do 85 | out = out or (iter_all.next()?.u16() << (i * 8)) 86 | i = i + 1 87 | end 88 | out 89 | 90 | fun i16(rb: Reader): I16 ? => 91 | """ 92 | Get a little-endian I16. 93 | """ 94 | u16(rb)?.i16() 95 | 96 | fun u32(rb: Reader): U32 ? => 97 | """ 98 | Get a little-endian U32. 99 | """ 100 | let data = rb.read_bytes(4)? 101 | 102 | _decode_u32(consume data)? 103 | 104 | fun _decode_u32(data: (Array[U8] val | Array[Array[U8] val] val | Array[Array[U8] iso] val)): U32 ? => 105 | match data 106 | | let d: Array[U8] val => 107 | (d(3)?.u32() << 24) or (d(2)?.u32() << 16) or 108 | (d(1)?.u32() << 8) or d(0)?.u32() 109 | | let d: (Array[Array[U8] val] val | Array[Array[U8] iso] val) => 110 | _decode_u32_array(d)? 111 | end 112 | 113 | fun _decode_u32_array(data: (Array[Array[U8] val] val | Array[Array[U8] iso] val)): U32 ? => 114 | var out: U32 = 0 115 | let iters = Array[Iterator[U8]] 116 | match data 117 | | let arr: Array[Array[U8] val] val => 118 | for a in arr.values() do 119 | iters.push(a.values()) 120 | end 121 | | let arr: Array[Array[U8] iso] val => 122 | for a in arr.values() do 123 | iters.push(a.values()) 124 | end 125 | end 126 | let iter_all = Iter[U8].chain(iters.values()) 127 | var i: U32 = 0 128 | while iter_all.has_next() do 129 | out = out or (iter_all.next()?.u32() << (i * 8)) 130 | i = i + 1 131 | end 132 | out 133 | 134 | fun i32(rb: Reader): I32 ? => 135 | """ 136 | Get a little-endian I32. 137 | """ 138 | u32(rb)?.i32() 139 | 140 | fun u64(rb: Reader): U64 ? => 141 | """ 142 | Get a little-endian U64. 143 | """ 144 | let data = rb.read_bytes(8)? 145 | 146 | _decode_u64(consume data)? 147 | 148 | fun _decode_u64(data: (Array[U8] val | Array[Array[U8] val] val | Array[Array[U8] iso] val)): U64 ? => 149 | match data 150 | | let d: Array[U8] val => 151 | (d(7)?.u64() << 56) or (d(6)?.u64() << 48) or 152 | (d(5)?.u64() << 40) or (d(4)?.u64() << 32) or 153 | (d(3)?.u64() << 24) or (d(2)?.u64() << 16) or 154 | (d(1)?.u64() << 8) or d(0)?.u64() 155 | | let d: (Array[Array[U8] val] val | Array[Array[U8] iso] val) => 156 | _decode_u64_array(d)? 157 | end 158 | 159 | fun _decode_u64_array(data: (Array[Array[U8] val] val | Array[Array[U8] iso] val)): U64 ? 
=> 160 | var out: U64 = 0 161 | let iters = Array[Iterator[U8]] 162 | match data 163 | | let arr: Array[Array[U8] val] val => 164 | for a in arr.values() do 165 | iters.push(a.values()) 166 | end 167 | | let arr: Array[Array[U8] iso] val => 168 | for a in arr.values() do 169 | iters.push(a.values()) 170 | end 171 | end 172 | let iter_all = Iter[U8].chain(iters.values()) 173 | var i: U64 = 0 174 | while iter_all.has_next() do 175 | out = out or (iter_all.next()?.u64() << (i * 8)) 176 | i = i + 1 177 | end 178 | out 179 | 180 | fun i64(rb: Reader): I64 ? => 181 | """ 182 | Get a little-endian I64. 183 | """ 184 | u64(rb)?.i64() 185 | 186 | fun u128(rb: Reader): U128 ? => 187 | """ 188 | Get a little-endian U128. 189 | """ 190 | let data = rb.read_bytes(16)? 191 | 192 | _decode_u128(consume data)? 193 | 194 | fun _decode_u128(data: (Array[U8] val | Array[Array[U8] val] val | Array[Array[U8] iso] val)): U128 ? => 195 | match data 196 | | let d: Array[U8] val => 197 | (d(15)?.u128() << 120) or (d(14)?.u128() << 112) or 198 | (d(13)?.u128() << 104) or (d(12)?.u128() << 96) or 199 | (d(11)?.u128() << 88) or (d(10)?.u128() << 80) or 200 | (d(9)?.u128() << 72) or (d(8)?.u128() << 64) or 201 | (d(7)?.u128() << 56) or (d(6)?.u128() << 48) or 202 | (d(5)?.u128() << 40) or (d(4)?.u128() << 32) or 203 | (d(3)?.u128() << 24) or (d(2)?.u128() << 16) or 204 | (d(1)?.u128() << 8) or d(0)?.u128() 205 | | let d: (Array[Array[U8] val] val | Array[Array[U8] iso] val) => 206 | _decode_u128_array(d)? 207 | end 208 | 209 | fun _decode_u128_array(data: (Array[Array[U8] val] val | Array[Array[U8] iso] val)): U128 ? => 210 | var out: U128 = 0 211 | let iters = Array[Iterator[U8]] 212 | match data 213 | | let arr: Array[Array[U8] val] val => 214 | for a in arr.values() do 215 | iters.push(a.values()) 216 | end 217 | | let arr: Array[Array[U8] iso] val => 218 | for a in arr.values() do 219 | iters.push(a.values()) 220 | end 221 | end 222 | let iter_all = Iter[U8].chain(iters.values()) 223 | var i: U128 = 0 224 | while iter_all.has_next() do 225 | out = out or (iter_all.next()?.u128() << (i * 8)) 226 | i = i + 1 227 | end 228 | out 229 | 230 | fun i128(rb: Reader): I128 ? => 231 | """ 232 | Get a little-endian I128. 233 | """ 234 | u128(rb)?.i128() 235 | 236 | fun f32(rb: Reader): F32 ? => 237 | """ 238 | Get a little-endian F32. 239 | """ 240 | F32.from_bits(u32(rb)?) 241 | 242 | fun f64(rb: Reader): F64 ? => 243 | """ 244 | Get a little-endian F64. 245 | """ 246 | F64.from_bits(u64(rb)?) 247 | 248 | fun peek_u8(rb: PeekableReader, offset: USize = 0): U8 ? => 249 | """ 250 | Peek at a U8 at the given offset. Raise an error if there isn't enough 251 | data. 252 | """ 253 | rb.peek_byte(offset)? 254 | 255 | fun peek_i8(rb: PeekableReader, offset: USize = 0): I8 ? => 256 | """ 257 | Peek at an I8. 258 | """ 259 | peek_u8(rb, offset)?.i8() 260 | 261 | fun peek_u16(rb: PeekableReader, offset: USize = 0): U16 ? => 262 | """ 263 | Peek at a little-endian U16. 264 | """ 265 | let data = rb.peek_bytes(2, offset)? 266 | 267 | _decode_u16(data)? 268 | 269 | fun peek_i16(rb: PeekableReader, offset: USize = 0): I16 ? => 270 | """ 271 | Peek at a little-endian I16. 272 | """ 273 | peek_u16(rb, offset)?.i16() 274 | 275 | fun peek_u32(rb: PeekableReader, offset: USize = 0): U32 ? => 276 | """ 277 | Peek at a little-endian U32. 278 | """ 279 | let data = rb.peek_bytes(4, offset)? 280 | 281 | _decode_u32(data)? 282 | 283 | fun peek_i32(rb: PeekableReader, offset: USize = 0): I32 ? => 284 | """ 285 | Peek at a little-endian I32. 
286 | """ 287 | peek_u32(rb, offset)?.i32() 288 | 289 | fun peek_u64(rb: PeekableReader, offset: USize = 0): U64 ? => 290 | """ 291 | Peek at a little-endian U64. 292 | """ 293 | let data = rb.peek_bytes(8, offset)? 294 | 295 | _decode_u64(data)? 296 | 297 | fun peek_i64(rb: PeekableReader, offset: USize = 0): I64 ? => 298 | """ 299 | Peek at a little-endian I64. 300 | """ 301 | peek_u64(rb, offset)?.i64() 302 | 303 | fun peek_u128(rb: PeekableReader, offset: USize = 0): U128 ? => 304 | """ 305 | Peek at a little-endian U128. 306 | """ 307 | let data = rb.peek_bytes(16, offset)? 308 | 309 | _decode_u128(data)? 310 | 311 | fun peek_i128(rb: PeekableReader, offset: USize = 0): I128 ? => 312 | """ 313 | Peek at a little-endian I128. 314 | """ 315 | peek_u128(rb, offset)?.i128() 316 | 317 | fun peek_f32(rb: PeekableReader, offset: USize = 0): F32 ? => 318 | """ 319 | Peek at a little-endian F32. 320 | """ 321 | F32.from_bits(peek_u32(rb, offset)?) 322 | 323 | fun peek_f64(rb: PeekableReader, offset: USize = 0): F64 ? => 324 | """ 325 | Peek at a little-endian F64. 326 | """ 327 | F64.from_bits(peek_u64(rb, offset)?) 328 | -------------------------------------------------------------------------------- /pony-kafka/compression/lz4.pony: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | 15 | */ 16 | 17 | use "../customlogger" 18 | use "lib:lz4" 19 | 20 | use @LZ4F_compressBound[USize](src_size: USize, prefs_ptr: Pointer[U8]) 21 | use @LZ4F_isError[U32](err: LZ4FError) 22 | use @LZ4F_getErrorName[Pointer[U8]](err: LZ4FError) 23 | use @LZ4F_createCompressionContext[LZ4FError]( 24 | cctx_p: Pointer[LZ4FCompressionContext], version: U32) 25 | use @LZ4F_compressBegin[USize](cctx: LZ4FCompressionContext, 26 | buf: Pointer[U8] tag, buf_size: USize, pref_p: MaybePointer[LZ4FPreferences]) 27 | use @LZ4F_freeCompressionContext[LZ4FError](cctx: LZ4FCompressionContext) 28 | use @LZ4F_createDecompressionContext[LZ4FError]( 29 | dctx_p: Pointer[LZ4FDecompressionContext], version: U32) 30 | use @LZ4F_getFrameInfo[LZ4FError](dctx: LZ4FDecompressionContext, 31 | frame_info: MaybePointer[LZ4FFrameInfo], buf: Pointer[U8] tag, 32 | buf_size: Pointer[USize]) 33 | use @LZ4F_freeDecompressionContext[LZ4FError](dctx: LZ4FDecompressionContext) 34 | 35 | // TODO: Remove USize from union for buf pointer 36 | use @LZ4F_compressUpdate[USize](cctx: LZ4FCompressionContext, 37 | buf: (USize | Pointer[U8] tag), buf_size: USize, src_buf: Pointer[U8] tag, 38 | src_size: USize, copts_p: MaybePointer[LZ4FCompressOptions]) 39 | use @LZ4F_compressEnd[USize](cctx: LZ4FCompressionContext, 40 | buf: (USize | Pointer[U8] tag), buf_size: USize, 41 | copts_p: MaybePointer[LZ4FCompressOptions]) 42 | use @LZ4F_decompress[USize](dctx: LZ4FDecompressionContext, 43 | buf: (USize | Pointer[U8] tag), buf_size: Pointer[USize], 44 | src_buf: (USize | Pointer[U8] tag), src_size: Pointer[USize], 45 | copts_p: MaybePointer[LZ4FDecompressOptions]) 46 | 47 | type LZ4FError is USize 48 | 49 | primitive LZ4FVersion 50 | fun apply(): U32 => 100 51 | 52 | primitive LZ4FBlockLinked 53 | fun apply(): U32 => 0 54 | 55 | primitive LZ4FBlockIndependent 56 | fun apply(): U32 => 1 57 | 58 | type LZ4FCompressionContext is Pointer[U8] 59 | type LZ4FDecompressionContext is Pointer[U8] 60 | 61 | struct LZ4FDecompressOptions 62 | var stable_dst: U32 = 0 63 | var reserved_1: U32 = 0 64 | var reserved_2: U32 = 0 65 | var reserved_3: U32 = 0 66 | 67 | new create() => None 68 | 69 | struct LZ4FCompressOptions 70 | var stable_src: U32 = 0 71 | var reserved_1: U32 = 0 72 | var reserved_2: U32 = 0 73 | var reserved_3: U32 = 0 74 | 75 | new create() => None 76 | 77 | struct LZ4FFrameInfo 78 | var block_size: U32 = 0 79 | var block_mode: U32 = 0 80 | var content_checksum: U32 = 0 81 | var frame_type: U32 = 0 82 | var content_size: U64 = 0 83 | var reserved_1: U32 = 0 84 | var reserved_2: U32 = 0 85 | 86 | new create() => None 87 | 88 | struct LZ4FPreferences 89 | embed frame_info: LZ4FFrameInfo = LZ4FFrameInfo 90 | var compression_level: I32 = 0 91 | var auto_flush: U32 = 0 92 | var reserved_1: U32 = 0 93 | var reserved_2: U32 = 0 94 | var reserved_3: U32 = 0 95 | var reserved_4: U32 = 0 96 | 97 | new create() => None 98 | 99 | primitive LZ4Compressor 100 | fun compress(logger: Logger[String], data: ByteSeq, 101 | prefs: LZ4FPreferences = LZ4FPreferences, 102 | copts: LZ4FCompressOptions = LZ4FCompressOptions): Array[U8] iso^ ? 103 | => 104 | LZ4.compress_array(logger, recover val [data] end, data.size(), prefs, 105 | copts)? 106 | 107 | fun compress_array(logger: Logger[String], data: Array[ByteSeq] val, 108 | total_size: USize, prefs: LZ4FPreferences = LZ4FPreferences, 109 | copts: LZ4FCompressOptions = LZ4FCompressOptions): Array[U8] iso^ ? 110 | => 111 | LZ4.compress_array(logger, data, total_size, prefs, copts)? 
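// Usage sketch (assumptions: liblz4 is linked and a `logger: Logger[String]`
// is in scope; the enclosing `try ... end` is omitted for brevity):
//
//   let compressed = LZ4Compressor.compress(logger, "some bytes to pack")?
//   let restored = LZ4Decompressor.decompress(logger, consume compressed)?
//
// Both calls raise an error on any LZ4 failure, after logging the LZ4 error
// message through the provided logger.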
112 | 
113 | primitive LZ4Decompressor
114 |   fun decompress(logger: Logger[String], data: ByteSeq,
115 |     dopts: LZ4FDecompressOptions = LZ4FDecompressOptions): Array[U8] iso^ ?
116 |   =>
117 |     LZ4.decompress(logger, data, dopts)?
118 | 
119 | primitive LZ4
120 |   // based on https://github.com/edenhill/librdkafka/blob/master/src/rdkafka_lz4.c
121 |   fun decompress(logger: Logger[String], data: ByteSeq,
122 |     dopts: LZ4FDecompressOptions = LZ4FDecompressOptions): Array[U8] iso^ ?
123 |   =>
124 |     var dctx: LZ4FDecompressionContext = LZ4FDecompressionContext
125 |     var fi: LZ4FFrameInfo = LZ4FFrameInfo
126 |     var fi_p = MaybePointer[LZ4FFrameInfo](fi)
127 |     var data_offset = data.size()
128 | 
129 |     var err = @LZ4F_createDecompressionContext(addressof dctx, LZ4FVersion())
130 |     if @LZ4F_isError(err) != 0 then
131 |       logger(Error) and logger.log(Error,
132 |         "LZ4 couldn't create decompression context! LZ4 error message: " +
133 |         String.copy_cstring(@LZ4F_getErrorName(err)))
134 |       error
135 |     end
136 | 
137 |     err = @LZ4F_getFrameInfo(dctx, fi_p, data.cpointer(), addressof data_offset)
138 |     if @LZ4F_isError(err) != 0 then
139 |       logger(Error) and logger.log(Error,
140 |         "LZ4 couldn't read frame info! LZ4 error message: " +
141 |         String.copy_cstring(@LZ4F_getErrorName(err)))
142 |       @LZ4F_freeDecompressionContext(dctx)
143 |       error
144 |     end
145 | 
146 |     let out_size = if (fi.content_size == 0) or
147 |       (fi.content_size.usize() > (data.size() * 255)) then
148 |       data.size() * 255
149 |     else
150 |       fi.content_size.usize()
151 |     end
152 | 
153 |     let buffer = recover Array[U8](out_size) end
154 |     buffer.undefined(buffer.space())
155 | 
156 |     var buf_offset: USize = 0
157 |     var buf_size: USize = buffer.size()
158 |     var data_size: USize = data.size()
159 | 
160 |     var dopts_p = MaybePointer[LZ4FDecompressOptions](dopts)
161 | 
162 |     while data_offset < data.size() do
163 |       buf_size = buffer.size() - buf_offset
164 |       data_size = data.size() - data_offset
165 | 
166 |       err = @LZ4F_decompress(dctx, buffer.cpointer().usize() + buf_offset,
167 |         addressof buf_size, data.cpointer().usize() + data_offset,
168 |         addressof data_size, dopts_p)
169 |       if @LZ4F_isError(err) != 0 then
170 |         logger(Error) and logger.log(Error,
171 |           "LZ4 couldn't decompress data! LZ4 error message: " +
172 |           String.copy_cstring(@LZ4F_getErrorName(err)))
173 |         @LZ4F_freeDecompressionContext(dctx)
174 |         error
175 |       end
176 | 
177 |       buf_offset = buf_offset + buf_size
178 |       data_offset = data_offset + data_size
179 | 
180 |       if buf_offset > buffer.size() then
181 |         logger(Error) and logger.log(Error, "LZ4 buffer offset is larger " +
182 |           "than buffer size! This should never happen!")
183 |         @LZ4F_freeDecompressionContext(dctx)
184 |         error
185 |       end
186 | 
187 |       if data_offset > data.size() then
188 |         logger(Error) and logger.log(Error, "LZ4 input data offset is larger " +
189 |           "than input data size!
This should never happen!") 190 | @LZ4F_freeDecompressionContext(dctx) 191 | error 192 | end 193 | 194 | // done decompressing 195 | if err == 0 then 196 | break 197 | end 198 | 199 | if (err > 0) and (buf_offset == buffer.size()) then 200 | // grow buffer 201 | buffer.undefined(buffer.size()*2) 202 | end 203 | end 204 | 205 | if data_offset < data.size() then 206 | logger(Error) and logger.log(Error, "LZ4 didn't decompress all data!") 207 | @LZ4F_freeDecompressionContext(dctx) 208 | error 209 | end 210 | 211 | @LZ4F_freeDecompressionContext(dctx) 212 | 213 | buffer.truncate(buf_offset) 214 | 215 | buffer 216 | 217 | // based on https://github.com/edenhill/librdkafka/blob/master/src/rdkafka_lz4.c 218 | fun compress(logger: Logger[String], data: ByteSeq, 219 | prefs: LZ4FPreferences = LZ4FPreferences, 220 | copts: LZ4FCompressOptions = LZ4FCompressOptions): Array[U8] iso^ ? 221 | => 222 | compress_array(logger, recover val [data] end, data.size(), prefs, copts)? 223 | 224 | // TODO: Figure out appropriate way to hide Structs in MaybePointers that 225 | // isn't exposed to users 226 | // based on 227 | // https://github.com/edenhill/librdkafka/blob/master/src/rdkafka_lz4.c#L321 228 | fun compress_array(logger: Logger[String], data: Array[ByteSeq] val, 229 | total_size: USize, prefs: LZ4FPreferences = LZ4FPreferences, 230 | copts: LZ4FCompressOptions = LZ4FCompressOptions): Array[U8] iso^ ? 231 | => 232 | var bytes_written: USize = 0 233 | let max_len = @LZ4F_compressBound(total_size, Pointer[U8]) 234 | if @LZ4F_isError(max_len) != 0 then 235 | logger(Error) and logger.log(Error, "LZ4 couldn't determine output " + 236 | "size for compression! LZ4 error message: " + 237 | String.copy_cstring(@LZ4F_getErrorName(max_len))) 238 | error 239 | end 240 | let buffer = recover Array[U8](max_len) end 241 | buffer.undefined(buffer.space()) 242 | 243 | var cctx: LZ4FCompressionContext = LZ4FCompressionContext 244 | 245 | var err = @LZ4F_createCompressionContext(addressof cctx, LZ4FVersion()) 246 | if @LZ4F_isError(err) != 0 then 247 | logger(Error) and logger.log(Error, 248 | "LZ4 couldn't create compression context! LZ4 error message: " + 249 | String.copy_cstring(@LZ4F_getErrorName(err))) 250 | error 251 | end 252 | 253 | var prefs_p = MaybePointer[LZ4FPreferences](prefs) 254 | 255 | err = @LZ4F_compressBegin(cctx, buffer.cpointer(), buffer.size(), prefs_p) 256 | if @LZ4F_isError(err) != 0 then 257 | logger(Error) and logger.log(Error, 258 | "LZ4 couldn't begin compression! LZ4 error message: " + 259 | String.copy_cstring(@LZ4F_getErrorName(err))) 260 | @LZ4F_freeCompressionContext(cctx) 261 | error 262 | end 263 | 264 | bytes_written = bytes_written + err 265 | 266 | var copts_p = MaybePointer[LZ4FCompressOptions](copts) 267 | 268 | for d in data.values() do 269 | err = @LZ4F_compressUpdate(cctx, buffer.cpointer().usize() + 270 | bytes_written, buffer.size() - bytes_written, d.cpointer(), d.size(), 271 | copts_p) 272 | if @LZ4F_isError(err) != 0 then 273 | logger(Error) and logger.log(Error, 274 | "LZ4 compression error! LZ4 error message: " + 275 | String.copy_cstring(@LZ4F_getErrorName(err))) 276 | @LZ4F_freeCompressionContext(cctx) 277 | error 278 | end 279 | bytes_written = bytes_written + err 280 | end 281 | 282 | err = @LZ4F_compressEnd(cctx, buffer.cpointer().usize() + bytes_written, 283 | buffer.size() - bytes_written, copts_p) 284 | if @LZ4F_isError(err) != 0 then 285 | logger(Error) and logger.log(Error, 286 | "LZ4 couldn't end compression! 
LZ4 error message: " + 287 | String.copy_cstring(@LZ4F_getErrorName(err))) 288 | @LZ4F_freeCompressionContext(cctx) 289 | error 290 | end 291 | 292 | bytes_written = bytes_written + err 293 | 294 | @LZ4F_freeCompressionContext(cctx) 295 | 296 | buffer.truncate(bytes_written) 297 | 298 | buffer 299 | -------------------------------------------------------------------------------- /pony-kafka/compression/snappy.pony: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | 15 | */ 16 | 17 | use "../customlogger" 18 | use "lib:snappy" 19 | 20 | use @snappy_validate_compressed_buffer[SnappyStatus](data: Pointer[U8] tag, 21 | size: USize) 22 | use @snappy_uncompressed_length[SnappyStatus](data: (USize | Pointer[U8] tag), 23 | size: USize, len: Pointer[USize]) 24 | use @snappy_max_compressed_length[USize](uncompressed_size: USize) 25 | use @snappy_uncompress[SnappyStatus](data: (USize | Pointer[U8] tag), 26 | size: USize, output: (USize | Pointer[U8] tag), output_size: Pointer[USize]) 27 | use @snappy_compress[SnappyStatus](data: Pointer[U8] tag, size: USize, 28 | output: (USize | Pointer[U8] tag), output_size: Pointer[USize]) 29 | 30 | type SnappyStatus is I32 31 | 32 | primitive SnappyCompressor 33 | fun compress(logger: Logger[String], data: ByteSeq): Array[U8] iso^ ? => 34 | Snappy.compress(logger, data)? 35 | 36 | fun compress_array(logger: Logger[String], data: Array[ByteSeq] val, 37 | total_size: USize): Array[U8] iso^ ? 38 | => 39 | Snappy.compress_array(logger, data, total_size)? 40 | 41 | fun compress_java(logger: Logger[String], data: ByteSeq, 42 | block_size: USize = 32*1024): Array[U8] iso^ ? 43 | => 44 | Snappy.compress_java(logger, data, block_size)? 45 | 46 | fun compress_array_java(logger: Logger[String], data: Array[ByteSeq] val, 47 | total_size: USize, block_size: USize = 32*1024): Array[U8] iso^ ? 48 | => 49 | Snappy.compress_array_java(logger, data, total_size, block_size)? 50 | 51 | primitive SnappyDecompressor 52 | fun decompress(logger: Logger[String], data: ByteSeq): Array[U8] iso^ ? => 53 | Snappy.decompress(logger, data)? 54 | 55 | fun decompress_java(logger: Logger[String], data: ByteSeq): Array[U8] iso^ ? => 56 | Snappy.decompress_java(logger, data)? 57 | 58 | primitive Snappy 59 | fun read32be(buffer: ByteSeq, offset: USize): U32 ? => 60 | // TODO: figure out some way of detecting endianness; big endian needs byte 61 | // swapping 62 | (buffer(offset + 0)?.u32() << 24) or (buffer(offset + 1)?.u32() << 16) or 63 | (buffer(offset + 2)?.u32() << 8) or buffer(offset + 3)?.u32() 64 | 65 | fun decompress_java(logger: Logger[String], data: ByteSeq): Array[U8] iso^ ? => 66 | let snappy_java_hdr_size: USize = 16 67 | let snappy_java_magic = [as U8: 0x82; 'S'; 'N'; 'A'; 'P'; 'P'; 'Y'; 0] 68 | 69 | 70 | if data.size() <= (snappy_java_hdr_size + 4) then 71 | logger(Info) and logger.log(Info, "Not snappy java compressed data " + 72 | "(not enough data). 
Falling back to normal snappy decompression.")
73 |       return decompress(logger, data)?
74 |     end
75 | 
76 |     if not ((data(0)? == snappy_java_magic(0)?)
77 |       and (data(1)? == snappy_java_magic(1)?)
78 |       and (data(2)? == snappy_java_magic(2)?)
79 |       and (data(3)? == snappy_java_magic(3)?)
80 |       and (data(4)? == snappy_java_magic(4)?)
81 |       and (data(5)? == snappy_java_magic(5)?)
82 |       and (data(6)? == snappy_java_magic(6)?)
83 |       and (data(7)? == snappy_java_magic(7)?)) then
84 |       logger(Info) and logger.log(Info, "Not snappy java compressed data " +
85 |         "(invalid magic). Falling back to normal snappy decompression.")
86 |       return decompress(logger, data)?
87 |     end
88 | 
89 |     var offset: USize = snappy_java_hdr_size
90 |     var err: SnappyStatus = 0
91 |     var total_uncompressed_size: USize = 0
92 | 
93 |     while (offset + 4) < data.size() do
94 |       var uncompressed_size: USize = 0
95 |       var chunk_size = read32be(data, offset)?.usize()
96 |       offset = offset + 4
97 | 
98 |       if (chunk_size + offset) > data.size() then
99 |         logger(Error) and logger.log(Error,
100 |           "Snappy Java decoding error! Invalid chunk length encountered.")
101 |         error
102 |       end
103 | 
104 |       err = @snappy_uncompressed_length(data.cpointer().usize() + offset,
105 |         chunk_size, addressof uncompressed_size)
106 |       if err != 0 then
107 |         logger(Error) and logger.log(Error,
108 |           "Error determining uncompressed size of snappy java compressed chunk."
109 |           )
110 |         error
111 |       end
112 | 
113 |       offset = offset + chunk_size
114 |       total_uncompressed_size = total_uncompressed_size + uncompressed_size
115 |     end
116 | 
117 |     if offset != data.size() then
118 |       logger(Error) and logger.log(Error,
119 |         "Error processing all of snappy java compressed data.")
120 |       error
121 |     end
122 | 
123 |     if total_uncompressed_size == 0 then
124 |       logger(Error) and logger.log(Error,
125 |         "Error: snappy java uncompressed data is empty.")
126 |       error
127 |     end
128 | 
129 |     let buffer = recover Array[U8](total_uncompressed_size) end
130 |     buffer.undefined(buffer.space())
131 | 
132 |     offset = snappy_java_hdr_size
133 |     total_uncompressed_size = 0
134 | 
135 |     while (offset + 4) < data.size() do
136 |       var uncompressed_size: USize = 0
137 |       var chunk_size = read32be(data, offset)?.usize()
138 |       if (chunk_size + offset) > data.size() then
139 |         logger(Error) and logger.log(Error,
140 |           "Snappy Java decoding error! Invalid chunk length encountered.")
141 |         error
142 |       end
143 |       offset = offset + 4
144 | 
145 |       err = @snappy_uncompressed_length(data.cpointer().usize() + offset,
146 |         chunk_size, addressof uncompressed_size)
147 |       if err != 0 then
148 |         logger(Error) and logger.log(Error,
149 |           "Error determining uncompressed size of snappy java compressed chunk."
150 |           )
151 |         error
152 |       end
153 | 
154 |       err = @snappy_uncompress(data.cpointer().usize() + offset, chunk_size,
155 |         buffer.cpointer().usize() + total_uncompressed_size,
156 |         addressof uncompressed_size)
157 |       if err != 0 then
158 |         logger(Error) and logger.log(Error,
159 |           "Error uncompressing snappy java compressed chunk. Error code: " +
160 |           err.string())
161 |         error
162 |       end
163 | 
164 |       offset = offset + chunk_size
165 |       total_uncompressed_size = total_uncompressed_size + uncompressed_size
166 |     end
167 | 
168 |     if offset != data.size() then
169 |       logger(Error) and logger.log(Error,
170 |         "Error processing all of snappy java compressed data.")
171 |       error
172 |     end
173 | 
174 |     logger(Fine) and logger.log(Fine,
175 |       "Snappy java uncompressed data.
Uncompressed size: " + 176 | total_uncompressed_size.string() + ", compressed size: " + 177 | data.size().string()) 178 | 179 | buffer.truncate(total_uncompressed_size) 180 | 181 | buffer 182 | 183 | fun decompress(logger: Logger[String], data: ByteSeq): Array[U8] iso^ ? => 184 | var max_size: USize = 0 185 | var err = @snappy_uncompressed_length(data.cpointer().usize(), data.size(), 186 | addressof max_size) 187 | if err != 0 then 188 | logger(Error) and logger.log(Error, 189 | "Error determining uncompressed size of snappy compressed data.") 190 | error 191 | end 192 | 193 | let buffer = recover Array[U8](max_size) end 194 | buffer.undefined(buffer.space()) 195 | var out_len = buffer.size() 196 | 197 | err = @snappy_uncompress(data.cpointer().usize(), data.size(), 198 | buffer.cpointer().usize(), addressof out_len) 199 | if err != 0 then 200 | logger(Error) and logger.log(Error, 201 | "Error uncompressing snappy compressed data.") 202 | error 203 | end 204 | 205 | buffer.truncate(out_len) 206 | 207 | buffer 208 | 209 | fun compress(logger: Logger[String], data: ByteSeq): Array[U8] iso^ ? => 210 | let max_size = @snappy_max_compressed_length(data.size()) 211 | let buffer = recover Array[U8](max_size) end 212 | buffer.undefined(buffer.space()) 213 | var out_len = buffer.size() 214 | 215 | var err = @snappy_compress(data.cpointer(), data.size(), 216 | buffer.cpointer().usize(), addressof out_len) 217 | if err != 0 then 218 | logger(Error) and logger.log(Error, "Error compressing data with snappy.") 219 | error 220 | end 221 | 222 | buffer.truncate(out_len) 223 | 224 | buffer 225 | 226 | // TODO: Figure out a way to do this without copying all the data into a 227 | // single buffer 228 | fun compress_array(logger: Logger[String], data: Array[ByteSeq] val, total_size: USize): Array[U8] iso^ ? => 229 | let arr = recover iso 230 | let a = Array[U8](total_size) 231 | for d in data.values() do 232 | match d 233 | | let x: Array[U8] val => x.copy_to(a, 0, a.size(), x.size()) 234 | | let s: String => s.array().copy_to(a, 0, a.size(), s.array().size()) 235 | end 236 | end 237 | a 238 | end 239 | 240 | compress(logger, consume arr)? 241 | 242 | fun compress_java(logger: Logger[String], data: ByteSeq, 243 | block_size: USize = 32*1024): Array[U8] iso^ ? 244 | => 245 | let snappy_java_hdr_size: USize = 16 246 | let snappy_java_magic = [as U8: 0x82; 'S'; 'N'; 'A'; 'P'; 'P'; 'Y'; 0] 247 | 248 | let max_size = @snappy_max_compressed_length(data.size()) 249 | let buffer = recover Array[U8](max_size + 16) end 250 | buffer.undefined(buffer.space()) 251 | var total_compressed_size: USize = 0 252 | var out_len = buffer.size() 253 | 254 | var offset: USize = 0 255 | 256 | // write header 257 | buffer(0)? = snappy_java_magic(0)? 258 | buffer(1)? = snappy_java_magic(1)? 259 | buffer(2)? = snappy_java_magic(2)? 260 | buffer(3)? = snappy_java_magic(3)? 261 | buffer(4)? = snappy_java_magic(4)? 262 | buffer(5)? = snappy_java_magic(5)? 263 | buffer(6)? = snappy_java_magic(6)? 264 | buffer(7)? = snappy_java_magic(7)? 265 | buffer(8)? = 0 266 | buffer(9)? = 0 267 | buffer(10)? = 0 268 | buffer(11)? = 1 269 | buffer(12)? = 0 270 | buffer(13)? = 0 271 | buffer(14)? = 0 272 | buffer(15)? 
= 1 273 | 274 | total_compressed_size = 16 275 | offset = 0 276 | 277 | while offset < data.size() do 278 | let d = match data 279 | | let s: String => s.trim(offset, offset + block_size) 280 | | let a: Array[U8] val => a.trim(offset, offset + block_size) 281 | end 282 | 283 | // write compressed data to current write offset + 4 284 | out_len = buffer.size() - total_compressed_size 285 | var err = @snappy_compress(d.cpointer(), d.size(), 286 | buffer.cpointer().usize() + total_compressed_size + 4, 287 | addressof out_len) 288 | if err != 0 then 289 | logger(Error) and logger.log(Error, 290 | "Error compressing chunk with snappy.") 291 | error 292 | end 293 | 294 | // write chunk size (big endian) 295 | buffer(total_compressed_size + 0)? = (out_len >> 24).u8() 296 | buffer(total_compressed_size + 1)? = (out_len >> 16).u8() 297 | buffer(total_compressed_size + 2)? = (out_len >> 8).u8() 298 | buffer(total_compressed_size + 3)? = (out_len >> 0).u8() 299 | 300 | total_compressed_size = total_compressed_size + out_len + 4 301 | offset = offset + d.size() 302 | end 303 | 304 | logger(Fine) and logger.log(Fine, 305 | "Snappy java compressed data. Uncompressed size: " + data.size().string() 306 | + ", compressed size: " + total_compressed_size.string()) 307 | 308 | buffer.truncate(total_compressed_size) 309 | 310 | buffer 311 | 312 | // TODO: Figure out a way to do this without copying all the data into a 313 | // single buffer 314 | fun compress_array_java(logger: Logger[String], data: Array[ByteSeq] val, 315 | total_size: USize, block_size: USize = 32*1024): Array[U8] iso^ ? 316 | => 317 | let arr = recover iso 318 | let a = Array[U8](total_size) 319 | for d in data.values() do 320 | match d 321 | | let x: Array[U8] val => x.copy_to(a, 0, a.size(), x.size()) 322 | | let s: String => s.array().copy_to(a, 0, a.size(), s.array().size()) 323 | end 324 | end 325 | a 326 | end 327 | 328 | compress_java(logger, consume arr, block_size)? 329 | 330 | -------------------------------------------------------------------------------- /pony-kafka/kafka_broker_connection.pony: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | 15 | */ 16 | 17 | use "customnet" 18 | use "customlogger" 19 | use "net" 20 | use "collections" 21 | 22 | // define what a kafka broker connection is 23 | // this is a trait so any actor type could in theory be a kafka broker 24 | // connection 25 | trait KafkaBrokerConnection is CustomTCPConnection 26 | // behavior to update the internal brokers list 27 | be _update_brokers_list(brokers_list: Map[KafkaNodeId, (_KafkaBroker val, 28 | KafkaBrokerConnection tag)] val) 29 | => 30 | try 31 | ((get_handler() as CustomTCPConnectionHandler).notify as 32 | _KafkaHandler).update_brokers_list(brokers_list) 33 | end 34 | 35 | // behavior to send messages to kafka brokers 36 | be send_kafka_messages(topic: String, msgs_to_send: Map[KafkaPartitionId, 37 | Array[ProducerKafkaMessage val] iso] val, auth: _KafkaProducerAuth) 38 | => 39 | try 40 | ((get_handler() as CustomTCPConnectionHandler).notify as 41 | _KafkaHandler).send_kafka_messages(this, topic, msgs_to_send, auth)? 42 | end 43 | 44 | be send_kafka_message(topic: String, partition_id: KafkaPartitionId, 45 | msg_to_send: ProducerKafkaMessage val, auth: _KafkaProducerAuth) 46 | => 47 | try 48 | ((get_handler() as CustomTCPConnectionHandler).notify as 49 | _KafkaHandler).send_kafka_message(this, topic, partition_id, 50 | msg_to_send, auth)? 51 | end 52 | 53 | /* 54 | be message_consumed(msg: KafkaMessage val, success: Bool) => 55 | try 56 | ((get_handler() as CustomTCPConnectionHandler).notify as 57 | _KafkaHandler).message_consumed(msg, success) 58 | end 59 | */ 60 | 61 | be _update_consumer_message_handler(topic: String, 62 | consumer_handler: KafkaConsumerMessageHandler val) 63 | => 64 | try 65 | ((get_handler() as CustomTCPConnectionHandler).notify as 66 | _KafkaHandler)._update_consumer_message_handler(topic, consumer_handler) 67 | end 68 | 69 | be _update_consumers(topic_consumers: Map[String, Array[KafkaConsumer tag] 70 | val] val) 71 | => 72 | try 73 | ((get_handler() as CustomTCPConnectionHandler).notify as 74 | _KafkaHandler)._update_consumers(topic_consumers) 75 | end 76 | 77 | be _consumer_pause(topic: String, partition_id: KafkaPartitionId) => 78 | try 79 | ((get_handler() as CustomTCPConnectionHandler).notify as 80 | _KafkaHandler)._consumer_pause(this, topic, partition_id) 81 | end 82 | 83 | be _consumer_pause_all() => 84 | try 85 | ((get_handler() as CustomTCPConnectionHandler).notify as 86 | _KafkaHandler)._consumer_pause_all(this) 87 | end 88 | 89 | be _consumer_resume(topic: String, partition_id: KafkaPartitionId, offset: KafkaOffset = -999) => 90 | try 91 | ((get_handler() as CustomTCPConnectionHandler).notify as 92 | _KafkaHandler)._consumer_resume(this, topic, partition_id, offset) 93 | end 94 | 95 | be _consumer_resume_all() => 96 | try 97 | ((get_handler() as CustomTCPConnectionHandler).notify as 98 | _KafkaHandler)._consumer_resume_all(this) 99 | end 100 | 101 | be _consume_messages() => 102 | try 103 | ((get_handler() as CustomTCPConnectionHandler).notify as 104 | _KafkaHandler).consume_messages(this)? 105 | end 106 | 107 | be _refresh_metadata() => 108 | try 109 | ((get_handler() as CustomTCPConnectionHandler).notify as 110 | _KafkaHandler).refresh_metadata(this)? 111 | end 112 | 113 | be _send_pending_messages() => 114 | try 115 | ((get_handler() as CustomTCPConnectionHandler).notify as 116 | _KafkaHandler)._send_pending_messages(this)? 
117 | end 118 | 119 | be _leader_change_throttle_ack(topics_to_throttle: Map[String, Set[KafkaPartitionId] iso] 120 | val) 121 | => 122 | try 123 | ((get_handler() as CustomTCPConnectionHandler).notify as 124 | _KafkaHandler)._leader_change_throttle_ack(topics_to_throttle, this) 125 | end 126 | 127 | // update metadata based on what other broker connections got from kafka 128 | be _update_metadata(meta: _KafkaMetadata val) => 129 | try 130 | ((get_handler() as CustomTCPConnectionHandler).notify as 131 | _KafkaHandler)._update_metadata(meta, this) 132 | end 133 | 134 | be _leader_change_msgs(meta: _KafkaMetadata val, topic: String, partition_id: KafkaPartitionId, 135 | msgs: Map[KafkaPartitionId, Array[ProducerKafkaMessage val] iso] val, request_offset: KafkaOffset) 136 | => 137 | try 138 | ((get_handler() as CustomTCPConnectionHandler).notify as 139 | _KafkaHandler)._leader_change_msgs(meta, topic, partition_id, msgs, request_offset, this) 140 | end 141 | 142 | be write(data: ByteSeq) => 143 | """ 144 | Write a single sequence of bytes. 145 | """ 146 | try 147 | let handler = ((get_handler() as CustomTCPConnectionHandler).notify as 148 | _KafkaHandler) 149 | let kc = handler._get_client() 150 | let name = handler._get_name() 151 | kc._unrecoverable_error(KafkaErrorReport(ClientErrorShouldNeverHappen(name + 152 | "Cannot write directly on a Kafka Broker Connection." + 153 | " This should never happen."), 154 | "N/A", -1)) 155 | end 156 | 157 | be queue(data: ByteSeq) => 158 | """ 159 | Queue a single sequence of bytes on linux. 160 | Do nothing on windows. 161 | """ 162 | try 163 | let handler = ((get_handler() as CustomTCPConnectionHandler).notify as 164 | _KafkaHandler) 165 | let kc = handler._get_client() 166 | let name = handler._get_name() 167 | kc._unrecoverable_error(KafkaErrorReport(ClientErrorShouldNeverHappen(name + 168 | "Cannot queue directly on a Kafka Broker Connection." + 169 | " This should never happen."), 170 | "N/A", -1)) 171 | end 172 | 173 | be writev(data: ByteSeqIter) => 174 | """ 175 | Write a sequence of sequences of bytes. 176 | """ 177 | try 178 | let handler = ((get_handler() as CustomTCPConnectionHandler).notify as 179 | _KafkaHandler) 180 | let kc = handler._get_client() 181 | let name = handler._get_name() 182 | kc._unrecoverable_error(KafkaErrorReport(ClientErrorShouldNeverHappen(name + 183 | "Cannot writev directly on a Kafka Broker Connection." + 184 | " This should never happen."), 185 | "N/A", -1)) 186 | end 187 | 188 | be queuev(data: ByteSeqIter) => 189 | """ 190 | Queue a sequence of sequences of bytes on linux. 191 | Do nothing on windows. 192 | """ 193 | try 194 | let handler = ((get_handler() as CustomTCPConnectionHandler).notify as 195 | _KafkaHandler) 196 | let kc = handler._get_client() 197 | let name = handler._get_name() 198 | kc._unrecoverable_error(KafkaErrorReport(ClientErrorShouldNeverHappen(name + 199 | "Cannot queuev directly on a Kafka Broker Connection." + 200 | " This should never happen."), 201 | "N/A", -1)) 202 | end 203 | 204 | be send_queue() => 205 | """ 206 | Write pending queue to network on linux. 207 | Do nothing on windows. 208 | """ 209 | try 210 | let handler = ((get_handler() as CustomTCPConnectionHandler).notify as 211 | _KafkaHandler) 212 | let kc = handler._get_client() 213 | let name = handler._get_name() 214 | kc._unrecoverable_error(KafkaErrorReport(ClientErrorShouldNeverHappen(name + 215 | "Cannot send_queue directly on a Kafka Broker Connection." 
+
216 |         " This should never happen."),
217 |       "N/A", -1))
218 |     end
219 | 
220 |   be set_notify(notify: CustomTCPConnectionNotify iso) =>
221 |     """
222 |     Change the notifier.
223 |     """
224 |     try
225 |       let handler = ((get_handler() as CustomTCPConnectionHandler).notify as
226 |         _KafkaHandler)
227 |       let kc = handler._get_client()
228 |       let name = handler._get_name()
229 |       kc._unrecoverable_error(KafkaErrorReport(ClientErrorShouldNeverHappen(name +
230 |         "Cannot set_notify directly on a Kafka Broker Connection." +
231 |         " This should never happen."),
232 |       "N/A", -1))
233 |     end
234 | 
235 | 
236 | // factory for creating kafka broker connections on demand
237 | // used in combination with the main broker connection trait to make kafka
238 | // connection types arbitrary
239 | trait KafkaBrokerConnectionFactory
240 |   fun apply(auth: TCPConnectionAuth, notify: CustomTCPConnectionNotify iso,
241 |     host: String, service: String, from: String = "", init_size: USize = 64,
242 |     max_size: USize = 16384): KafkaBrokerConnection tag
243 | 
244 |   fun ip4(auth: TCPConnectionAuth, notify: CustomTCPConnectionNotify iso,
245 |     host: String, service: String, from: String = "", init_size: USize = 64,
246 |     max_size: USize = 16384): KafkaBrokerConnection tag
247 | 
248 |   fun ip6(auth: TCPConnectionAuth, notify: CustomTCPConnectionNotify iso,
249 |     host: String, service: String, from: String = "", init_size: USize = 64,
250 |     max_size: USize = 16384): KafkaBrokerConnection tag
251 | 
252 | 
253 | // simple kafka broker connection factory implementation to create simple kafka
254 | // broker connections
255 | class SimpleKafkaBrokerConnectionFactory is KafkaBrokerConnectionFactory
256 |   new val create() =>
257 |     None
258 | 
259 |   fun apply(auth: TCPConnectionAuth, notify: CustomTCPConnectionNotify iso,
260 |     host: String, service: String, from: String = "", init_size: USize = 64,
261 |     max_size: USize = 16384): KafkaBrokerConnection tag
262 |   =>
263 |     SimpleKafkaBrokerConnection(auth, consume notify, host,
264 |       service, from, init_size, max_size)
265 | 
266 |   fun ip4(auth: TCPConnectionAuth, notify: CustomTCPConnectionNotify iso,
267 |     host: String, service: String, from: String = "", init_size: USize = 64,
268 |     max_size: USize = 16384): KafkaBrokerConnection tag
269 |   =>
270 |     SimpleKafkaBrokerConnection.ip4(auth, consume notify, host,
271 |       service, from, init_size, max_size)
272 | 
273 |   fun ip6(auth: TCPConnectionAuth, notify: CustomTCPConnectionNotify iso,
274 |     host: String, service: String, from: String = "", init_size: USize = 64,
275 |     max_size: USize = 16384): KafkaBrokerConnection tag
276 |   =>
277 |     SimpleKafkaBrokerConnection.ip6(auth, consume notify, host,
278 |       service, from, init_size, max_size)
279 | 
280 | 
281 | // simple kafka broker connection
282 | // all logic for the broker connection comes from the trait
283 | // the actor just needs to implement `get_handler` and have an appropriate
284 | // internal handler variable
285 | actor SimpleKafkaBrokerConnection is KafkaBrokerConnection
286 |   var _handler: TCPConnectionHandler = MockTCPConnectionHandler
287 | 
288 |   fun ref get_handler(): TCPConnectionHandler =>
289 |     _handler
290 | 
291 |   new create(auth: TCPConnectionAuth, notify: CustomTCPConnectionNotify iso,
292 |     host: String, service: String, from: String = "", init_size: USize = 64,
293 |     max_size: USize = 16384)
294 |   =>
295 |     """
296 |     Connect via IPv4 or IPv6. If `from` is a non-empty string, the connection
297 |     will be made from the specified interface.
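
    A sketch of typical construction through the factory (a hedged example:
    `auth` and a `notify` that wraps the internal `_KafkaHandler` are assumed
    to already be in scope):

    ```
    let factory = SimpleKafkaBrokerConnectionFactory
    let conn = factory(auth, consume notify, "127.0.0.1", "9092")
    ```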
298 | """ 299 | _handler = CustomTCPConnectionHandler(this, auth, consume notify, host, 300 | service, from, init_size, max_size) 301 | 302 | new ip4(auth: TCPConnectionAuth, notify: CustomTCPConnectionNotify iso, 303 | host: String, service: String, from: String = "", init_size: USize = 64, 304 | max_size: USize = 16384) 305 | => 306 | """ 307 | Connect via IPv4. 308 | """ 309 | _handler = CustomTCPConnectionHandler.ip4(this, auth, consume notify, host, 310 | service, from, init_size, max_size) 311 | 312 | new ip6(auth: TCPConnectionAuth, notify: CustomTCPConnectionNotify iso, 313 | host: String, service: String, from: String = "", init_size: USize = 64, 314 | max_size: USize = 16384) 315 | => 316 | """ 317 | Connect via IPv6. 318 | """ 319 | _handler = CustomTCPConnectionHandler.ip6(this, auth, consume notify, host, 320 | service, from, init_size, max_size) 321 | 322 | new _accept(listen: TCPListener, notify: CustomTCPConnectionNotify iso, fd: 323 | U32, 324 | init_size: USize = 64, max_size: USize = 16384) 325 | => 326 | """ 327 | A new connection accepted on a server. 328 | """ 329 | _handler = CustomTCPConnectionHandler.accept(this, listen, consume notify, 330 | fd, init_size, max_size) 331 | 332 | 333 | actor _MockKafkaBrokerConnection is KafkaBrokerConnection 334 | var _handler: TCPConnectionHandler = MockTCPConnectionHandler 335 | 336 | fun ref get_handler(): TCPConnectionHandler => 337 | _handler 338 | -------------------------------------------------------------------------------- /pony-kafka/custombuffered/_test.pony: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Copyright (C) 2016-2017, Sendence LLC 4 | Copyright (C) 2016-2017, The Pony Developers 5 | Copyright (c) 2014-2015, Causality Ltd. 6 | All rights reserved. 7 | 8 | Redistribution and use in source and binary forms, with or without 9 | modification, are permitted provided that the following conditions are met: 10 | 11 | 1. Redistributions of source code must retain the above copyright notice, this 12 | list of conditions and the following disclaimer. 13 | 2. Redistributions in binary form must reproduce the above copyright notice, 14 | this list of conditions and the following disclaimer in the documentation 15 | and/or other materials provided with the distribution. 16 | 17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 18 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 19 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 20 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 21 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 22 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 23 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 24 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 26 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 | 28 | */ 29 | 30 | use "ponytest" 31 | use "codecs" 32 | 33 | actor Main is TestList 34 | new create(env: Env) => PonyTest(env, this) 35 | new make() => None 36 | 37 | fun tag tests(test: PonyTest) => 38 | test(_TestIsoReader) 39 | test(_TestValReader) 40 | test(_TestWriter) 41 | 42 | 43 | class iso _TestIsoReader is UnitTest 44 | """ 45 | Test adding to and reading from an IsoReader. 
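    Covers appending chunks, big- and little-endian decoding, contiguous and
    block reads, skipping, varint decoding, and clearing.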
46 | """ 47 | fun name(): String => "buffered/IsoReader" 48 | 49 | fun apply(h: TestHelper) ? => 50 | let b = recover ref IsoReader end 51 | 52 | b.append(recover [as U8: 53 | 0x42 54 | 0xDE; 0xAD 55 | 0xAD; 0xDE 56 | 0xDE; 0xAD; 0xBE; 0xEF 57 | 0xEF; 0xBE; 0xAD; 0xDE 58 | 0xDE; 0xAD; 0xBE; 0xEF; 0xFE; 0xED; 0xFA; 0xCE 59 | 0xCE; 0xFA; 0xED] end) 60 | 61 | b.append(recover [as U8: 0xFE; 0xEF; 0xBE; 0xAD; 0xDE 62 | 0xDE; 0xAD; 0xBE; 0xEF; 0xFE; 0xED; 0xFA; 0xCE 63 | 0xDE; 0xAD; 0xBE; 0xEF; 0xFE; 0xED; 0xFA; 0xCE 64 | 0xCE; 0xFA; 0xED; 0xFE; 0xEF; 0xBE; 0xAD; 0xDE 65 | 0xCE; 0xFA; 0xED; 0xFE; 0xEF; 0xBE; 0xAD; 0xDE 66 | ] end) 67 | 68 | b.append(recover [as U8: 'h'; 'i'] end) 69 | b.append(recover [as U8: '\n'; 't'; 'h'; 'e'] end) 70 | b.append(recover [as U8: 'r'; 'e'; '\r'; '\n'] end) 71 | 72 | // These expectations consume bytes from the head of the buffer. 73 | h.assert_eq[U8](LittleEndianDecoder.u8(b)?, 0x42) 74 | h.assert_eq[U16](BigEndianDecoder.u16(b)?, 0xDEAD) 75 | h.assert_eq[U16](LittleEndianDecoder.u16(b)?, 0xDEAD) 76 | h.assert_eq[U32](BigEndianDecoder.u32(b)?, 0xDEADBEEF) 77 | h.assert_eq[U32](LittleEndianDecoder.u32(b)?, 0xDEADBEEF) 78 | h.assert_eq[U64](BigEndianDecoder.u64(b)?, 0xDEADBEEFFEEDFACE) 79 | h.assert_eq[U64](LittleEndianDecoder.u64(b)?, 0xDEADBEEFFEEDFACE) 80 | h.assert_eq[U128](BigEndianDecoder.u128(b)?, 81 | 0xDEADBEEFFEEDFACEDEADBEEFFEEDFACE) 82 | h.assert_eq[U128](LittleEndianDecoder.u128(b)?, 83 | 0xDEADBEEFFEEDFACEDEADBEEFFEEDFACE) 84 | 85 | 86 | h.assert_eq[String](String.from_array(b.read_contiguous_bytes(2)?), "hi") 87 | 88 | h.assert_eq[String](String.from_array(b.read_contiguous_bytes(4)?), "\nthe") 89 | 90 | h.assert_eq[String](String.from_array(b.read_contiguous_bytes(4)?), "re\r\n") 91 | 92 | h.assert_eq[USize](b.size(), 0) 93 | 94 | b.append(recover [as U8: 'h'; 'i'] end) 95 | b.append(recover [as U8: '\n'; 't'; 'h'; 'e'] end) 96 | b.append(recover [as U8: 'r'; 'e'; '\r'; '\n'] end) 97 | 98 | b.append(recover [as U8: 0] end) 99 | b.append(recover [as U8: 172; 2] end) 100 | 101 | b.append(recover [as U8: 'h'; 'i'] end) 102 | b.append(recover [as U8: '\n'; 't'; 'h'; 'e'] end) 103 | b.append(recover [as U8: 'r'; 'e'; '\r'; '\n'] end) 104 | 105 | h.assert_eq[String](String.from_array(b.block(2)?), "hi") 106 | h.assert_eq[String](String.from_array(b.block(8)?), "\nthere\r\n") 107 | 108 | b.skip(10)? 109 | 110 | h.assert_eq[U8](VarIntDecoder.u8(b)?, 0) 111 | h.assert_eq[U32](VarIntDecoder.u32(b)?, 300) 112 | 113 | // the last byte is consumed by the reader 114 | h.assert_eq[USize](b.size(), 0) 115 | 116 | b.append(recover [as U8: 'h'; 'i'] end) 117 | b.append(recover [as U8: '\n'; 't'; 'h'; 'e'] end) 118 | b.append(recover [as U8: 'r'; 'e'; '\r'; '\n'] end) 119 | 120 | b.clear() 121 | 122 | h.assert_eq[USize](b.size(), 0) 123 | 124 | class iso _TestValReader is UnitTest 125 | """ 126 | Test adding to and reading from a Reader. 127 | """ 128 | fun name(): String => "buffered/ValReader" 129 | 130 | fun apply(h: TestHelper) ? 
=> 131 | let b = recover ref ValReader end 132 | 133 | b.append(recover [as U8: 134 | 0x42 135 | 0xDE; 0xAD 136 | 0xAD; 0xDE 137 | 0xDE; 0xAD; 0xBE; 0xEF 138 | 0xEF; 0xBE; 0xAD; 0xDE 139 | 0xDE; 0xAD; 0xBE; 0xEF; 0xFE; 0xED; 0xFA; 0xCE 140 | 0xCE; 0xFA; 0xED; 0xFE; 0xEF; 0xBE; 0xAD; 0xDE 141 | 0xDE; 0xAD; 0xBE; 0xEF; 0xFE; 0xED; 0xFA; 0xCE 142 | 0xDE; 0xAD; 0xBE; 0xEF; 0xFE; 0xED; 0xFA; 0xCE 143 | 0xCE; 0xFA; 0xED; 0xFE; 0xEF; 0xBE; 0xAD; 0xDE 144 | 0xCE; 0xFA; 0xED; 0xFE; 0xEF; 0xBE; 0xAD; 0xDE 145 | ] end) 146 | 147 | b.append(recover [as U8: 'h'; 'i'] end) 148 | b.append(recover [as U8: '\n'; 't'; 'h'; 'e'] end) 149 | b.append(recover [as U8: 'r'; 'e'; '\r'; '\n'] end) 150 | 151 | // These expectations peek into the buffer without consuming bytes. 152 | h.assert_eq[U8](LittleEndianDecoder.peek_u8(b)?, 0x42) 153 | h.assert_eq[U16](BigEndianDecoder.peek_u16(b, 1)?, 0xDEAD) 154 | h.assert_eq[U16](LittleEndianDecoder.peek_u16(b, 3)?, 0xDEAD) 155 | h.assert_eq[U32](BigEndianDecoder.peek_u32(b, 5)?, 0xDEADBEEF) 156 | h.assert_eq[U32](LittleEndianDecoder.peek_u32(b, 9)?, 0xDEADBEEF) 157 | h.assert_eq[U64](BigEndianDecoder.peek_u64(b, 13)?, 0xDEADBEEFFEEDFACE) 158 | h.assert_eq[U64](LittleEndianDecoder.peek_u64(b, 21)?, 0xDEADBEEFFEEDFACE) 159 | h.assert_eq[U128](BigEndianDecoder.peek_u128(b, 29)?, 160 | 0xDEADBEEFFEEDFACEDEADBEEFFEEDFACE) 161 | h.assert_eq[U128](LittleEndianDecoder.peek_u128(b, 45)?, 162 | 0xDEADBEEFFEEDFACEDEADBEEFFEEDFACE) 163 | 164 | h.assert_eq[U8](LittleEndianDecoder.peek_u8(b, 61)?, 'h') 165 | h.assert_eq[U8](LittleEndianDecoder.peek_u8(b, 62)?, 'i') 166 | 167 | // These expectations consume bytes from the head of the buffer. 168 | h.assert_eq[U8](LittleEndianDecoder.u8(b)?, 0x42) 169 | h.assert_eq[U16](BigEndianDecoder.u16(b)?, 0xDEAD) 170 | h.assert_eq[U16](LittleEndianDecoder.u16(b)?, 0xDEAD) 171 | h.assert_eq[U32](BigEndianDecoder.u32(b)?, 0xDEADBEEF) 172 | h.assert_eq[U32](LittleEndianDecoder.u32(b)?, 0xDEADBEEF) 173 | h.assert_eq[U64](BigEndianDecoder.u64(b)?, 0xDEADBEEFFEEDFACE) 174 | h.assert_eq[U64](LittleEndianDecoder.u64(b)?, 0xDEADBEEFFEEDFACE) 175 | h.assert_eq[U128](BigEndianDecoder.u128(b)?, 176 | 0xDEADBEEFFEEDFACEDEADBEEFFEEDFACE) 177 | h.assert_eq[U128](LittleEndianDecoder.u128(b)?, 178 | 0xDEADBEEFFEEDFACEDEADBEEFFEEDFACE) 179 | 180 | h.assert_eq[String](b.line()?, "hi") 181 | h.assert_eq[String](b.line()?, "there") 182 | 183 | b.append(recover [as U8: 'h'; 'i'] end) 184 | 185 | try 186 | b.line()? 187 | h.fail("shouldn't have a line") 188 | end 189 | 190 | b.append(recover [as U8: '!'; '\n'] end) 191 | h.assert_eq[String](b.line()?, "hi!") 192 | 193 | b.append(recover [as U8: 's'; 't'; 'r'; '1'] end) 194 | try 195 | b.read_until(0)? 
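      // only reached if read_until(0) wrongly succeeds: no 0 separator byte
      // has been appended yet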
196 | h.fail("should fail reading until 0") 197 | end 198 | b.append(recover [as U8: 0] end) 199 | b.append(recover [as U8: 'f'; 'i'; 'e'; 'l'; 'd'; '1'; ';' 200 | 'f'; 'i'; 'e'; 'l'; 'd'; '2'; ';'; ';'] end) 201 | h.assert_eq[String](String.from_array(b.read_until(0)?), "str1") 202 | h.assert_eq[String](String.from_array(b.read_until(';')?), "field1") 203 | h.assert_eq[String](String.from_array(b.read_until(';')?), "field2") 204 | // read an empty field 205 | h.assert_eq[String](String.from_array(b.read_until(';')?), "") 206 | 207 | b.append(recover [as U8: 0] end) 208 | b.append(recover [as U8: 172; 2] end) 209 | 210 | 211 | h.assert_eq[U8](VarIntDecoder.u8(b)?, 0) 212 | h.assert_eq[U32](VarIntDecoder.u32(b)?, 300) 213 | 214 | // the last byte is consumed by the reader 215 | h.assert_eq[USize](b.size(), 0) 216 | 217 | 218 | class iso _TestWriter is UnitTest 219 | """ 220 | Test writing to and reading from a Writer. 221 | """ 222 | fun name(): String => "buffered/Writer" 223 | 224 | fun apply(h: TestHelper) ? => 225 | let b = recover ref ValReader end 226 | let wb: Writer ref = Writer 227 | 228 | LittleEndianEncoder.u8(wb, 0x42) 229 | BigEndianEncoder.u16(wb, 0xDEAD) 230 | LittleEndianEncoder.u16(wb, 0xDEAD) 231 | BigEndianEncoder.u32(wb, 0xDEADBEEF) 232 | LittleEndianEncoder.u32(wb, 0xDEADBEEF) 233 | BigEndianEncoder.u64(wb, 0xDEADBEEFFEEDFACE) 234 | LittleEndianEncoder.u64(wb, 0xDEADBEEFFEEDFACE) 235 | BigEndianEncoder.u128(wb, 0xDEADBEEFFEEDFACEDEADBEEFFEEDFACE) 236 | LittleEndianEncoder.u128(wb, 0xDEADBEEFFEEDFACEDEADBEEFFEEDFACE) 237 | 238 | wb.write(recover [as U8: 'h'; 'i'] end) 239 | wb.writev(recover [as Array[U8]: [as U8: '\n'; 't'; 'h'; 'e'] 240 | [as U8: 'r'; 'e'; '\r'; '\n'] 241 | ] end) 242 | 243 | VarIntEncoder.u8(wb, 0) 244 | VarIntEncoder.u8(wb, 0x42) 245 | VarIntEncoder.u16(wb, 0xDEAD) 246 | VarIntEncoder.u32(wb, 0xDEADBEEF) 247 | VarIntEncoder.u64(wb, 0xDEADBEEFFEEDFACE) 248 | VarIntEncoder.i8(wb, -42) 249 | VarIntEncoder.i16(wb, -0xEAD) 250 | VarIntEncoder.i32(wb, -0xEADBEEF) 251 | VarIntEncoder.i64(wb, -0xEADBEEFFEEDFACE) 252 | 253 | VarIntEncoder.i64(wb, 150) 254 | VarIntEncoder.i64(wb, -150) 255 | VarIntEncoder.i64(wb, -2147483648) 256 | 257 | for bs in wb.done().values() do 258 | try 259 | b.append(bs as Array[U8] val) 260 | end 261 | end 262 | 263 | // These expectations peek into the buffer without consuming bytes. 264 | h.assert_eq[U8](BigEndianDecoder.peek_u8(b)?, 0x42) 265 | h.assert_eq[U16](BigEndianDecoder.peek_u16(b, 1)?, 0xDEAD) 266 | h.assert_eq[U16](LittleEndianDecoder.peek_u16(b, 3)?, 0xDEAD) 267 | h.assert_eq[U32](BigEndianDecoder.peek_u32(b, 5)?, 0xDEADBEEF) 268 | h.assert_eq[U32](LittleEndianDecoder.peek_u32(b, 9)?, 0xDEADBEEF) 269 | h.assert_eq[U64](BigEndianDecoder.peek_u64(b, 13)?, 0xDEADBEEFFEEDFACE) 270 | h.assert_eq[U64](LittleEndianDecoder.peek_u64(b, 21)?, 0xDEADBEEFFEEDFACE) 271 | h.assert_eq[U128](BigEndianDecoder.peek_u128(b, 29)?, 272 | 0xDEADBEEFFEEDFACEDEADBEEFFEEDFACE) 273 | h.assert_eq[U128](LittleEndianDecoder.peek_u128(b, 45)?, 274 | 0xDEADBEEFFEEDFACEDEADBEEFFEEDFACE) 275 | 276 | h.assert_eq[U8](BigEndianDecoder.peek_u8(b, 61)?, 'h') 277 | h.assert_eq[U8](BigEndianDecoder.peek_u8(b, 62)?, 'i') 278 | 279 | var offset: USize = 0 280 | var pos: USize = 71 281 | 282 | (let x, offset) = VarIntDecoder.peek_u8(b, pos)? 283 | h.assert_eq[U8](x, 0) 284 | pos = pos + offset 285 | 286 | (let x', offset) = VarIntDecoder.peek_u8(b, pos)? 
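    // varint peeks return the decoded value plus its encoded width in bytes,
    // so the read position can be advanced manually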
287 | h.assert_eq[U8](x', 0x42) 288 | pos = pos + offset 289 | 290 | (let y, offset) = VarIntDecoder.peek_u16(b, pos)? 291 | h.assert_eq[U16](y, 0xDEAD) 292 | pos = pos + offset 293 | 294 | (let y', offset) = VarIntDecoder.peek_u32(b, pos)? 295 | h.assert_eq[U32](y', 0xDEADBEEF) 296 | pos = pos + offset 297 | 298 | (let y'', offset) = VarIntDecoder.peek_u64(b, pos)? 299 | h.assert_eq[U64](y'', 0xDEADBEEFFEEDFACE) 300 | pos = pos + offset 301 | 302 | 303 | (let z, offset) = VarIntDecoder.peek_i8(b, pos)? 304 | h.assert_eq[I8](z, -42) 305 | pos = pos + offset 306 | 307 | (let z', offset) = VarIntDecoder.peek_i16(b, pos)? 308 | h.assert_eq[I16](z', -0xEAD) 309 | pos = pos + offset 310 | 311 | (let z'', offset) = VarIntDecoder.peek_i32(b, pos)? 312 | h.assert_eq[I32](z'', -0xEADBEEF) 313 | pos = pos + offset 314 | 315 | (let z''', offset) = VarIntDecoder.peek_i64(b, pos)? 316 | h.assert_eq[I64](z''', -0xEADBEEFFEEDFACE) 317 | 318 | 319 | // These expectations consume bytes from the head of the buffer. 320 | h.assert_eq[U8](BigEndianDecoder.u8(b)?, 0x42) 321 | h.assert_eq[U16](BigEndianDecoder.u16(b)?, 0xDEAD) 322 | h.assert_eq[U16](LittleEndianDecoder.u16(b)?, 0xDEAD) 323 | h.assert_eq[U32](BigEndianDecoder.u32(b)?, 0xDEADBEEF) 324 | h.assert_eq[U32](LittleEndianDecoder.u32(b)?, 0xDEADBEEF) 325 | h.assert_eq[U64](BigEndianDecoder.u64(b)?, 0xDEADBEEFFEEDFACE) 326 | h.assert_eq[U64](LittleEndianDecoder.u64(b)?, 0xDEADBEEFFEEDFACE) 327 | h.assert_eq[U128](BigEndianDecoder.u128(b)?, 328 | 0xDEADBEEFFEEDFACEDEADBEEFFEEDFACE) 329 | h.assert_eq[U128](LittleEndianDecoder.u128(b)?, 330 | 0xDEADBEEFFEEDFACEDEADBEEFFEEDFACE) 331 | 332 | h.assert_eq[String](b.line()?, "hi") 333 | h.assert_eq[String](b.line()?, "there") 334 | 335 | h.assert_eq[U8](VarIntDecoder.u8(b)?, 0) 336 | h.assert_eq[U8](VarIntDecoder.u8(b)?, 0x42) 337 | h.assert_eq[U16](VarIntDecoder.u16(b)?, 0xDEAD) 338 | h.assert_eq[U32](VarIntDecoder.u32(b)?, 0xDEADBEEF) 339 | h.assert_eq[U64](VarIntDecoder.u64(b)?, 0xDEADBEEFFEEDFACE) 340 | h.assert_eq[I8](VarIntDecoder.i8(b)?, -42) 341 | h.assert_eq[I16](VarIntDecoder.i16(b)?, -0xEAD) 342 | h.assert_eq[I32](VarIntDecoder.i32(b)?, -0xEADBEEF) 343 | h.assert_eq[I64](VarIntDecoder.i64(b)?, -0xEADBEEFFEEDFACE) 344 | 345 | h.assert_eq[U64](VarIntDecoder.u64(b)?, 300) 346 | h.assert_eq[U64](VarIntDecoder.u64(b)?, 299) 347 | h.assert_eq[U64](VarIntDecoder.u64(b)?, 4294967295) 348 | 349 | b.append(recover [as U8: 'h'; 'i'] end) 350 | 351 | try 352 | b.line()? 353 | h.fail("shouldn't have a line") 354 | end 355 | 356 | b.append(recover [as U8: '!'; '\n'] end) 357 | h.assert_eq[String](b.line()?, "hi!") 358 | -------------------------------------------------------------------------------- /pony-kafka/custombuffered/val_reader.pony: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Copyright (C) 2016-2017, Sendence LLC 4 | Copyright (C) 2016-2017, The Pony Developers 5 | Copyright (c) 2014-2015, Causality Ltd. 6 | All rights reserved. 7 | 8 | Redistribution and use in source and binary forms, with or without 9 | modification, are permitted provided that the following conditions are met: 10 | 11 | 1. Redistributions of source code must retain the above copyright notice, this 12 | list of conditions and the following disclaimer. 13 | 2. Redistributions in binary form must reproduce the above copyright notice, 14 | this list of conditions and the following disclaimer in the documentation 15 | and/or other materials provided with the distribution. 
16 | 
17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
18 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
21 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
24 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
26 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 | 
28 | */
29 | 
30 | use "collections"
31 | 
32 | class ValReader is PeekableReader
33 |   """
34 |   Store network data and provide a parsing interface.
35 | 
36 |   `ValReader` provides a way to extract typed data from a sequence of
37 |   bytes. The `ValReader` manages the underlying data structures to
38 |   provide a read cursor over a contiguous sequence of bytes. It is
39 |   useful for decoding data that is received over a network or stored
40 |   in a file. Chunks of bytes are added to the reader using the
41 |   `append` method, and typed data is extracted using the decoder
42 |   primitives from the `codecs` package and the getter methods here.
43 | 
44 |   For example, suppose we have a UDP-based network data protocol where
45 |   messages consist of the following:
46 | 
47 |   * `list_size` - the number of items in the following list of items
48 |     as a big-endian 32-bit integer
49 |   * zero or more items of the following data:
50 |     * a big-endian 64-bit unsigned integer id
51 |     * a string that starts with a big-endian 32-bit integer that
52 |       specifies the length of the string, followed by a number of
53 |       bytes that represent the string
54 | 
55 |   A message would be something like this:
56 | 
57 |   ```
58 |   [list_size][id1][string1][id2][string2]...
59 |   ```
60 | 
61 |   The following program uses a `ValReader` with the big-endian decoder
62 |   to decode messages of this type and print each item:
63 | 
64 |   ```
65 |   use "collections"
66 |   use "custombuffered"        // dependency paths assumed
67 |   use "custombuffered/codecs"
68 | 
69 |   class Notify is StdinNotify
70 |     let _env: Env
71 |     new create(env: Env) =>
72 |       _env = env
73 |     fun ref apply(data: Array[U8] iso) =>
74 |       let rb = ValReader
75 |       rb.append(consume data)
76 |       try
77 |         while true do
78 |           let items = BigEndianDecoder.u32(rb)?.usize()
79 |           for range in Range(0, items) do
80 |             let id = BigEndianDecoder.u64(rb)?
81 |             let str_len = BigEndianDecoder.u32(rb)?.usize()
82 |             let str = String.from_array(rb.block(str_len)?)
83 |             _env.out.print("[(" + id.string() + "), (" + str + ")]")
84 |           end
85 |         end
86 |       end
87 | 
88 |   actor Main
89 |     new create(env: Env) =>
90 |       env.input(recover Notify(env) end, 1024)
91 |   ```
92 |   """
93 |   embed _chunks: List[(Array[U8] val, USize)] = _chunks.create()
94 |   var _available: USize = 0
95 |   var _search_node: (ListNode[(Array[U8] val, USize)] | None) = None
96 |   var _search_len: USize = 0
97 | 
98 |   fun size(): USize =>
99 |     """
100 |     Return the number of available bytes.
101 |     """
102 |     _available
103 | 
104 |   fun ref clear() =>
105 |     """
106 |     Discard all pending data.
107 |     """
108 |     _chunks.clear()
109 |     _available = 0
110 | 
111 |   fun ref _append(data: ByteSeq) =>
112 |     """
113 |     Add a chunk of data.
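    Strings are appended as their underlying byte arrays, so no bytes are
    copied.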
114 | """ 115 | let data_array = 116 | match data 117 | | let data': Array[U8] val => data' 118 | | let data': String => data'.array() 119 | end 120 | 121 | _available = _available + data_array.size() 122 | _chunks.push((data_array, 0)) 123 | 124 | fun ref append(data: (ByteSeq | Array[ByteSeq] val)) => 125 | """ 126 | Add a chunk of data. 127 | """ 128 | match data 129 | | let data': ByteSeq => _append(data') 130 | | let data': Array[ByteSeq] val => 131 | for d in data'.values() do 132 | match d 133 | | let s: String => append(s.array()) 134 | | let a: Array[U8] val => append(a) 135 | end 136 | end 137 | end 138 | 139 | fun ref skip(n: USize) ? => 140 | """ 141 | Skip n bytes. 142 | """ 143 | if _available >= n then 144 | _available = _available - n 145 | var rem = n 146 | 147 | while rem > 0 do 148 | let node = _chunks.head()? 149 | (var data, var offset) = node()? 150 | let avail = data.size() - offset 151 | 152 | if avail > rem then 153 | node()? = (data, offset + rem) 154 | break 155 | end 156 | 157 | rem = rem - avail 158 | _chunks.shift()? 159 | end 160 | 161 | else 162 | error 163 | end 164 | 165 | fun ref block(len: USize): Array[U8] iso^ ? => 166 | """ 167 | Return a block as a contiguous chunk of memory. 168 | """ 169 | (let num_bytes, let data) = _read_bytes(len)? 170 | 171 | match data 172 | | let a: Array[U8] val => 173 | recover a.clone() end 174 | | let arr: Array[Array[U8] val] val => 175 | var out = recover Array[U8].>undefined(num_bytes) end 176 | var i: USize = 0 177 | for a in arr.values() do 178 | out = recover 179 | let r = consume ref out 180 | a.copy_to(r, 0, i, a.size()) 181 | i = i + a.size() 182 | consume r 183 | end 184 | end 185 | out 186 | end 187 | 188 | fun ref read_until(separator: U8): Array[U8] iso^ ? => 189 | """ 190 | Find the first occurence of the separator and return the block of bytes 191 | before its position. The separator is not included in the returned array, 192 | but it is removed from the buffer. To read a line of text, prefer line() 193 | that handles \n and \r\n. 194 | """ 195 | let b = block(_distance_of(separator)? - 1)? 196 | read_byte()? 197 | consume b 198 | 199 | fun ref line(): String ? => 200 | """ 201 | Return a \n or \r\n terminated line as a string. The newline is not 202 | included in the returned string, but it is removed from the network buffer. 203 | """ 204 | let len = _search_length()? 205 | 206 | _available = _available - len 207 | var out = recover String(len) end 208 | var i = USize(0) 209 | 210 | while i < len do 211 | let node = _chunks.head()? 212 | (let data, let offset) = node()? 213 | 214 | let avail = data.size() - offset 215 | let need = len - i 216 | let copy_len = need.min(avail) 217 | 218 | out.append(data, offset, copy_len) 219 | 220 | if avail > need then 221 | node()? = (data, offset + need) 222 | break 223 | end 224 | 225 | i = i + copy_len 226 | _chunks.shift()? 227 | end 228 | 229 | out.truncate(len - 230 | if (len >= 2) and (out.at_offset(-2)? == '\r') then 2 else 1 end) 231 | 232 | out 233 | 234 | fun ref read_byte(): U8 ? => 235 | """ 236 | Get a single byte. 237 | """ 238 | let node = _chunks.head()? 239 | (var data, var offset) = node()? 240 | let r = data(offset)? 241 | 242 | offset = offset + 1 243 | _available = _available - 1 244 | 245 | if offset < data.size() then 246 | node()? = (data, offset) 247 | else 248 | _chunks.shift()? 249 | end 250 | r 251 | 252 | fun ref read_bytes(len: USize): (Array[U8] val | Array[Array[U8] val] val) ? 
253 | => 254 | _read_bytes(len)?._2 255 | 256 | fun ref _read_bytes(len: USize): (USize, (Array[U8] val | Array[Array[U8] val] val)) ? 257 | => 258 | """ 259 | Return a number of bytes as either a contiguous array or an array of arrays 260 | """ 261 | if len == 0 then 262 | return (0, recover Array[U8] end) 263 | end 264 | 265 | if _available < len then 266 | error 267 | end 268 | 269 | _available = _available - len 270 | var out = recover Array[Array[U8] val] end 271 | var i = USize(0) 272 | 273 | while i < len do 274 | let node = _chunks.head()? 275 | (let data, let offset) = node()? 276 | 277 | let avail = data.size() - offset 278 | let need = len - i 279 | let copy_len = need.min(avail) 280 | 281 | let next_segment = data.trim(offset, offset + copy_len) 282 | 283 | if avail > need then 284 | node()? = (data, offset + need) 285 | if out.size() == 0 then 286 | return (copy_len, next_segment) 287 | else 288 | out.push(next_segment) 289 | break 290 | end 291 | else 292 | out.push(next_segment) 293 | end 294 | 295 | i = i + copy_len 296 | _chunks.shift()? 297 | end 298 | 299 | (i, consume out) 300 | 301 | // TODO: Add rewind ability 302 | // TODO: Add get position 303 | // TODO: Add peek_contiguous_bytes function 304 | fun ref read_contiguous_bytes(len: USize): Array[U8] val ? => 305 | """ 306 | Return a block as a contiguous chunk of memory without copying if possible 307 | or throw an error. 308 | """ 309 | // TODO: enhance to fall back to a copy if have non-contiguous data and 310 | // return an iso? Not possible because iso/val distinction doesn't exist at 311 | // runtime? Maybe need to enhance callers to be able to work with 312 | // non-contiguous memory? 313 | 314 | if len == 0 then 315 | return recover Array[U8] end 316 | end 317 | 318 | if _available < len then 319 | error 320 | end 321 | 322 | var out = recover Array[Array[U8] val] end 323 | 324 | let node = _chunks.head()? 325 | (let data, let offset) = node()? 326 | 327 | let avail = data.size() - offset 328 | let need = len 329 | let copy_len = need.min(avail) 330 | 331 | if avail >= need then 332 | let next_segment = data.trim(offset, offset + copy_len) 333 | node()? = (data, offset + need) 334 | _available = _available - len 335 | return next_segment 336 | end 337 | 338 | node()? = (data, offset) 339 | error 340 | 341 | fun box peek_byte(offset: USize = 0): U8 ? => 342 | """ 343 | Get the byte at the given offset without moving the cursor forward. 344 | Raise an error if the given offset is not yet available. 345 | """ 346 | var offset' = offset 347 | var iter = _chunks.nodes() 348 | 349 | while true do 350 | let node = iter.next()? 351 | (var data, var node_offset) = node()? 352 | offset' = offset' + node_offset 353 | 354 | let data_size = data.size() 355 | if offset' >= data_size then 356 | offset' = offset' - data_size 357 | else 358 | return data(offset')? 359 | end 360 | end 361 | 362 | error 363 | 364 | fun box peek_bytes(len: USize, offset: USize = 0): 365 | (Array[U8] val | Array[Array[U8] val] val) ? 366 | => 367 | """ 368 | Return a number of bytes as either a contiguous array or an array of arrays 369 | without moving the cursor forward 370 | """ 371 | if _available < (offset + len) then 372 | error 373 | end 374 | 375 | var offset' = offset 376 | var iter = _chunks.nodes() 377 | 378 | while true do 379 | let node = iter.next()? 380 | (var data, var node_offset) = node()? 
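        // shift the requested offset past the bytes already consumed from
        // this chunk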
381 |         offset' = offset' + node_offset
382 | 
383 |       let data_size = data.size()
384 |       if offset' >= data_size then
385 |         offset' = offset' - data_size
386 |       else
387 |         if (data_size - offset') > len then
388 |           return data.trim(offset', offset' + len)
389 |         end
390 | 
391 |         var out = recover Array[Array[U8] val] end
392 |         var i = USize(0)
393 | 
394 |         i = i + (data_size - offset')
395 |         out.push(data.trim(offset'))
396 | 
397 |         while i < len do
398 |           let node' = iter.next()?
399 |           (let data', let offset'') = node'()?
400 | 
401 |           let avail = data'.size() - offset''
402 |           let need = len - i
403 |           let copy_len = need.min(avail)
404 | 
405 |           let next_segment = data'.trim(offset'', offset'' + copy_len)
406 | 
407 |           if avail > need then
408 |             if out.size() == 0 then
409 |               return next_segment
410 |             else
411 |               out.push(next_segment)
412 |               break
413 |             end
414 |           else
415 |             out.push(next_segment)
416 |           end
417 | 
418 |           i = i + copy_len
419 |         end
420 | 
421 |         return consume out
422 | 
423 |       end
424 |     end
425 | 
426 |     error
427 | 
428 |   fun ref _distance_of(byte: U8): USize ? =>
429 |     """
430 |     Get the distance to the first occurrence of the given byte.
431 |     """
432 |     if _chunks.size() == 0 then
433 |       error
434 |     end
435 | 
436 |     var node = if _search_len > 0 then
437 |       let prev = _search_node as ListNode[(Array[U8] val, USize)]
438 | 
439 |       if not prev.has_next() then
440 |         error
441 |       end
442 | 
443 |       prev.next() as ListNode[(Array[U8] val, USize)]
444 |     else
445 |       _chunks.head()?
446 |     end
447 | 
448 |     while true do
449 |       (var data, var offset) = node()?
450 | 
451 |       try
452 |         let len = (_search_len + data.find(byte, offset)? + 1) - offset
453 |         _search_node = None
454 |         _search_len = 0
455 |         return len
456 |       end
457 | 
458 |       _search_len = _search_len + (data.size() - offset)
459 | 
460 |       if not node.has_next() then
461 |         break
462 |       end
463 | 
464 |       node = node.next() as ListNode[(Array[U8] val, USize)]
465 |     end
466 | 
467 |     _search_node = node
468 |     error
469 | 
470 |   fun ref _search_length(): USize ? =>
471 |     """
472 |     Get the length of a pending line. Raise an error if there is no pending
473 |     line.
474 |     """
475 |     _distance_of('\n')?
476 | 
-------------------------------------------------------------------------------- /pony-kafka/compression/zlib.pony: --------------------------------------------------------------------------------
1 | /*
2 | 
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 | 
7 |   http://www.apache.org/licenses/LICENSE-2.0
8 | 
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | 15 | */ 16 | 17 | use "../customlogger" 18 | use "lib:z" 19 | 20 | use @crc32[USize](crc: USize, buf: Pointer[U8] tag, len: U32) 21 | use @deflateInit2_[I32](zstream: MaybePointer[_ZStream], level: I32, 22 | method: I32, window_bits: I32, mem_level: I32, strategy: I32, 23 | zlib_version: Pointer[U8] tag, size_zstream: I32) 24 | use @deflateBound[ULong](zstream: MaybePointer[_ZStream], source_len: ULong) 25 | use @deflate[I32](zstream: MaybePointer[_ZStream], flush: I32) 26 | use @deflateEnd[I32](zstream: MaybePointer[_ZStream]) 27 | use @inflateInit2_[I32](zstream: MaybePointer[_ZStream], window_bits: I32, 28 | zlib_version: Pointer[U8] tag, size_zstream: I32) 29 | use @inflateGetHeader[I32](zstream: MaybePointer[_ZStream], 30 | gz_header: MaybePointer[_GZHeader]) 31 | use @inflateEnd[I32](zstream: MaybePointer[_ZStream]) 32 | use @inflate[I32](zstream: MaybePointer[_ZStream], flush: I32) 33 | 34 | struct _ZStream 35 | var next_in: Pointer[U8] tag = Pointer[U8] // next input byte 36 | var avail_in: U32 = 0 // number of bytes available at next input 37 | var total_in: ULong = 0 // total number of input bytes read so far 38 | var next_out: Pointer[U8] tag = Pointer[U8] // next output byte will go here 39 | var avail_out: U32 = 0 // remaining free space at next_out 40 | var total_out: ULong = 0 // total number of bytes output so far 41 | var msg: Pointer[U8] = Pointer[U8] // last error message, NULL if no error 42 | // internal state; not visible by applications 43 | var state: Pointer[U8] tag = Pointer[U8] 44 | // used to allocate the internal state 45 | var alloc_fn: Pointer[U8] tag = Pointer[U8] 46 | var free_fn: Pointer[U8] tag = Pointer[U8] // used to free the internal state 47 | // private data object passed to zalloc and zfree 48 | var opaque: Pointer[U8] tag = Pointer[U8] 49 | // best guess about the data type: binary or text for deflate, or the decoding 50 | // state for inflate 51 | var data_type: I32 = 0 52 | var adler: ULong = 0 // Adler-32 or CRC-32 value of the uncompressed data 53 | var reserved: ULong = 0 // reserved for future use 54 | 55 | new create() => None 56 | 57 | fun struct_size_bytes(): I32 => 58 | ifdef ilp32 or llp64 then 59 | 4 + 4 + 4 + 4 + 4 + 4 + 4 + 4 + 4 + 4 + 4 + 4 + 4 + 4 60 | else 61 | 8 + 8 + 8 + 8 + 8 + 8 + 8 + 8 + 8 + 8 + 8 + 8 + 8 + 8 62 | end 63 | 64 | struct _GZHeader 65 | var text: I32 = 0 // true if compressed data believed to be text 66 | var time: ULong = 0 // modification time 67 | var xflags: I32 = 0 // extra flags (not used when writing a gzip file) 68 | var os: I32 = 0 // operating system 69 | // pointer to extra field or Z_NULL if none 70 | var extra: Pointer[U8] = Pointer[U8] 71 | var extra_len: U32 = 0 // extra field length (valid if extra != Z_NULL) 72 | var extra_max: U32 = 0 // space at extra (only when reading header) 73 | // pointer to zero-terminated file name or Z_NULL 74 | var name: Pointer[U8] = Pointer[U8] 75 | var name_max: U32 = 0 // space at name (only when reading header) 76 | // pointer to zero-terminated comment or Z_NULL 77 | var comment: Pointer[U8] = Pointer[U8] 78 | var comm_max: U32 = 0 // space at comment (only when reading header) 79 | var hcrc: I32 = 0 // true if there was or will be a header crc 80 | // true when done reading gzip header (not used when writing a gzip file) 81 | var done: I32 = 0 82 | 83 | new create() => None 84 | 85 | // compression levels 86 | primitive ZNoCompression 87 | fun apply(): I32 => 0 88 | 89 | primitive ZBestSpeed 90 | fun apply(): I32 => 1 91 | 92 | primitive ZBestCompression 93 
| fun apply(): I32 => 9 94 | 95 | primitive ZDefaultCompression 96 | fun apply(): I32 => -1 97 | 98 | // compression method 99 | primitive ZDeflated 100 | fun apply(): I32 => 8 101 | 102 | // null value 103 | primitive ZNull 104 | fun apply(): I32 => 0 105 | 106 | 107 | // compression strategies 108 | primitive ZDefaultStrategy 109 | fun apply(): I32 => 0 110 | 111 | primitive ZFiltered 112 | fun apply(): I32 => 1 113 | 114 | primitive ZHuffmanOnly 115 | fun apply(): I32 => 2 116 | 117 | primitive ZRle 118 | fun apply(): I32 => 3 119 | 120 | primitive ZFixed 121 | fun apply(): I32 => 4 122 | 123 | // possible data_type values 124 | primitive ZBinary 125 | fun apply(): I32 => 0 126 | 127 | primitive ZText 128 | fun apply(): I32 => 1 129 | 130 | primitive ZUnknown 131 | fun apply(): I32 => 2 132 | 133 | // possible flush values 134 | primitive ZNoFlush 135 | fun apply(): I32 => 0 136 | 137 | primitive ZPartialFlush 138 | fun apply(): I32 => 1 139 | 140 | primitive ZSyncFlush 141 | fun apply(): I32 => 2 142 | 143 | primitive ZFullFlush 144 | fun apply(): I32 => 3 145 | 146 | primitive ZFinish 147 | fun apply(): I32 => 4 148 | 149 | primitive ZBlock 150 | fun apply(): I32 => 5 151 | 152 | primitive ZTrees 153 | fun apply(): I32 => 6 154 | 155 | // possible return codes 156 | primitive ZOk 157 | fun apply(): I32 => 0 158 | 159 | primitive ZStreamEnd 160 | fun apply(): I32 => 1 161 | 162 | primitive ZNeedDict 163 | fun apply(): I32 => 2 164 | 165 | primitive ZErrno 166 | fun apply(): I32 => -1 167 | 168 | primitive ZStreamError 169 | fun apply(): I32 => -2 170 | 171 | primitive ZDataError 172 | fun apply(): I32 => -3 173 | 174 | primitive ZMemError 175 | fun apply(): I32 => -4 176 | 177 | primitive ZBufError 178 | fun apply(): I32 => -5 179 | 180 | primitive ZVersionError 181 | fun apply(): I32 => -6 182 | 183 | primitive Crc32 184 | fun crc32(data: ByteSeq box): USize => 185 | @crc32(crc32_init(), data.cpointer(), data.size().u32()) 186 | 187 | fun crc32_init(): USize => 188 | @crc32(USize(0), Pointer[U8], U32(0)) 189 | 190 | fun crc32_array(data: Array[ByteSeq] box): USize => 191 | var crc = crc32_init() 192 | for d in data.values() do 193 | crc = @crc32(crc, d.cpointer(), d.size().u32()) 194 | end 195 | 196 | crc 197 | 198 | primitive ZlibCompressor 199 | fun compress(logger: Logger[String], data: ByteSeq): Array[U8] iso^ ? => 200 | let zlib = Zlib.compressor(logger where window_bits = 15+16)? 201 | zlib.compress_array(recover val [data] end, data.size())? 202 | 203 | fun compress_array(logger: Logger[String], data: Array[ByteSeq] val, 204 | total_size: USize): Array[U8] iso^ ? 205 | => 206 | let zlib = Zlib.compressor(logger where window_bits = 15+16)? 207 | zlib.compress_array(data, total_size)? 208 | 209 | primitive ZlibDecompressor 210 | fun decompress(logger: Logger[String], data: ByteSeq): Array[U8] iso^ ? => 211 | let zlib = Zlib.decompressor(logger where window_bits = 15+32) 212 | zlib.decompress(data)? 213 | 214 | class Zlib 215 | var _stream: _ZStream = _ZStream 216 | var _stream_p: MaybePointer[_ZStream] 217 | let _logger: Logger[String] 218 | let _window_bits: I32 219 | 220 | let zlib_version: String = "1.2.8" 221 | 222 | new ref compressor(logger: Logger[String], level: I32 = ZDefaultCompression(), 223 | method: I32 = ZDeflated(), window_bits: I32 = 15, mem_level: I32 = 8, 224 | strategy: I32 = ZDefaultStrategy()) ? 
225 | => 226 | _logger = logger 227 | _stream_p = MaybePointer[_ZStream](_stream) 228 | _window_bits = window_bits 229 | 230 | let err = @deflateInit2_(_stream_p, level, method, window_bits, mem_level, 231 | strategy, zlib_version.cstring(), _stream.struct_size_bytes()) 232 | 233 | if err != ZOk() then 234 | _check_error(err)? 235 | end 236 | 237 | new ref decompressor(logger: Logger[String], window_bits: I32 = 15) => 238 | _logger = logger 239 | _stream_p = MaybePointer[_ZStream](_stream) 240 | _window_bits = window_bits 241 | 242 | 243 | fun ref _check_error(err: I32) ? => 244 | // TODO: Any way to do this without copying the string? Doesn't seem 245 | // possible because GC will try and free data in pointer 246 | let err_str = String.copy_cstring(_stream.msg) 247 | match err 248 | | ZOk() => 249 | _logger(Error) and 250 | _logger.log(Error, "Encountered ZOk (" + err.string() + ").") 251 | | ZStreamEnd() => 252 | _logger(Error) and 253 | _logger.log(Error, "Zlib reached stream end (" + err.string() + ")!") 254 | | ZNeedDict() => 255 | _logger(Error) and 256 | _logger.log(Error, "Zlib needs dictionary (" + err.string() + 257 | ")! zlib error message: " + err_str) 258 | | ZErrno() => 259 | _logger(Error) and 260 | _logger.log(Error, "Zlib encountered error (" + err.string() + 261 | ")! zlib error message: " + err_str) 262 | | ZStreamError() => 263 | _logger(Error) and 264 | _logger.log(Error, "Zlib encountered stream error (" + err.string() + 265 | ")! zlib error message: " + err_str) 266 | | ZDataError() => 267 | _logger(Error) and 268 | _logger.log(Error, "Zlib encountered data error (" + err.string() + 269 | ")! zlib error message: " + err_str) 270 | | ZMemError() => 271 | _logger(Error) and 272 | _logger.log(Error, "Zlib encountered memory error (" + err.string() + 273 | ")! zlib error message: " + err_str) 274 | | ZBufError() => 275 | _logger(Error) and 276 | _logger.log(Error, "Zlib encountered buffer error (" + err.string() + 277 | ")! zlib error message: " + err_str) 278 | | ZVersionError() => 279 | _logger(Error) and 280 | _logger.log(Error, "Zlib encountered version error (" + err.string() + 281 | ")! zlib error message: " + err_str + ", zlib version: " + 282 | String.copy_cstring(@zlibVersion[Pointer[U8]]())) 283 | else 284 | _logger(Error) and 285 | _logger.log(Error, "Zlib encountered unknown error ( " + err.string() + 286 | ")! zlib error message: " + err_str) 287 | end 288 | 289 | error 290 | 291 | // based on https://github.com/edenhill/librdkafka/blob/master/src/rdgz.c 292 | fun ref decompress(data: ByteSeq, decompressed_size: (USize | None) = None): 293 | Array[U8] iso^ ? 294 | => 295 | let buffer_size = match decompressed_size 296 | | None => calculate_decompressed_size(data)? 297 | | let x: USize => x 298 | end 299 | 300 | _stream = _ZStream 301 | _stream_p = MaybePointer[_ZStream](_stream) 302 | let hdr = _GZHeader 303 | let hdr_p = MaybePointer[_GZHeader](hdr) 304 | 305 | var err = @inflateInit2_(_stream_p, _window_bits, zlib_version.cstring(), 306 | _stream.struct_size_bytes()) 307 | 308 | if err != ZOk() then 309 | _check_error(err)? 310 | end 311 | 312 | let buffer = recover Array[U8](buffer_size) end 313 | buffer.undefined(buffer.space()) 314 | 315 | _stream.next_in = data.cpointer() 316 | _stream.avail_in = data.size().u32() 317 | 318 | err = @inflateGetHeader(_stream_p, hdr_p) 319 | try 320 | if err != ZOk() then 321 | _check_error(err)? 
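        // _check_error always raises after logging, so the else clause below
        // cleans up with inflateEnd before re-raising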
322 | end 323 | else 324 | // clean up zlib internal state and end deflate stream 325 | @inflateEnd(_stream_p) 326 | error 327 | end 328 | 329 | _stream.next_out = buffer.cpointer() 330 | _stream.avail_out = buffer.size().u32() 331 | 332 | err = @inflate(_stream_p, ZFinish()) 333 | try 334 | if err != ZStreamEnd() then 335 | _check_error(err)? 336 | end 337 | if _stream.avail_in != 0 then 338 | _logger(Error) and _logger.log(Error, 339 | "Zlib inflate didn't read all input!") 340 | error 341 | end 342 | else 343 | // clean up zlib internal state and end deflate stream 344 | @inflateEnd(_stream_p) 345 | error 346 | end 347 | 348 | buffer.truncate(_stream.total_out.usize()) 349 | 350 | // clean up zlib internal state and end deflate stream 351 | @inflateEnd(_stream_p) 352 | 353 | buffer 354 | 355 | 356 | fun ref calculate_decompressed_size(data: ByteSeq): USize ? => 357 | _stream_p = MaybePointer[_ZStream](_stream) 358 | let hdr = _GZHeader 359 | let hdr_p = MaybePointer[_GZHeader](hdr) 360 | 361 | var err = @inflateInit2_(_stream_p, _window_bits, zlib_version.cstring(), 362 | _stream.struct_size_bytes()) 363 | 364 | if err != ZOk() then 365 | _check_error(err)? 366 | end 367 | 368 | let buffer_size: USize = 512 369 | let buffer = recover Array[U8](buffer_size) end 370 | buffer.undefined(buffer.space()) 371 | 372 | _stream.next_in = data.cpointer() 373 | _stream.avail_in = data.size().u32() 374 | 375 | var len: USize = 0 376 | 377 | err = @inflateGetHeader(_stream_p, hdr_p) 378 | try 379 | if err != ZOk() then 380 | _check_error(err)? 381 | end 382 | else 383 | // clean up zlib internal state and end deflate stream 384 | @inflateEnd(_stream_p) 385 | error 386 | end 387 | 388 | var p = buffer.cpointer() 389 | var p_size = buffer.size() 390 | 391 | repeat 392 | _stream.next_out = p 393 | _stream.avail_out = p_size.u32() 394 | 395 | err = @inflate(_stream_p, ZNoFlush()) 396 | try 397 | if (err != ZOk()) and (err != ZStreamEnd()) and 398 | (err != ZBufError()) then 399 | _check_error(err)? 400 | end 401 | else 402 | // clean up zlib internal state and end deflate stream 403 | @inflateEnd(_stream_p) 404 | error 405 | end 406 | 407 | until err == ZStreamEnd() end 408 | 409 | len = _stream.total_out.usize() 410 | 411 | // clean up zlib internal state and end deflate stream 412 | @inflateEnd(_stream_p) 413 | 414 | len 415 | 416 | 417 | // based on 418 | // https://github.com/edenhill/librdkafka/blob/master/src/rdkafka_msgset_writer.c#L723 419 | fun ref compress(data: ByteSeq): Array[U8] iso^ ? => 420 | compress_array(recover val [data] end, data.size())? 421 | 422 | fun ref compress_array(data: Array[ByteSeq] val, total_size: USize): 423 | Array[U8] iso^ ? 424 | => 425 | let max_len = @deflateBound(_stream_p, total_size.ulong()).u32() 426 | let buffer = recover Array[U8](max_len.usize()) end 427 | buffer.undefined(buffer.space()) 428 | _stream.next_out = buffer.cpointer() 429 | _stream.avail_out = buffer.size().u32() 430 | 431 | for d in data.values() do 432 | _stream.next_in = d.cpointer() 433 | _stream.avail_in = d.size().u32() 434 | let err = @deflate(_stream_p, ZNoFlush()) 435 | try 436 | if err != ZOk() then 437 | _check_error(err)? 
438 | end 439 | 440 | if _stream.avail_in != 0 then 441 | _logger(Error) and _logger.log(Error, 442 | "Zlib deflate didn't read all input!") 443 | error 444 | end 445 | else 446 | // clean up zlib internal state and end deflate stream 447 | @deflateEnd(_stream_p) 448 | error 449 | end 450 | end 451 | 452 | let err = @deflate(_stream_p, ZFinish()) 453 | try 454 | if err != ZStreamEnd() then 455 | _check_error(err)? 456 | end 457 | else 458 | // clean up zlib internal state and end deflate stream 459 | @deflateEnd(_stream_p) 460 | error 461 | end 462 | 463 | buffer.truncate(_stream.total_out.usize()) 464 | 465 | // clean up zlib internal state and end deflate stream 466 | @deflateEnd(_stream_p) 467 | 468 | buffer 469 | --------------------------------------------------------------------------------