├── .ci ├── certs │ ├── ca_certificate.pem │ ├── ca_key.pem │ ├── client.p12 │ ├── client_certificate.pem │ ├── client_key.pem │ ├── server.p12 │ ├── server_certificate.pem │ └── server_key.pem ├── conf │ ├── enabled_plugins │ └── rabbitmq.conf ├── install.ps1 └── versions.json ├── .github ├── .codecov.yaml ├── dependabot.yml ├── release.yml └── workflows │ ├── build_and_test.yml │ └── codeql-analysis.yml ├── .gitignore ├── .golangci.yml ├── .goreleaser.yml ├── CiDockerfile ├── Docker └── Dockerfile ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── VERSION ├── best_practices └── README.md ├── compose ├── .gitignore ├── README.md ├── ha_tls │ ├── Dockerfile │ ├── conf │ │ ├── enabled_plugins │ │ └── rabbitmq.conf │ ├── docker-compose.yml │ └── haproxy.cfg └── tls │ └── conf │ ├── enabled_plugins │ └── rabbitmq.conf ├── create_tag.sh ├── examples ├── README.md ├── automaticOffsetTracking │ └── automaticOffsetTracking.go ├── deduplication │ └── deduplication.go ├── filtering │ └── filtering.go ├── getting_started │ └── getting_started.go ├── offsetStart │ └── offset.go ├── offsetTracking │ └── offsetTracking.go ├── proxy │ └── proxy.go ├── reliable │ ├── README.md │ └── reliable_client.go ├── reliable_getting_started │ └── reliable_getting_started.go ├── single_active_consumer │ ├── README.md │ ├── producer │ │ └── producer.go │ └── single_active_consumer.go ├── sub-entries-batching │ └── sub_entries_batching.go ├── super_stream │ ├── README.md │ ├── producer │ │ └── super_stream_producer.go │ └── super_stream_sac.go ├── tail │ ├── Readme.md │ └── stream_tail.go └── tls │ └── getting_started_tls.go ├── generate ├── .gitignore └── generate_amqp10_messages.go ├── go.mod ├── go.sum ├── perfTest ├── .gitignore ├── REAMDE.md ├── cmd │ ├── commands.go │ ├── silent.go │ └── version.go └── perftest.go └── pkg ├── amqp ├── buffer.go ├── decode.go ├── encode.go ├── error_stdlib.go └── types.go ├── ha ├── ha_consumer.go ├── ha_consumer_test.go ├── ha_publisher.go ├── 
ha_publisher_test.go ├── ha_suite_test.go └── reliable_common.go ├── integration_test ├── integration_test_suite_test.go └── stream_integration_test.go ├── logs └── log.go ├── message └── interface.go ├── stream ├── aggregation.go ├── aggregation_test.go ├── available_features.go ├── available_features_test.go ├── blocking_queue.go ├── brokers.go ├── buffer_reader.go ├── buffer_writer.go ├── client.go ├── client_test.go ├── constants.go ├── consumer.go ├── consumer_sac_test.go ├── consumer_test.go ├── converters.go ├── converters_test.go ├── coordinator.go ├── coordinator_test.go ├── environment.go ├── environment_debug.go ├── environment_test.go ├── exchange_commands.go ├── filtering_test.go ├── listeners.go ├── producer.go ├── producer_test.go ├── producer_unconfirmed.go ├── server_frame.go ├── socket.go ├── stream_options.go ├── stream_stats.go ├── stream_suite_test.go ├── stream_test.go ├── super_stream.go ├── super_stream_consumer.go ├── super_stream_consumer_test.go ├── super_stream_producer.go ├── super_stream_producer_test.go ├── super_stream_test.go ├── utils.go └── utils_test.go └── test-helper └── http_utils.go /.ci/certs/ca_certificate.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDhjCCAm6gAwIBAgIUYqXLpnhFfIhE5o1qvs6gnL67IsQwDQYJKoZIhvcNAQEL 3 | BQAwTDE7MDkGA1UEAwwyVExTR2VuU2VsZlNpZ25lZHRSb290Q0EgMjAyMy0wOS0x 4 | MVQyMDo1MTozOS42MDMwMTMxDTALBgNVBAcMBCQkJCQwHhcNMjMwOTExMTg1MTM5 5 | WhcNMzMwOTA4MTg1MTM5WjBMMTswOQYDVQQDDDJUTFNHZW5TZWxmU2lnbmVkdFJv 6 | b3RDQSAyMDIzLTA5LTExVDIwOjUxOjM5LjYwMzAxMzENMAsGA1UEBwwEJCQkJDCC 7 | ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJl6TmIdlTdZpd3ZJaTafrYi 8 | 0MAcHE5FEfDQgGkTJgbeXG83MTUD2NYjlGgrb1v793PiO3/iYSK2uGv5AAQdvUmI 9 | jAP8yuJTFiIFpWvljERaDd3sg+RniUN4YaxI0xnM82A2UBWXpdAbS2ASMdPSY6+V 10 | ZX+xbBaY/H7HDL7zhrQEkl1OGgybX+segjOTX1jkNJ7QQZ924DHLvJWDNIIBt8S8 11 | aYVbf6V2MFziwD98hsfIRgF22T2bgEMkI5M0H5jO4hLkeTYE7Mhpb7TfeZCSLeVD 12 | 
/vmMbnOvgXJt0wvILwltH6MAviAQTjKIXiMbECTX81tmHInUQ+PKTiz8t5mVSLcC 13 | AwEAAaNgMF4wDwYDVR0TAQH/BAUwAwEB/zALBgNVHQ8EBAMCAQYwHQYDVR0OBBYE 14 | FCjyUnXFpZI5+zz7PJxHxDuulqqZMB8GA1UdIwQYMBaAFCjyUnXFpZI5+zz7PJxH 15 | xDuulqqZMA0GCSqGSIb3DQEBCwUAA4IBAQAw6PFWRnMqVd9rYXHHagxDSlIPj8xm 16 | 0RoeSHNgl+G8w2c6tXY2gWU0LWdmrdEX4/OpIBcw7USBSwIBCsv6vBU+HGosMhlk 17 | /K6arqvxENu/zafU1P0RMZnjiTmmzRObIsJiijFsgZQC2q6IjZetpPo6UfdHx0Xm 18 | PRrv+SnbkMk93/QCJJAOlodYwAhZqAkishR2fwlDnNDdR2Aj7qQLYuFr3t5Z92ej 19 | M7OPKbd6XudeWVR3FOxi7/fcNu8mttOtfXkFcUPigs8RJDHMEH1mLMrCzQsRMfA+ 20 | BVZiA+hifJn/9KgZXFlsANT+uLuAWmcEimDAcU/xlAf8eZLcntTB2Oep 21 | -----END CERTIFICATE----- 22 | -------------------------------------------------------------------------------- /.ci/certs/ca_key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCZek5iHZU3WaXd 3 | 2SWk2n62ItDAHBxORRHw0IBpEyYG3lxvNzE1A9jWI5RoK29b+/dz4jt/4mEitrhr 4 | +QAEHb1JiIwD/MriUxYiBaVr5YxEWg3d7IPkZ4lDeGGsSNMZzPNgNlAVl6XQG0tg 5 | EjHT0mOvlWV/sWwWmPx+xwy+84a0BJJdThoMm1/rHoIzk19Y5DSe0EGfduAxy7yV 6 | gzSCAbfEvGmFW3+ldjBc4sA/fIbHyEYBdtk9m4BDJCOTNB+YzuIS5Hk2BOzIaW+0 7 | 33mQki3lQ/75jG5zr4FybdMLyC8JbR+jAL4gEE4yiF4jGxAk1/NbZhyJ1EPjyk4s 8 | /LeZlUi3AgMBAAECggEABfZ+hDsi5P69U3GpOofEcBVXh9EBjdx8rYKBnj4kNk1w 9 | Ae6bdtC4x/kVdv+Drk7EPf94JovPSW37fvn5n4Smf142Tto9sJHR0sM7nhQ1fZQg 10 | Vq9moGw8elhe/cTNq0mdaURr06rvUH4bbV3kC3rF+vFLbR6hxqffawvBoMtisbrY 11 | xIp1MfsguOjHLhEDp9crJ+1N5XkqWKZPMpMgPI0mW4Yk+O409+hT5yg/ziNQ5a7o 12 | o86tK3axtPNiaSTPkxoU+sCVu2ILZVTbfjMk2lh7OCgoAA3A9jQ3ulRz+Cl4sXqr 13 | 1Ze5pPuRseBL10xmOmoNHR5kvqSNG3Kp6bxyTGiqIQKBgQDPYT2VyaSD4NSA5Es3 14 | p1DjJa/gItWWIsSSDnpBm5zF908g9rAVxLvx3JLUI4YPFY82o57DCi34jDrE6O0k 15 | SsjuZA2SiuqSqrHxP02RYdAh7/9S/LLM9kakj7QKUU2f92QoyQvKn8M3cygBBz3x 16 | G0uwLE0EU2wgm58SBdnAR6zHuwKBgQC9deipcKT+OG6MZWGhjYsnBiaK8XxxPl9v 17 | Kf2hBmImVDDdOkwthPLKJhP4VhjLETKo8Zi28Nxo6ueOpfn2Q5+0XrI18VNQZI/G 18 | 
ip6adbezgdIKB0OcWvkXH0Fwl7P7fgNWAPdla9jznSvGj7UnSgt7tFp1xJkXzEfU 19 | n1NEXDNdNQKBgDlOR7RimkGPGWncrCRe6e069tTbC2aHiQZLVeFXXQUfiBA12wbI 20 | 7J6zMyfIAT2d2Ythv3dqErYCGiNbslw7BjdKEq4SESwiWzWtJoQsIVWfelC2X7pf 21 | u7mxtDC9stOni1fx5n5Bk7J48e8Gz0kXH905ALdXTiPcnSJf14JYzBgNAoGBAITB 22 | dBAWkGZaYIwcFfc/2Tu1AZjmcY5gaDrar4//ixLUd5Ds4qgauo2PdPrUSXcxS9A5 23 | ygqWZ7tUroC0KJy48dVPbYyC1yBD9sLmKxCMX/Z2hxjj0ipjTJs5GX+trT4SJIBF 24 | GRWGJnU9sojl9cfcCIPb8m8HHUchq0t/gLcr7AnpAoGARNnUh19fK+8ldKx/h9jN 25 | svJZK1I615OXXRCAY2BWl4k7pbufseVVhtSHxkAfzhmv5gIqojfUGf4WCfcf0eZ9 26 | xWBXpuWgMwnWuMlPJLwIzlaU1phDsSaHdd2iuAYfZEKWmIIIxT7+vXDO4UaVpXs6 27 | kso6qBkSwQTymY4m5RATlK4= 28 | -----END PRIVATE KEY----- 29 | -------------------------------------------------------------------------------- /.ci/certs/client.p12: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabbitmq/rabbitmq-stream-go-client/4751689abc982bce39818754ee0161e762aea19a/.ci/certs/client.p12 -------------------------------------------------------------------------------- /.ci/certs/client_certificate.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIID7zCCAtegAwIBAgIBAjANBgkqhkiG9w0BAQsFADBMMTswOQYDVQQDDDJUTFNH 3 | ZW5TZWxmU2lnbmVkdFJvb3RDQSAyMDIzLTA5LTExVDIwOjUxOjM5LjYwMzAxMzEN 4 | MAsGA1UEBwwEJCQkJDAeFw0yMzA5MTExODUxNDBaFw0zMzA5MDgxODUxNDBaMDYx 5 | IzAhBgNVBAMMGmdzYW50b21hZ2c2TFZETS52bXdhcmUuY29tMQ8wDQYDVQQKDAZj 6 | bGllbnQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDQf+IB3RTjtvdY 7 | 3/Rii9zzrY3hCcFcG1k4aOwQAGnE3pgcpRzHaF+l6ZvFX8llP2hcix6ew/IZReDF 8 | p8kuK93PxM0qsYxvCj5fywaGI2mL9sNibrs6CFtvPL+Rj57LSt5UJHSaH3LmY0CE 9 | bV2OdBEuYEBR7eGtzmpupmA+PptHF/U0hTmfIaet6sVLjvJTmD2/3LcztNm/8ksH 10 | iqeHgJDUE+ERWUVl7AEcBo1rHDJw+z/jsKEtKbmqoNxsfcdb2UdZw9cJkB5ojKMr 11 | l73m35s9uIWZxf2iNd3/tqos7cXMLJcTpwr4x6n6F+PsMhBK5sVTw+kFkq+iyxsu 12 | nSVpVT6nAgMBAAGjgfEwge4wCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwEwYDVR0l 
13 | BAwwCgYIKwYBBQUHAwIwTAYDVR0RBEUwQ4IaZ3NhbnRvbWFnZzZMVkRNLnZtd2Fy 14 | ZS5jb22CGmdzYW50b21hZ2c2TFZETS52bXdhcmUuY29tgglsb2NhbGhvc3QwMQYD 15 | VR0fBCowKDAmoCSgIoYgaHR0cDovL2NybC1zZXJ2ZXI6ODAwMC9iYXNpYy5jcmww 16 | HQYDVR0OBBYEFF+biSCzxAazbay1NaTfGDWawU6dMB8GA1UdIwQYMBaAFCjyUnXF 17 | pZI5+zz7PJxHxDuulqqZMA0GCSqGSIb3DQEBCwUAA4IBAQCREnq62BDzp61MRlzL 18 | lsheI/13hkLutFl+OJAoNGcSgprys7d0zwQJGakCO5o05Csi1pQmP0MCKSyPN2Xb 19 | CTEb1qeDBt3FQkgSzXUCAjVL2wvWoL1nIZaAkD5XDjDvGr5Yd4Eczc7WYwujlT5B 20 | JausVa/ShyYatuiTfgPI7UKASW625fkdi+h30OxQ6vnP+X3FUjOV5NO5/GSrlyFN 21 | Fk0M1YqcypUa9meFooDo2aSMTF8zUuZKsOhFLO9B1z7Io/iAiACdPvjdZWjcpJmI 22 | m+gUWeyMH/R4ql6VlPaitUus+CUWkWtdNuQIZEH8HKR1CIOeCW3xwmIJCK9rnbvI 23 | oGb4 24 | -----END CERTIFICATE----- 25 | -------------------------------------------------------------------------------- /.ci/certs/client_key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDQf+IB3RTjtvdY 3 | 3/Rii9zzrY3hCcFcG1k4aOwQAGnE3pgcpRzHaF+l6ZvFX8llP2hcix6ew/IZReDF 4 | p8kuK93PxM0qsYxvCj5fywaGI2mL9sNibrs6CFtvPL+Rj57LSt5UJHSaH3LmY0CE 5 | bV2OdBEuYEBR7eGtzmpupmA+PptHF/U0hTmfIaet6sVLjvJTmD2/3LcztNm/8ksH 6 | iqeHgJDUE+ERWUVl7AEcBo1rHDJw+z/jsKEtKbmqoNxsfcdb2UdZw9cJkB5ojKMr 7 | l73m35s9uIWZxf2iNd3/tqos7cXMLJcTpwr4x6n6F+PsMhBK5sVTw+kFkq+iyxsu 8 | nSVpVT6nAgMBAAECggEAI86Cit1j9AN9ERdNCguJA5Q/tHEPcvkDZoumVs0rXPL5 9 | XpoAikJjgsPOy6O6m5e7HYGK3as7DZoRkwBQrYw9CKx8q6NYQc2Zjqv4T9sPCklz 10 | npjzsHC0+zKMl7v5gmI2Mm8cU0epXOWYs+VwVsGaHcPL6AYZZtb3Mk+CYc4wrpGg 11 | BBzkFhMMRDHAhdz5O0tiV/zawZyqxAXrlgBkIWb+lQZm+cZiC/NGEQNnSErNoHiN 12 | mjaC86jjGaI6TXmn0bte0H+KSUSCfWm3xHJGKFIxV74GzWeS3ZtBOMOTZRwZ9qYU 13 | MS/7YeV26J2tGeC3RhWTyuZm7zrn7ZqGp9QzVWbrSQKBgQD+O24nMmveW9yQnahX 14 | rPWwRZ+E0DDblB0M+CjHfI8bn6JEi56B+akkYjjAYi2JgVWwiWJS9TCbWkk4wEmu 15 | o8IxEL0SH0SuBjLXWwMt4vDM5AmxNqdOyoO3J+Ewapw9zKY6uz8b/FV1eZNPQ9+J 16 | 
bXQxQODDs5GC3QeR+3uJr3uIEwKBgQDR8wq9SgsBfwuqKxSX3rBA7jx/Xxfl3Qbz 17 | OraCCl8o00C7P1jTVk32K+JmzRB2I6lVhDNNjh0hQkc39P1o6SXWH5OAC98hdZDh 18 | q14qGit/oTtQ3Ps9Sw7dt1AAcSbEpKfkIS0T/uf2c1uTvC7tPzAqHViEG+WUwMuD 19 | gVssX5FpnQKBgDnXzb0vVKmX3vwsUsP3/0Jm5N9z+tnKvj1YLPcOWQUg8euElMDf 20 | y+MSUfU7oT665YMwDuXvEWsXRLeb0GfirGk7dLkt0hOCJ4kmFPgYvU7wx1/Bnpln 21 | rEY81ZiNeRT6fgu41KgKZms/CQws1ixPcfNO3pTIQ2Ax5+oH/Niby5BvAoGALDzK 22 | yYG9ee48FfoH18w7VSMdqjTuQyfkXAHGDPaEgISqwgmh/L3VpYYvqTuSOWJgPr2h 23 | VbkZGDXv7bF4Z8+gglKa8MMPm+w6v+Is8DAddEITzoERiyOymTMT71PoOEz9d0sq 24 | RWlTlRFPfXyMYr8KtgUC7qs2H7bT6vypqlrkt90CgYEA1FtcmoIdj0aUH1Ph3ReQ 25 | HSfG19UDmI0o0CaD0d6k8cjDZnxAMe7rHW9oejIzEB+fLOuQeyGpnWKtUXLTHxYz 26 | HmGAlmshG6r4G0Vs7En0syXB7Kd8ZLAj+oRAobzRVTOj2opiF47mK5z4rO9ymUUS 27 | 09Wnq4lVGnpvuTeviY+fA3Y= 28 | -----END PRIVATE KEY----- 29 | -------------------------------------------------------------------------------- /.ci/certs/server.p12: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabbitmq/rabbitmq-stream-go-client/4751689abc982bce39818754ee0161e762aea19a/.ci/certs/server.p12 -------------------------------------------------------------------------------- /.ci/certs/server_certificate.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIID7zCCAtegAwIBAgIBATANBgkqhkiG9w0BAQsFADBMMTswOQYDVQQDDDJUTFNH 3 | ZW5TZWxmU2lnbmVkdFJvb3RDQSAyMDIzLTA5LTExVDIwOjUxOjM5LjYwMzAxMzEN 4 | MAsGA1UEBwwEJCQkJDAeFw0yMzA5MTExODUxNDBaFw0zMzA5MDgxODUxNDBaMDYx 5 | IzAhBgNVBAMMGmdzYW50b21hZ2c2TFZETS52bXdhcmUuY29tMQ8wDQYDVQQKDAZz 6 | ZXJ2ZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC6H9gnMoGCmgDN 7 | GXpqgLiIJBmDvbo64P+FsPOvKEYFNKj/Poz2UVVY96kOJRDTBXW3p42C0GCll/2z 8 | /4RcOwN4Jcf4TIU+IsytOyQ39FYNVMDJpMzH4dQPYlvx9euyIqxUccTYCiXtHkrd 9 | xw5cV3gs7HPQLcklQtBgoVNnlf1fPQcPgYPa5x95+oEki2yWhScXa9EP3W6G+KXE 10 | guCi1enoIZ3+MfxbEkfdm+C9Yo47vh6LXcokyKpiuOYk2TGrfaw5JQb1tRwb4BOQ 
11 | ORriMCHi6+TkQf58yQ5GRZvJ5sjBeJgLtmCvRJXbdZXcw25jKXPwz74qS1Q728kD 12 | c2k7lgKvAgMBAAGjgfEwge4wCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwEwYDVR0l 13 | BAwwCgYIKwYBBQUHAwEwTAYDVR0RBEUwQ4IaZ3NhbnRvbWFnZzZMVkRNLnZtd2Fy 14 | ZS5jb22CGmdzYW50b21hZ2c2TFZETS52bXdhcmUuY29tgglsb2NhbGhvc3QwHQYD 15 | VR0OBBYEFG5VGCQucC7FqyOJOTzIYtclS9/SMB8GA1UdIwQYMBaAFCjyUnXFpZI5 16 | +zz7PJxHxDuulqqZMDEGA1UdHwQqMCgwJqAkoCKGIGh0dHA6Ly9jcmwtc2VydmVy 17 | OjgwMDAvYmFzaWMuY3JsMA0GCSqGSIb3DQEBCwUAA4IBAQB3nWIIa+9Oo29gU0us 18 | fvryYJo92A/mEGIBpixX2i4eQoPhgTSJvFWN3QCHDexbnccM6tRksQmKwn5Rrf+P 19 | DdM8BiTLP/jOQWJXChZro8xpHLmNjlOGletsQ7wo7/p5hvD6Y7pB6FK6LdLcbwbI 20 | Rmvy8olsfOMewEyyWLbKB7e7+iwDIO5lxxgNWXKspO+Kx7wgVeS3j2OhLaOBj1N4 21 | a+YAXVVaN3IkkdHwUHBTPfuvguXCD8fZxVW5RkYDiweeHAMuwpu3o2rd7y2dGzG7 22 | u5mLzNazq4Ki/FTSZMkMAloN4/vfXQfGUO4UJcGXB/c3XO9XURsF2N1k0T9ThIUh 23 | bhmL 24 | -----END CERTIFICATE----- 25 | -------------------------------------------------------------------------------- /.ci/certs/server_key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQC6H9gnMoGCmgDN 3 | GXpqgLiIJBmDvbo64P+FsPOvKEYFNKj/Poz2UVVY96kOJRDTBXW3p42C0GCll/2z 4 | /4RcOwN4Jcf4TIU+IsytOyQ39FYNVMDJpMzH4dQPYlvx9euyIqxUccTYCiXtHkrd 5 | xw5cV3gs7HPQLcklQtBgoVNnlf1fPQcPgYPa5x95+oEki2yWhScXa9EP3W6G+KXE 6 | guCi1enoIZ3+MfxbEkfdm+C9Yo47vh6LXcokyKpiuOYk2TGrfaw5JQb1tRwb4BOQ 7 | ORriMCHi6+TkQf58yQ5GRZvJ5sjBeJgLtmCvRJXbdZXcw25jKXPwz74qS1Q728kD 8 | c2k7lgKvAgMBAAECggEAA45fNA8wJUgpqBbgCA/nNrFbNsarERFCdedLHfd1hbJG 9 | C6vCHWSxbkPmcX7ozYyxp+17c9qPtipAw9h9IBaqkCLx/qmrnrkjKFN/HiiSH6q/ 10 | t4/pWmT+BxXjLtd624pJDktRg6qHTfqsWy9rSVK3LkM4SpZ4B45kFSyP6jnrP49j 11 | vmSHzDISiN4G4GqFK12IbA0nioF5tbuacZVGEAoKcUFxEHQmq1e8b6sjg6uKSexk 12 | GFEpvcBm8XgSYjiBPit/AHKxGqzuO0/BbjvQK7gSeSLLaV6DUdKFi9GFsOQjmedq 13 | DLbxy0CIleCnK0hYwp1SN9gvNBLrOIT5lqZfv4+X0QKBgQDrlRjiNJ6rWwQZcidp 14 | 
ncStV2f/RVuztAacSYHz+0ZZY72uQh+8+W/7+BDkKsZIV2Bswv2wN9/8mPluvyI8 15 | OLWf1MOwRC5RUsBJqykRuPzFCoe4aZksBVnQsx82bBpfzQFqspCXw3UAcZcM+dHg 16 | jTkFVsSOeCoQxg2DFN0bwz8/vwKBgQDKQWogOStxgnJisxffSQYH0pHbMmVV/V8+ 17 | OTIhx87YHV+cnWT9JGv+Qsnz2s18E9zj1FEYwBJqAbHd5qytATsvMYB4y3voR03G 18 | AzPeDOdFBNZqQaJyGqYPD+HEuVKYlPx8NC2ew+CEh+h5kPUoO/DgCQJrYA3ZXZKw 19 | dOpieR75EQKBgApgtArq9H5p8QFJ9RCDAbH9IritDoAZEx15Y38i95NigG2Xvhwu 20 | BM/duqjCdZ+kMbw4zsIfg/91oa9OPizW9rFGxyQRrNSqR4w3PQTp2EC52Qa3qCa3 21 | SaCW824LTxIfTsureBEnbBUL6/KHYsZ4kiV5EAmSo4+/mcLHfYIGlNezAoGAMcw5 22 | XQW2dJQxpauCzS8llPd7ggS+fpWLxb4/YaHYg813pQ/7tXgqPsgjAS92OH6LfGzi 23 | Kr3fysnwCTqqeU48TDpb72HqeB5WP9K6CooSxyORx0exv3ZgPIUkiVM3yumj4NDY 24 | CqcfuIHd81CFjAp2HDMbrWRRBJvNajTfIK/BuIECgYBJPClPwYzJaXYMlc5rmAgL 25 | lJ9rlbjxRffyLOG4BAsapfGYS789MpoP8WzFPkCNcweXWnI9ftKAE57R/o+a+ov6 26 | hMlnqI4fij2N9BZuFbVWvpf3th72WU8CE1wpzY10Gp//iePXntJqsiwkr+eStPSL 27 | 9H/WjV5NwVwqe+YYC6ABMw== 28 | -----END PRIVATE KEY----- 29 | -------------------------------------------------------------------------------- /.ci/conf/enabled_plugins: -------------------------------------------------------------------------------- 1 | [rabbitmq_management, rabbitmq_stream, rabbitmq_stream_management]. 
-------------------------------------------------------------------------------- /.ci/conf/rabbitmq.conf: -------------------------------------------------------------------------------- 1 | loopback_users.guest = false 2 | 3 | ssl_options.cacertfile = /etc/rabbitmq/certs/ca_certificate.pem 4 | ssl_options.certfile = /etc/rabbitmq/certs/server_certificate.pem 5 | ssl_options.keyfile = /etc/rabbitmq/certs/server_key.pem 6 | listeners.ssl.default = 5671 7 | stream.listeners.ssl.default = 5551 8 | ssl_options.verify = verify_peer 9 | ssl_options.fail_if_no_peer_cert = false 10 | -------------------------------------------------------------------------------- /.ci/install.ps1: -------------------------------------------------------------------------------- 1 | $ProgressPreference = 'Continue' 2 | $ErrorActionPreference = 'Stop' 3 | Set-StrictMode -Version 2.0 4 | 5 | [Net.ServicePointManager]::SecurityProtocol = [Net.ServicePointManager]::SecurityProtocol -bor 'Tls12' 6 | 7 | $versions_path = Join-Path -Path $env:GITHUB_WORKSPACE -ChildPath '.ci' | Join-Path -ChildPath 'versions.json' 8 | $versions = Get-Content $versions_path | ConvertFrom-Json 9 | Write-Host "[INFO] versions: $versions" 10 | $erlang_ver = $versions.erlang 11 | $rabbitmq_ver = $versions.rabbitmq 12 | 13 | $base_installers_dir = Join-Path -Path $HOME -ChildPath 'installers' 14 | if (-Not (Test-Path $base_installers_dir)) 15 | { 16 | New-Item -Verbose -ItemType Directory $base_installers_dir 17 | } 18 | 19 | $erlang_download_url = "https://github.com/erlang/otp/releases/download/OTP-$erlang_ver/otp_win64_$erlang_ver.exe" 20 | $erlang_installer_path = Join-Path -Path $base_installers_dir -ChildPath "otp_win64_$erlang_ver.exe" 21 | $erlang_install_dir = Join-Path -Path $HOME -ChildPath 'erlang' 22 | 23 | Write-Host '[INFO] Downloading Erlang...' 
24 | 25 | if (-Not (Test-Path $erlang_installer_path)) 26 | { 27 | Invoke-WebRequest -UseBasicParsing -Uri $erlang_download_url -OutFile $erlang_installer_path 28 | } 29 | else 30 | { 31 | Write-Host "[INFO] Found '$erlang_installer_path' in cache!" 32 | } 33 | 34 | Write-Host "[INFO] Installing Erlang to $erlang_install_dir..." 35 | & $erlang_installer_path '/S' "/D=$erlang_install_dir" | Out-Null 36 | 37 | $rabbitmq_installer_download_url = "https://github.com/rabbitmq/rabbitmq-server/releases/download/v$rabbitmq_ver/rabbitmq-server-$rabbitmq_ver.exe" 38 | $rabbitmq_installer_path = Join-Path -Path $base_installers_dir -ChildPath "rabbitmq-server-$rabbitmq_ver.exe" 39 | Write-Host "[INFO] rabbitmq installer path $rabbitmq_installer_path" 40 | 41 | $erlang_reg_path = 'HKLM:\SOFTWARE\Ericsson\Erlang' 42 | if (Test-Path 'HKLM:\SOFTWARE\WOW6432Node\') 43 | { 44 | $erlang_reg_path = 'HKLM:\SOFTWARE\WOW6432Node\Ericsson\Erlang' 45 | } 46 | $erlang_erts_version = Get-ChildItem -Path $erlang_reg_path -Name 47 | $erlang_home = (Get-ItemProperty -LiteralPath $erlang_reg_path\$erlang_erts_version).'(default)' 48 | 49 | Write-Host "[INFO] Setting ERLANG_HOME to '$erlang_home'..." 50 | $env:ERLANG_HOME = $erlang_home 51 | [Environment]::SetEnvironmentVariable('ERLANG_HOME', $erlang_home, 'Machine') 52 | 53 | Write-Host "[INFO] Setting RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS..." 54 | $env:RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS = '-rabbitmq_stream advertised_host localhost' 55 | [Environment]::SetEnvironmentVariable('RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS', '-rabbitmq_stream advertised_host localhost', 'Machine') 56 | 57 | Write-Host '[INFO] Downloading RabbitMQ...' 58 | 59 | if (-Not (Test-Path $rabbitmq_installer_path)) 60 | { 61 | Invoke-WebRequest -UseBasicParsing -Uri $rabbitmq_installer_download_url -OutFile $rabbitmq_installer_path 62 | } 63 | else 64 | { 65 | Write-Host "[INFO] Found '$rabbitmq_installer_path' in cache!" 
66 | } 67 | 68 | Write-Host "[INFO] Installer dir '$base_installers_dir' contents:" 69 | Get-ChildItem -Verbose -Path $base_installers_dir 70 | 71 | Write-Host '[INFO] Creating Erlang cookie files...' 72 | 73 | function Set-ErlangCookie { 74 | Param($Path, $Value = 'RABBITMQ-COOKIE') 75 | Remove-Item -Force $Path -ErrorAction SilentlyContinue 76 | [System.IO.File]::WriteAllText($Path, $Value, [System.Text.Encoding]::ASCII) 77 | } 78 | 79 | $erlang_cookie_user = Join-Path -Path $HOME -ChildPath '.erlang.cookie' 80 | $erlang_cookie_system = Join-Path -Path $env:SystemRoot -ChildPath 'System32\config\systemprofile\.erlang.cookie' 81 | 82 | Set-ErlangCookie -Path $erlang_cookie_user 83 | Set-ErlangCookie -Path $erlang_cookie_system 84 | 85 | Write-Host '[INFO] Installing and starting RabbitMQ with default config...' 86 | 87 | & $rabbitmq_installer_path '/S' | Out-Null 88 | (Get-Service -Name RabbitMQ).Status 89 | 90 | $rabbitmq_base_path = (Get-ItemProperty -Name Install_Dir -Path 'HKLM:\SOFTWARE\WOW6432Node\VMware, Inc.\RabbitMQ Server').Install_Dir 91 | $regPath = 'HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\RabbitMQ' 92 | if (Test-Path 'HKLM:\SOFTWARE\WOW6432Node\') 93 | { 94 | $regPath = 'HKLM:\SOFTWARE\WOW6432Node\Microsoft\Windows\CurrentVersion\Uninstall\RabbitMQ' 95 | } 96 | $rabbitmq_version = (Get-ItemProperty $regPath "DisplayVersion").DisplayVersion 97 | Write-Host "[INFO] RabbitMQ version path: $rabbitmq_base_path and version: $rabbitmq_version" 98 | 99 | $rabbitmq_home = Join-Path -Path $rabbitmq_base_path -ChildPath "rabbitmq_server-$rabbitmq_version" 100 | Write-Host "[INFO] Setting RABBITMQ_HOME to '$rabbitmq_home'..." 
101 | [Environment]::SetEnvironmentVariable('RABBITMQ_HOME', $rabbitmq_home, 'Machine') 102 | $env:RABBITMQ_HOME = $rabbitmq_home 103 | 104 | $rabbitmqctl_path = Join-Path -Path $rabbitmq_base_path -ChildPath "rabbitmq_server-$rabbitmq_version" | Join-Path -ChildPath 'sbin' | Join-Path -ChildPath 'rabbitmqctl.bat' 105 | $rabbitmq_plugins_path = Join-Path -Path $rabbitmq_base_path -ChildPath "rabbitmq_server-$rabbitmq_version" | Join-Path -ChildPath 'sbin' | Join-Path -ChildPath 'rabbitmq-plugins.bat' 106 | 107 | Write-Host "[INFO] Setting RABBITMQ_RABBITMQCTL_PATH to '$rabbitmqctl_path'..." 108 | $env:RABBITMQ_RABBITMQCTL_PATH = $rabbitmqctl_path 109 | [Environment]::SetEnvironmentVariable('RABBITMQ_RABBITMQCTL_PATH', $rabbitmqctl_path, 'Machine') 110 | 111 | $epmd_running = $false 112 | [int]$count = 1 113 | 114 | $epmd_exe = Join-Path -Path $erlang_home -ChildPath "erts-$erlang_erts_version" | Join-Path -ChildPath 'bin' | Join-Path -ChildPath 'epmd.exe' 115 | 116 | Write-Host "[INFO] Waiting for epmd ($epmd_exe) to report that RabbitMQ has started..." 117 | 118 | Do { 119 | $epmd_running = & $epmd_exe -names | Select-String -CaseSensitive -SimpleMatch -Quiet -Pattern 'name rabbit at port' 120 | if ($epmd_running -eq $true) { 121 | Write-Host '[INFO] epmd reports that RabbitMQ is running!' 122 | break 123 | } 124 | 125 | if ($count -gt 60) { 126 | throw '[ERROR] too many tries waiting for epmd to report RabbitMQ running!' 127 | } 128 | 129 | Write-Host "[INFO] epmd NOT reporting yet that RabbitMQ is running, count: '$count'..." 
130 | $count = $count + 1 131 | Start-Sleep -Seconds 5 132 | 133 | } While ($true) 134 | 135 | [int]$count = 1 136 | 137 | Do { 138 | $proc_id = (Get-Process -Name erl).Id 139 | if (-Not ($proc_id -is [array])) { 140 | & $rabbitmqctl_path wait -t 300000 -P $proc_id 141 | if ($LASTEXITCODE -ne 0) { 142 | throw "[ERROR] rabbitmqctl wait returned error: $LASTEXITCODE" 143 | } 144 | break 145 | } 146 | 147 | if ($count -gt 120) { 148 | throw '[ERROR] too many tries waiting for just one erl process to be running!' 149 | } 150 | 151 | Write-Host '[INFO] multiple erl instances running still...' 152 | $count = $count + 1 153 | Start-Sleep -Seconds 5 154 | 155 | } While ($true) 156 | 157 | $ErrorActionPreference = 'Continue' 158 | Write-Host '[INFO] Getting RabbitMQ status...' 159 | & $rabbitmqctl_path status 160 | 161 | $ErrorActionPreference = 'Continue' 162 | Write-Host '[INFO] Enabling plugins...' 163 | & $rabbitmq_plugins_path enable rabbitmq_management rabbitmq_stream rabbitmq_stream_management 164 | -------------------------------------------------------------------------------- /.ci/versions.json: -------------------------------------------------------------------------------- 1 | { 2 | "erlang": "27.2", 3 | "rabbitmq": "4.0.5" 4 | } 5 | -------------------------------------------------------------------------------- /.github/.codecov.yaml: -------------------------------------------------------------------------------- 1 | coverage: 2 | status: 3 | project: off 4 | patch: off -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 
3 | # Please see the documentation for all configuration options: 4 | # https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "gomod" # See documentation for possible values 9 | directory: "/" # Location of package manifests 10 | schedule: 11 | interval: "daily" 12 | -------------------------------------------------------------------------------- /.github/release.yml: -------------------------------------------------------------------------------- 1 | # .github/release.yml 2 | 3 | changelog: 4 | exclude: 5 | labels: 6 | - ignore-for-release 7 | categories: 8 | - title: Breaking Changes 9 | labels: 10 | - Semver-Major 11 | - breaking-change 12 | - title: Enhancements 13 | labels: 14 | - Semver-Minor 15 | - enhancement 16 | - title: Bug Fixes 17 | labels: 18 | - Semver-Patch 19 | - bug 20 | - title: Documentation 21 | labels: 22 | - documentation 23 | - title: Dependency Updates 24 | labels: 25 | - dependencies 26 | -------------------------------------------------------------------------------- /.github/workflows/build_and_test.yml: -------------------------------------------------------------------------------- 1 | name: Build and Test 2 | 3 | on: 4 | push: 5 | 6 | jobs: 7 | test: 8 | runs-on: ubuntu-latest 9 | strategy: 10 | fail-fast: true 11 | matrix: 12 | go: ['1.24'] 13 | steps: 14 | - name: Checkout 15 | uses: actions/checkout@v4 16 | - name: Set up Docker Buildx 17 | uses: docker/setup-buildx-action@v3 18 | - name: Build and export 19 | uses: docker/build-push-action@v5 20 | with: 21 | context: . 
22 | file: CiDockerfile 23 | tags: rabbitmq_tls:latest 24 | outputs: type=docker,dest=/tmp/rabbitmq_tls.tar 25 | - name: Upload artifact 26 | uses: actions/upload-artifact@v4 27 | with: 28 | name: rabbitmq_tls 29 | path: /tmp/rabbitmq_tls.tar 30 | - name: Download artifact 31 | uses: actions/download-artifact@v4 32 | with: 33 | name: rabbitmq_tls 34 | path: /tmp 35 | - name: Load image 36 | run: | 37 | docker load --input /tmp/rabbitmq_tls.tar 38 | docker image ls -a 39 | docker run -d --rm --name rabbitmq-stream-client-test \ 40 | -p 5552:5552 -p 5672:5672 -p 5671:5671 -p 5551:5551 -p 15672:15672 \ 41 | -e RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS="-rabbitmq_stream advertised_host localhost" \ 42 | rabbitmq_tls 43 | - name: wait for running 44 | run: | 45 | docker exec rabbitmq-stream-client-test /bin/bash -c 'ps -aux' 46 | docker exec rabbitmq-stream-client-test /bin/bash -c 'sleep 10' 47 | docker exec rabbitmq-stream-client-test /bin/bash -c 'rabbitmqctl status' 48 | docker exec rabbitmq-stream-client-test /bin/bash -c 'rabbitmqctl wait --pid 1 --timeout 70' 49 | - uses: actions/checkout@v4 50 | - uses: actions/setup-go@v5 51 | id: setup_go 52 | with: 53 | go-version: ${{ matrix.go }} 54 | check-latest: true 55 | - name: Install golangci-lint 56 | run: go install github.com/golangci/golangci-lint/v2/cmd/golangci-lint@latest 57 | - run: make test GO_VERSION=${{ steps.setup_go.outputs.go-version }} 58 | - uses: actions/checkout@main 59 | - uses: codecov/codecov-action@v5 60 | with: 61 | fail_ci_if_error: false # optional (default = false) 62 | files: ./coverage.txt 63 | flags: unittests 64 | name: codecov-umbrella # optional 65 | verbose: true # optional (default = false) 66 | env: 67 | CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} 68 | # temporany removed due of https://github.com/actions/checkout/issues/1186 69 | # test-win32: 70 | # runs-on: windows-latest 71 | # strategy: 72 | # matrix: 73 | # go: [ '1.22'] 74 | # steps: 75 | # - uses: actions/checkout@v4 76 | # - 
uses: actions/setup-go@v5 77 | # id: setup_go 78 | # with: 79 | # go-version: ${{ matrix.go }} 80 | # check-latest: true 81 | # - name: Cache installers 82 | # uses: actions/cache@v4 83 | # with: 84 | # # Note: the cache path is relative to the workspace directory 85 | # # https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#using-the-cache-action 86 | # path: ~/installers 87 | # key: ${{ runner.os }}-v0-${{ hashFiles('.ci/versions.json') }} 88 | # - name: Install and start RabbitMQ 89 | # run: ./.ci/install.ps1 90 | # - name: Install GNU make 91 | # run: choco install make 92 | # - run: make test GO_VERSION=${{ steps.setup_go.outputs.go-version }} 93 | publish: 94 | runs-on: ubuntu-latest 95 | needs: [test] 96 | steps: 97 | - uses: docker/setup-buildx-action@v2 98 | - uses: docker/login-action@v2 99 | with: 100 | username: ${{ secrets.DOCKERHUB_USERNAME }} 101 | password: ${{ secrets.DOCKERHUB_TOKEN }} 102 | - uses: actions/checkout@v3 103 | - name: Publish Docker Image 104 | run: | 105 | set -x 106 | VERSION=latest 107 | export VERSION 108 | if [[ ! $GITHUB_REF =~ "/tags/" ]] 109 | then 110 | VERSION=dev 111 | fi 112 | make perf-test-docker-push 113 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to commit it to your repository. 3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | # 7 | # ******** NOTE ******** 8 | # We have attempted to detect the languages in your repository. Please check 9 | # the `language` matrix defined below to confirm you have the correct set of 10 | # supported CodeQL languages. 
11 | # 12 | name: "CodeQL" 13 | 14 | on: 15 | push: 16 | branches: [ main ] 17 | pull_request: 18 | # The branches below must be a subset of the branches above 19 | branches: [ main ] 20 | schedule: 21 | - cron: '0 0 * * 0' 22 | 23 | jobs: 24 | analyze: 25 | name: Analyze 26 | runs-on: ubuntu-latest 27 | 28 | strategy: 29 | fail-fast: false 30 | matrix: 31 | language: [ 'go' ] 32 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] 33 | # Learn more: 34 | # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed 35 | 36 | steps: 37 | - name: Checkout repository 38 | uses: actions/checkout@v4 39 | 40 | # Initializes the CodeQL tools for scanning. 41 | - name: Initialize CodeQL 42 | uses: github/codeql-action/init@v3 43 | with: 44 | languages: ${{ matrix.language }} 45 | # If you wish to specify custom queries, you can do so here or in a config file. 46 | # By default, queries listed here will override any specified in a config file. 47 | # Prefix the list here with "+" to use these queries and those in the config file. 48 | # queries: ./path/to/local/query, your-org/your-repo/queries@main 49 | 50 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 51 | # If this step fails, then you should remove it and run the build manually (see below) 52 | - name: Autobuild 53 | uses: github/codeql-action/autobuild@v3 54 | 55 | # ℹ️ Command-line programs to run using the OS shell. 
56 | # 📚 https://git.io/JvXDl 57 | 58 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines 59 | # and modify them (or add more) to build your code if your project 60 | # uses a compiled language 61 | 62 | #- run: | 63 | # make bootstrap 64 | # make release 65 | 66 | - name: Perform CodeQL Analysis 67 | uses: github/codeql-action/analyze@v3 68 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | 8 | # Test binary, built with `go test -c` 9 | *.test 10 | 11 | # Output of the go coverage tool, specifically when used with LiteIDE 12 | *.out 13 | cue.mod/ 14 | 15 | # Dependency directories (remove the comment below to include it) 16 | # vendor/ 17 | .idea 18 | coverage 19 | coverage.txt 20 | bin/ 21 | examples/examples 22 | vet 23 | .DS_Store 24 | .vagrant/ 25 | local/ 26 | tls-gen/ 27 | dist/ 28 | 29 | perfTest/perfTest 30 | go.dev/ 31 | local_ex/ 32 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | linters: 3 | default: none 4 | enable: 5 | - bodyclose 6 | - copyloopvar 7 | - dogsled 8 | - dupl 9 | - errcheck 10 | - goconst 11 | - gocritic 12 | - goprintffuncname 13 | - gosec 14 | - govet 15 | - ineffassign 16 | - misspell 17 | - nakedret 18 | - noctx 19 | - nolintlint 20 | - prealloc 21 | - revive 22 | - staticcheck 23 | - unconvert 24 | - unused 25 | - whitespace 26 | settings: 27 | gosec: 28 | excludes: 29 | - G404 30 | - G115 31 | revive: 32 | rules: 33 | - name: unexported-return 34 | disabled: true 35 | - name: unused-parameter 36 | 37 | formatters: 38 | enable: 39 | - gofmt 40 | - goimports 41 | settings: 42 | gofmt: 43 | simplify: false 44 | rewrite-rules: 45 | - 
pattern: interface{} 46 | replacement: any 47 | -------------------------------------------------------------------------------- /.goreleaser.yml: -------------------------------------------------------------------------------- 1 | # This is an example .goreleaser.yml file with some sane defaults. 2 | # Make sure to check the documentation at https://goreleaser.com 3 | before: 4 | hooks: 5 | - go mod tidy 6 | - go generate ./... 7 | builds: 8 | - id: perf-test 9 | binary: stream-perf-test 10 | main: ./perfTest 11 | env: 12 | - CGO_ENABLED=0 13 | goos: 14 | - linux 15 | - windows 16 | - darwin 17 | goarch: 18 | - amd64 19 | - arm64 20 | goarm: 21 | - 7 22 | archives: 23 | - name_template: "{{ .Binary }}_{{ .Os }}_{{ .Arch }}{{ .Arm }}" 24 | format: tar.gz 25 | format_overrides: 26 | - goos: windows 27 | format: zip 28 | checksum: 29 | name_template: 'checksums.txt' 30 | snapshot: 31 | name_template: "{{ incpatch .Version }}-next" 32 | changelog: 33 | sort: asc 34 | filters: 35 | exclude: 36 | - "README.md" 37 | - ".gitignore" 38 | - "^examples:" 39 | - Merge pull request 40 | - Merge branch 41 | -------------------------------------------------------------------------------- /CiDockerfile: -------------------------------------------------------------------------------- 1 | FROM rabbitmq:4-management 2 | 3 | COPY .ci/conf/rabbitmq.conf /etc/rabbitmq/rabbitmq.conf 4 | COPY .ci/conf/enabled_plugins /etc/rabbitmq/enabled_plugins 5 | 6 | COPY .ci/certs /etc/rabbitmq/certs -------------------------------------------------------------------------------- /Docker/Dockerfile: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabbitmq/rabbitmq-stream-go-client/4751689abc982bce39818754ee0161e762aea19a/Docker/Dockerfile -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.23 as 
builder 2 | ENV GOPATH=/go GOOS=linux CGO_ENABLED=0 3 | WORKDIR /go/src/github.com/rabbitmq/rabbitmq-stream-go-client 4 | COPY go.mod go.sum VERSION ./ 5 | COPY pkg pkg 6 | COPY Makefile Makefile 7 | COPY perfTest perfTest 8 | 9 | RUN mkdir /stream_perf_test 10 | RUN go get -d -v ./... 11 | RUN VERSION=$(cat VERSION) && go build -ldflags "-X main.Version=$VERSION" -o /stream_perf_test/stream-perf-test perfTest/perftest.go 12 | 13 | FROM ubuntu:20.04 14 | 15 | RUN set -eux; \ 16 | apt-get update; \ 17 | apt-get install -y --no-install-recommends \ 18 | locales 19 | 20 | 21 | 22 | #RUN apt-get install golang -y 23 | #RUN mkdir -p /stream_perf_test 24 | COPY --from=builder /stream_perf_test /bin/ 25 | # 26 | RUN rm -rf /var/lib/apt/lists/*; \ 27 | locale-gen en_US.UTF-8 28 | 29 | 30 | ENTRYPOINT ["stream-perf-test"] 31 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (C) 2021 Gabriele Santomaggio 4 | Portions Copyright (C) vmware 5 | 6 | Permission is hereby granted, free of charge, to any person 7 | obtaining a copy of this software and associated documentation 8 | files (the "Software"), to deal in the Software without 9 | restriction, including without limitation the rights to use, 10 | copy, modify, merge, publish, distribute, sublicense, and/or sell 11 | copies of the Software, and to permit persons to whom the 12 | Software is furnished to do so, subject to the following 13 | conditions: 14 | 15 | The above copyright notice and this permission notice shall be 16 | included in all copies or substantial portions of the Software. 17 | 18 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 19 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES 20 | OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 21 | NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 22 | HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, 23 | WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 24 | FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 25 | OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) 2 | ifeq (,$(shell go env GOBIN)) 3 | GOBIN = $(shell go env GOPATH)/bin 4 | else 5 | GOBIN = $(shell go env GOBIN) 6 | endif 7 | 8 | VERSION ?= latest 9 | LDFLAGS = "-X main.Version=$(VERSION)" 10 | 11 | all: test build 12 | 13 | vet: $(go_sources) 14 | go vet ./pkg/stream 15 | 16 | fmt: 17 | go fmt ./... 18 | 19 | check: 20 | golangci-lint run --fix 21 | 22 | NUM_PROCS ?= 2 23 | TEST_TIMEOUT ?= 3m 24 | test: vet fmt check 25 | go run -mod=mod github.com/onsi/ginkgo/v2/ginkgo -r --procs=$(NUM_PROCS) --compilers=$(NUM_PROCS) \ 26 | --randomize-all --randomize-suites \ 27 | --cover --coverprofile=coverage.txt --covermode=atomic \ 28 | --race --trace \ 29 | --tags debug \ 30 | --timeout=$(TEST_TIMEOUT) 31 | 32 | build-all: vet fmt check build-darwin build-windows build-linux 33 | 34 | integration-test: vet fmt check 35 | go test -race -tags debug -v -cpu 2 ./pkg/system_integration -coverprofile coverage.txt -covermode atomic -timeout 99999s -ginkgo.v 36 | 37 | build-%: vet fmt check 38 | GOOS=$(*) GOARCH=amd64 go build -ldflags=$(LDFLAGS) -v ./... 39 | 40 | build: vet fmt check 41 | go build -ldflags=$(LDFLAGS) -v ./... 
42 | 43 | PERFTEST_FLAGS ?= --publishers 1 --consumers 1 44 | perf-test-run: perf-test-build 45 | go run perfTest/perftest.go silent $(PERFTEST_FLAGS) 46 | 47 | perf-test-help: perf-test-build 48 | go run perfTest/perftest.go help 49 | 50 | perf-test-build: 51 | go build -ldflags=$(LDFLAGS) -o bin/stream-perf-test perfTest/perftest.go 52 | 53 | BUILDKIT ?= docker 54 | perf-test-docker-build: perf-test-build 55 | $(BUILDKIT) build -t pivotalrabbitmq/go-stream-perf-test:$(VERSION) . 56 | 57 | perf-test-docker-push: perf-test-docker-build 58 | $(BUILDKIT) push pivotalrabbitmq/go-stream-perf-test:$(VERSION) 59 | 60 | RABBITMQ_OCI ?= rabbitmq:3-management 61 | BUILDKIT_RUN_ARGS ?= --pull always 62 | .PHONY: rabbitmq-server 63 | rabbitmq-server: 64 | $(BUILDKIT) build -t rabbitmq-tls-test -f CiDockerfile . 65 | $(BUILDKIT) run -it --rm --name rabbitmq-tls-test \ 66 | -p 5552:5552 -p 5551:5551 -p 5672:5672 -p 15672:15672 \ 67 | -e RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS="-rabbitmq_stream advertised_host localhost" \ 68 | rabbitmq-tls-test 69 | 70 | 71 | rabbitmq-ha-proxy: 72 | cd compose/ha_tls; rm -rf tls-gen; 73 | cd compose/ha_tls; git clone https://github.com/michaelklishin/tls-gen tls-gen; cd tls-gen/basic; make 74 | mv compose/ha_tls/tls-gen/basic/result/server_*_certificate.pem compose/ha_tls/tls-gen/basic/result/server_certificate.pem 75 | mv compose/ha_tls/tls-gen/basic/result/server_*key.pem compose/ha_tls/tls-gen/basic/result/server_key.pem 76 | cd compose/ha_tls; docker build -t haproxy-rabbitmq-cluster . 
77 | cd compose/ha_tls; docker compose down 78 | cd compose/ha_tls; docker compose up 79 | 80 | rabbitmq-server-tls: 81 | cd compose/tls; rm -rf tls-gen; 82 | cd compose/tls; git clone https://github.com/michaelklishin/tls-gen tls-gen; cd tls-gen/basic; make 83 | mv compose/tls/tls-gen/basic/result/server_*_certificate.pem compose/tls/tls-gen/basic/result/server_certificate.pem 84 | mv compose/tls/tls-gen/basic/result/server_*key.pem compose/tls/tls-gen/basic/result/server_key.pem 85 | docker run -d --name rabbitmq-stream-client-test \ 86 | -p 5552:5552 -p 5672:5672 -p 5671:5671 -p 5551:5551 -p 15672:15672 \ 87 | -v $(shell pwd)/compose/tls/conf/:/etc/rabbitmq/ -v $(shell pwd)/compose/tls/tls-gen/basic/result/:/certs \ 88 | -e RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS="-rabbitmq_stream advertised_host localhost" \ 89 | --pull always \ 90 | docker.io/rabbitmq:3-management 91 | 92 | local-release: 93 | goreleaser release --skip-publish --rm-dist --skip=validate 94 | 95 | 96 | 97 | -------------------------------------------------------------------------------- /VERSION: -------------------------------------------------------------------------------- 1 | 1.5.8 2 | -------------------------------------------------------------------------------- /best_practices/README.md: -------------------------------------------------------------------------------- 1 | # Client best practices 2 | 3 | The scope of this document is to provide a set of best practices for the client applications that use the Go client library.
4 | 5 | #### General recommendations 6 | 7 | - Messages are not thread-safe, you should not share the same message between different go-routines or different Send/BatchSend calls. 8 | - Use the producer name only if you need deduplication. 9 | - Avoid storing the consumer offset to the server too often. 10 | - `Send` works well in most of the cases, use `BatchSend` when you need more control. 11 | - Connections/producers/consumers are designed to be long-lived. You should avoid creating and closing them too often. 12 | - The library is generally thread-safe, even though it is better to use one producer/consumer per go-routine. 13 | 14 | #### Default configuration 15 | 16 | The default configuration of the client library is designed to be used in most of the cases. 17 | No particular tuning is required. Just follow the [Getting started](../examples/getting_started.go) example. 18 | 19 | #### Multiple producers and consumers 20 | 21 | Each connection can support multiple producers and consumers, you can reduce the number of connections by using the same connection for multiple producers and consumers.
22 | With: 23 | 24 | ```golang 25 | SetMaxProducersPerClient(10). 26 | SetMaxConsumersPerClient(10) 27 | ``` 28 | 29 | The TCP connection will be shared between the producers and consumers. 30 | Note about consumers: One slow consumer can block the others, so it is important: 31 | 32 | - To have a good balance between the number of consumers and the speed of the consumers. 33 | - To work on the application side to avoid slow consumers, for example, by using go-routines/buffers. 34 | 35 | #### High throughput 36 | 37 | To achieve high throughput, you should use one producer per connection, and one consumer per connection. 38 | This will avoid lock contention between the producers when sending messages and between the consumers when receiving messages. 39 | 40 | The method `Send` is usually enough to achieve high throughput. 41 | In some cases you can use the `BatchSend` method. See the `Send` vs `BatchSend` documentation for more details. 42 | 43 | #### Low latency 44 | 45 | To achieve low latency, you should use one producer per connection, and one consumer per connection. 46 | 47 | The method `Send` is the best choice to achieve low latency. Default values are tuned for low latency. 48 | You can change the `BatchSize` parameter to increase or reduce the max number of messages sent in one batch. 49 | Note: Since the client uses dynamic send, the `BatchSize` parameter is a hint to the client, the client can send less than the `BatchSize`. 50 | 51 | #### Store several text based messages 52 | 53 | In case you want to store logs, text-based or big messages, you can use the `Sub Entries Batching` method. 54 | Where it is possible to store multiple messages in one entry and compress the entry with different algorithms. 55 | It is useful to reduce the disk space and the network bandwidth. 56 | See the `Sub Entries Batching` documentation for more details.
57 | 58 | #### Store several small messages 59 | 60 | In case you want to store a lot of small messages, you can use the `BatchSend` method. 61 | Where it is possible to store multiple messages in one entry. This will avoid creating small chunks on the server side.
62 | 63 | #### Avoid duplications 64 | 65 | In case you want to store messages with deduplication, you need to set the producer name and the deduplication id. 66 | See the `Deduplication` documentation for more details.
67 | 68 | #### Consumer fail over 69 | 70 | In case you want to have a consumer fail over, you can use the `Single Active Consumer` method. 71 | Where only one consumer is active at a time, and the other consumers are in standby mode. 72 | 73 | #### Reliable producer and consumer 74 | 75 | The client library provides a reliable producer and consumer, where the producer and consumer can recover from a connection failure. 76 | See the `Reliable` documentation for more details.
77 | 78 | #### Scaling the streams 79 | 80 | In case you want to scale the streams, you can use the `Super Stream` method. 81 | Where the data is partitioned across multiple streams, each of which can be consumed independently. 82 | See the `Super Stream` documentation for more details.
83 | 84 | #### Filtering the data when consuming 85 | 86 | In case you want to filter the data when consuming, you can use the `Stream Filtering` method. 87 | Where you can filter the data based on the metadata. 88 | See the `Stream Filtering` documentation for more details.
89 | 90 | #### Using a load balancer 91 | 92 | In case you want to use a load balancer, you can use the `Using a load balancer` method. 93 | In Kubernetes, you can use the service name as the load-balancer DNS entry. 94 | See the `Using a load balancer` documentation for more details.
95 | 96 | #### Configure TCP parameters 97 | 98 | By default, this client uses optimized TCP read and write buffer sizes to achieve the best performance. 99 | In some environments, this optimization may cause latency issues. To restore the default OS parameters, you can call: 100 | 101 | ```go 102 | env, err := stream.NewEnvironment(stream.NewEnvironmentOptions(). 103 | SetWriteBuffer(-1). 104 | SetReadBuffer(-1) 105 | ) 106 | ``` 107 | 108 | See these issues [Issue #293](https://github.com/rabbitmq/rabbitmq-stream-go-client/issues/293) and [PR #374](https://github.com/rabbitmq/rabbitmq-stream-go-client/pull/374), to get more insight. 109 | -------------------------------------------------------------------------------- /compose/.gitignore: -------------------------------------------------------------------------------- 1 | tls-gen/ 2 | .DS_Store 3 | -------------------------------------------------------------------------------- /compose/README.md: -------------------------------------------------------------------------------- 1 | RabbitMQ cluster with HA proxy 2 | === 3 | 4 | how to run: 5 | 6 | ```bash 7 | git clone git@github.com:rabbitmq/rabbitmq-stream-go-client.git . 8 | make rabbitmq-ha-proxy 9 | ``` 10 | 11 | ports: 12 | ``` 13 | - localhost:5553 #standard stream port 14 | - localhost:5554 #TLS stream port 15 | - http://localhost:15673 #management port 16 | ``` 17 | 18 | RabbitMQ single node with TLS 19 | === 20 | 21 | ```bash 22 | git clone git@github.com:rabbitmq/rabbitmq-stream-go-client.git . 
23 | make rabbitmq-server-tls 24 | ``` 25 | 26 | ports: 27 | ``` 28 | - localhost:5552 #standard stream port 29 | - localhost:5551 #TLS stream port 30 | - http://localhost:15672 #management port 31 | ``` -------------------------------------------------------------------------------- /compose/ha_tls/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM haproxy:2.2.22 2 | 3 | COPY haproxy.cfg /usr/local/etc/haproxy/haproxy.cfg -------------------------------------------------------------------------------- /compose/ha_tls/conf/enabled_plugins: -------------------------------------------------------------------------------- 1 | [rabbitmq_management, rabbitmq_stream, rabbitmq_stream_management]. -------------------------------------------------------------------------------- /compose/ha_tls/conf/rabbitmq.conf: -------------------------------------------------------------------------------- 1 | cluster_formation.peer_discovery_backend = rabbit_peer_discovery_classic_config 2 | 3 | cluster_formation.classic_config.nodes.1 = rabbit@node0 4 | cluster_formation.classic_config.nodes.2 = rabbit@node1 5 | cluster_formation.classic_config.nodes.3 = rabbit@node2 6 | loopback_users.guest = false 7 | 8 | ssl_options.cacertfile = /certs/ca_certificate.pem 9 | ssl_options.certfile = /certs/server_certificate.pem 10 | ssl_options.keyfile = /certs/server_key.pem 11 | listeners.ssl.default = 5671 12 | stream.listeners.ssl.default = 5551 13 | ssl_options.verify = verify_peer 14 | ssl_options.fail_if_no_peer_cert = false 15 | -------------------------------------------------------------------------------- /compose/ha_tls/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | services: 3 | rabbit_node0: 4 | environment: 5 | - RABBITMQ_ERLANG_COOKIE='secret_cookie' 6 | - RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS=-rabbitmq_stream advertised_host node0 advertised_port 5562 7 | 
networks: 8 | - back 9 | hostname: node0 10 | image: rabbitmq:4-management 11 | pull_policy: always 12 | ports: 13 | - "5561:5551" 14 | - "5562:5552" 15 | - "5682:5672" 16 | tty: true 17 | volumes: 18 | - ./conf/:/etc/rabbitmq/ 19 | - "./tls-gen/basic/result/:/certs" 20 | rabbit_node1: 21 | environment: 22 | - RABBITMQ_ERLANG_COOKIE='secret_cookie' 23 | - RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS=-rabbitmq_stream advertised_host node1 advertised_port 5572 24 | networks: 25 | - back 26 | hostname: node1 27 | image: rabbitmq:4-management 28 | pull_policy: always 29 | ports: 30 | - "5571:5551" 31 | - "5572:5552" 32 | - "5692:5672" 33 | tty: true 34 | volumes: 35 | - ./conf/:/etc/rabbitmq/ 36 | - "./tls-gen/basic/result/:/certs" 37 | rabbit_node2: 38 | environment: 39 | - RABBITMQ_ERLANG_COOKIE='secret_cookie' 40 | - RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS=-rabbitmq_stream advertised_host node2 advertised_port 5582 41 | networks: 42 | - back 43 | hostname: node2 44 | image: rabbitmq:4-management 45 | pull_policy: always 46 | ports: 47 | - "5581:5551" 48 | - "5582:5552" 49 | - "5602:5672" 50 | tty: true 51 | volumes: 52 | - ./conf/:/etc/rabbitmq/ 53 | - "./tls-gen/basic/result/:/certs" 54 | haproxy: 55 | image: haproxy-rabbitmq-cluster 56 | # container_name: haproxy 57 | hostname: haproxy 58 | ports: 59 | - "5553:5552" 60 | - "5554:5551" 61 | - "5674:5672" 62 | - "15673:15672" 63 | networks: 64 | - back 65 | networks: 66 | back: -------------------------------------------------------------------------------- /compose/ha_tls/haproxy.cfg: -------------------------------------------------------------------------------- 1 | global 2 | maxconn 4096 3 | 4 | defaults 5 | timeout connect 60s 6 | timeout client 60s 7 | timeout server 60s 8 | 9 | frontend tcp-0_0_0_0-443 10 | bind *:5551 11 | mode tcp 12 | use_backend rabbitmq-stream-tls 13 | tcp-request inspect-delay 5s 14 | tcp-request content accept if { req_ssl_hello_type 1 } 15 | 16 | backend rabbitmq-stream-tls 17 | mode tcp 18 | 
server rabbit_node0 rabbit_node0:5551 check inter 5000 fall 3 19 | server rabbit_node1 rabbit_node1:5551 check inter 5000 fall 3 20 | server rabbit_node2 rabbit_node2:5551 check inter 5000 fall 3 21 | 22 | listen rabbitmq-stream 23 | bind 0.0.0.0:5552 24 | balance roundrobin 25 | server rabbit_node0 rabbit_node0:5552 check inter 5000 fall 3 26 | server rabbit_node1 rabbit_node1:5552 check inter 5000 fall 3 27 | server rabbit_node2 rabbit_node2:5552 check inter 5000 fall 3 28 | 29 | listen rabbitmq-amqp 30 | bind 0.0.0.0:5672 31 | balance roundrobin 32 | server rabbit_node0 rabbit_node0:5672 check inter 5000 fall 3 33 | server rabbit_node1 rabbit_node1:5672 check inter 5000 fall 3 34 | server rabbit_node2 rabbit_node2:5672 check inter 5000 fall 3 35 | 36 | 37 | listen rabbitmq-ui 38 | bind 0.0.0.0:15672 39 | balance roundrobin 40 | server rabbit_node0 rabbit_node0:15672 check inter 5000 fall 3 41 | server rabbit_node1 rabbit_node1:15672 check inter 5000 fall 3 42 | server rabbit_node2 rabbit_node2:15672 check inter 5000 fall 3 43 | -------------------------------------------------------------------------------- /compose/tls/conf/enabled_plugins: -------------------------------------------------------------------------------- 1 | [rabbitmq_amqp1_0,rabbitmq_auth_mechanism_ssl,rabbitmq_management,rabbitmq_stream,rabbitmq_stream_management]. 
2 | -------------------------------------------------------------------------------- /compose/tls/conf/rabbitmq.conf: -------------------------------------------------------------------------------- 1 | loopback_users.guest = false 2 | 3 | ssl_options.cacertfile = /certs/ca_certificate.pem 4 | ssl_options.certfile = /certs/server_certificate.pem 5 | ssl_options.keyfile = /certs/server_key.pem 6 | listeners.ssl.default = 5671 7 | stream.listeners.ssl.default = 5551 8 | ssl_options.verify = verify_peer 9 | ssl_options.fail_if_no_peer_cert = false 10 | -------------------------------------------------------------------------------- /create_tag.sh: -------------------------------------------------------------------------------- 1 | #/bin/bash 2 | 3 | version=$1 4 | gpg_key=$2 5 | regex="^([0-9]+)\.([0-9]+)\.([0-9]+)(-(alpha|beta|rc)\.[0-9]+)?$" 6 | tag="v$version" 7 | 8 | if [ $# -lt 2 ]; then 9 | echo "Usage: $0 " 10 | exit 1 11 | fi 12 | 13 | if [[ ! $version =~ $regex ]]; then 14 | echo "Invalid version format: $version" 15 | exit 1 16 | fi 17 | 18 | echo "Updating version and constants to $version" 19 | echo $version > VERSION 20 | sed -i -e "s/.*ClientVersion = \"*.*/ClientVersion = \"$version\"/" pkg/stream/constants.go 21 | go fmt ./... 22 | 23 | echo "" 24 | echo "Committing changes" 25 | git add VERSION pkg/stream/constants.go README.md 26 | git commit -m "rabbitmq-stream-go-client $tag" 27 | 28 | echo "" 29 | echo "Creating and pushing tag $tag" 30 | git tag -a -s -u $gpg_key -m "rabbitmq-stream-go-client $tag" $tag && git push && git push --tags 31 | -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | Stream examples 2 | === 3 | - [Reliable getting started](./getting_started/getting_started.go) - The structures you need to start. 
4 | - [Getting started](./getting_started/getting_started.go) - Producer and Consumer example without reconnection 5 | - [Offset Start](./offsetStart/offset.go) - How to set different points to start consuming 6 | - [Offset Tracking](./offsetTracking/offsetTracking.go) - Manually store the consumer offset 7 | - [Automatic Offset Tracking](./automaticOffsetTracking/automaticOffsetTracking.go) - Automatically store the consumer offset 8 | - [Getting started TLS](./tls/getting_started_tls.go) - A TLS example. (you can run `make rabbitmq-server-tls` to create a single RabbitMQ node with TLS) 9 | - [Deduplication](./deduplication/deduplication.go) - Deduplication example, run it more than one time, and the records
10 | won't change, since the server will handle the deduplication. 11 | - [Using a load balancer](./proxy/proxy.go) - An example of how to use the client with a TLS load balancer.
12 | Use the [RabbitMQ TLS cluster](../compose) to run a TLS and no TLS cluster.
13 | For more details: https://blog.rabbitmq.com/posts/2021/07/connecting-to-streams/ 14 | - [Sub Entries Batching](./sub-entries-batching/sub_entries_batching.go) - Sub Entries Batching example 15 | - [Stream Filtering](./filtering/filtering.go) - Stream Filtering example 16 | - [Single Active Consumer](./single_active_consumer) - Single Active Consumer example 17 | - [Reliable](./reliable) - Reliable Producer and Reliable Consumer example 18 | - [Super Stream](./super_stream) - Super Stream example with Single Active Consumer 19 | -------------------------------------------------------------------------------- /examples/automaticOffsetTracking/automaticOffsetTracking.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "os" 7 | "strconv" 8 | "sync/atomic" 9 | "time" 10 | 11 | "github.com/google/uuid" 12 | "github.com/rabbitmq/rabbitmq-stream-go-client/pkg/amqp" 13 | "github.com/rabbitmq/rabbitmq-stream-go-client/pkg/stream" 14 | ) 15 | 16 | func CheckErr(err error) { 17 | if err != nil { 18 | fmt.Printf("%s ", err) 19 | os.Exit(1) 20 | } 21 | } 22 | 23 | func main() { 24 | reader := bufio.NewReader(os.Stdin) 25 | 26 | fmt.Println("Automatic Offset tracking example") 27 | fmt.Println("Connecting to RabbitMQ streaming ...") 28 | 29 | env, err := stream.NewEnvironment( 30 | stream.NewEnvironmentOptions(). 31 | SetHost("localhost"). 32 | SetPort(5552). 33 | SetUser("guest"). 
34 | SetPassword("guest")) 35 | CheckErr(err) 36 | streamName := uuid.New().String() 37 | err = env.DeclareStream(streamName, 38 | &stream.StreamOptions{ 39 | MaxLengthBytes: stream.ByteCapacity{}.GB(2), 40 | }, 41 | ) 42 | 43 | CheckErr(err) 44 | 45 | producer, err := env.NewProducer(streamName, nil) 46 | CheckErr(err) 47 | 48 | go func() { 49 | for i := range 220 { 50 | err := producer.Send(amqp.NewMessage([]byte("hello_world_" + strconv.Itoa(i)))) 51 | CheckErr(err) 52 | } 53 | }() 54 | 55 | var counter int32 56 | handleMessages := func(_ stream.ConsumerContext, _ *amqp.Message) { 57 | if atomic.AddInt32(&counter, 1)%20 == 0 { 58 | fmt.Printf("messages consumed with auto commit: %d \n ", atomic.LoadInt32(&counter)) 59 | } 60 | } 61 | 62 | consumerOffsetNumber, err := env.NewConsumer(streamName, 63 | handleMessages, 64 | stream.NewConsumerOptions(). 65 | // set a consumerOffsetNumber name 66 | SetConsumerName("my_consumer"). 67 | // nil is also a valid value. Default values will be used 68 | SetAutoCommit(stream.NewAutoCommitStrategy(). 69 | SetCountBeforeStorage(50). // each 50 messages stores the index 70 | SetFlushInterval(20*time.Second)). 71 | SetOffset(stream.OffsetSpecification{}.First())) // or after 20 seconds 72 | CheckErr(err) 73 | 74 | time.Sleep(2 * time.Second) 75 | atomic.StoreInt32(&counter, 0) 76 | // so here we consume only 20 messages 77 | handleMessagesAfter := func(_ stream.ConsumerContext, _ *amqp.Message) { 78 | if atomic.AddInt32(&counter, 1)%20 == 0 { 79 | fmt.Printf("messages consumed after: %d \n ", atomic.LoadInt32(&counter)) 80 | } 81 | } 82 | 83 | offset, err := env.QueryOffset("my_consumer", streamName) 84 | CheckErr(err) 85 | consumerNext, err := env.NewConsumer(streamName, 86 | handleMessagesAfter, 87 | stream.NewConsumerOptions(). 88 | SetConsumerName("my_consumer"). // set a consumerOffsetNumber name 89 | SetOffset(stream.OffsetSpecification{}.Offset(offset))) // With last consumed we point to the last saved. 
90 | // in this case will be 200. So it will consume 20 91 | // messages 92 | CheckErr(err) 93 | 94 | fmt.Println("Press any key to stop ") 95 | _, _ = reader.ReadString('\n') 96 | err = producer.Close() 97 | CheckErr(err) 98 | err = consumerOffsetNumber.Close() 99 | CheckErr(err) 100 | err = consumerNext.Close() 101 | CheckErr(err) 102 | err = env.DeleteStream(streamName) 103 | CheckErr(err) 104 | } 105 | -------------------------------------------------------------------------------- /examples/deduplication/deduplication.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "os" 7 | 8 | "github.com/rabbitmq/rabbitmq-stream-go-client/pkg/amqp" 9 | "github.com/rabbitmq/rabbitmq-stream-go-client/pkg/stream" 10 | ) 11 | 12 | func CheckErr(err error) { 13 | if err != nil { 14 | fmt.Printf("%s ", err) 15 | os.Exit(1) 16 | } 17 | } 18 | 19 | func main() { 20 | reader := bufio.NewReader(os.Stdin) 21 | 22 | fmt.Println("Deduplication example") 23 | fmt.Println("Connecting to RabbitMQ streaming ...") 24 | 25 | env, err := stream.NewEnvironment( 26 | stream.NewEnvironmentOptions(). 27 | SetHost("localhost"). 28 | SetPort(5552)) 29 | CheckErr(err) 30 | streamName := "deduplication" 31 | err = env.DeclareStream(streamName, 32 | stream.NewStreamOptions().SetMaxLengthBytes(stream.ByteCapacity{}.GB(2))) 33 | if err != stream.StreamAlreadyExists { 34 | CheckErr(err) 35 | } 36 | 37 | producer, err := env.NewProducer(streamName, 38 | stream.NewProducerOptions(). 
39 | // producer name is mandatory to handle the deduplication 40 | // don't use the producer name if you don't need the deduplication 41 | SetProducerName("myProducer")) 42 | 43 | CheckErr(err) 44 | 45 | chConfirm := producer.NotifyPublishConfirmation() 46 | go func(ch stream.ChannelPublishConfirm, _ *stream.Producer) { 47 | for messagesStatus := range ch { 48 | for _, messageStatus := range messagesStatus { 49 | if messageStatus.IsConfirmed() { 50 | fmt.Printf("publishingId: %d - Confirmed: %s \n", 51 | /// In this case the PublishingId is the one provided by the user 52 | messageStatus.GetMessage().GetPublishingId(), 53 | messageStatus.GetMessage().GetData()[0]) 54 | } 55 | } 56 | } 57 | }(chConfirm, producer) 58 | 59 | // In case you need to know which is the last ID for the producer: GetLastPublishingId 60 | lastPublishingId, err := producer.GetLastPublishingId() 61 | CheckErr(err) 62 | fmt.Printf("lastPublishingId: %d\n", 63 | lastPublishingId, 64 | ) 65 | 66 | data := make(map[int]string) 67 | data[0] = "Piaggio" 68 | data[1] = "Ferrari" 69 | data[2] = "Ducati" 70 | data[3] = "Maserati" 71 | data[4] = "Fiat" 72 | data[5] = "Lamborghini" 73 | data[6] = "Bugatti" 74 | data[7] = "Alfa Romeo" 75 | data[8] = "Aprilia" 76 | data[9] = "Benelli" 77 | 78 | for i := range len(data) { 79 | msg := amqp.NewMessage([]byte(data[i])) 80 | msg.SetPublishingId(int64(i)) // mandatory to handle the deduplication 81 | err := producer.Send(msg) 82 | CheckErr(err) 83 | } 84 | 85 | fmt.Println("Press any key to stop ") 86 | _, _ = reader.ReadString('\n') 87 | err = producer.Close() 88 | CheckErr(err) 89 | err = env.DeleteStream(streamName) 90 | CheckErr(err) 91 | } 92 | -------------------------------------------------------------------------------- /examples/filtering/filtering.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "errors" 6 | "fmt" 7 | "os" 8 | "time" 9 | 10 | 
"github.com/rabbitmq/rabbitmq-stream-go-client/pkg/amqp" 11 | "github.com/rabbitmq/rabbitmq-stream-go-client/pkg/message" 12 | "github.com/rabbitmq/rabbitmq-stream-go-client/pkg/stream" 13 | ) 14 | 15 | func CheckErr(err error) { 16 | if err != nil { 17 | fmt.Printf("%s ", err) 18 | os.Exit(1) 19 | } 20 | } 21 | 22 | func handlePublishConfirm(confirms stream.ChannelPublishConfirm) { 23 | go func() { 24 | for confirmed := range confirms { 25 | for _, msg := range confirmed { 26 | if msg.IsConfirmed() { 27 | fmt.Printf("message %s stored \n ", msg.GetMessage().GetData()) 28 | } else { 29 | fmt.Printf("message %s failed \n ", msg.GetMessage().GetData()) 30 | } 31 | } 32 | } 33 | }() 34 | } 35 | 36 | func consumerClose(channelClose stream.ChannelClose) { 37 | event := <-channelClose 38 | fmt.Printf("Consumer: %s closed on the stream: %s, reason: %s \n", event.Name, event.StreamName, event.Reason) 39 | } 40 | 41 | func main() { 42 | reader := bufio.NewReader(os.Stdin) 43 | 44 | // You need RabbitMQ 3.13.0 or later to run this example 45 | fmt.Println("Filtering example.") 46 | fmt.Println("Connecting to RabbitMQ streaming ...") 47 | 48 | // Connect to the broker ( or brokers ) 49 | env, err := stream.NewEnvironment( 50 | stream.NewEnvironmentOptions(). 51 | SetHost("localhost"). 52 | SetPort(5552). 53 | SetUser("guest"). 54 | SetPassword("guest")) 55 | CheckErr(err) 56 | 57 | streamName := "FilteringExampleStream" 58 | err = env.DeleteStream(streamName) 59 | if err != nil && errors.Is(err, stream.StreamDoesNotExist) { 60 | // we can ignore the error if the stream does not exist 61 | // it will be created later 62 | fmt.Println("Stream does not exist. 
") 63 | } else { 64 | CheckErr(err) 65 | } 66 | 67 | err = env.DeclareStream(streamName, 68 | &stream.StreamOptions{ 69 | MaxLengthBytes: stream.ByteCapacity{}.GB(2), 70 | }, 71 | ) 72 | 73 | CheckErr(err) 74 | 75 | producer, err := env.NewProducer(streamName, 76 | stream.NewProducerOptions().SetFilter( 77 | // Here we enable the filter 78 | // for each message we set the filter key. 79 | // the filter result is a string 80 | stream.NewProducerFilter(func(message message.StreamMessage) string { 81 | return fmt.Sprintf("%s", message.GetApplicationProperties()["state"]) 82 | }))) 83 | CheckErr(err) 84 | 85 | chPublishConfirm := producer.NotifyPublishConfirmation() 86 | handlePublishConfirm(chPublishConfirm) 87 | 88 | // Send messages with the state property == New York 89 | send(producer, "New York") 90 | // Here we wait a bit to be sure that the messages are stored in the same chunk 91 | // and we can filter them 92 | // This is only for the example, in a real case you don't need to wait 93 | time.Sleep(2 * time.Second) 94 | 95 | // Send messages with the state property == Alabama 96 | send(producer, "Alabama") 97 | 98 | // Here we wait a bit to be sure that the messages are stored in another chunk 99 | // only for testing the filter 100 | time.Sleep(1 * time.Second) 101 | 102 | err = producer.Close() 103 | CheckErr(err) 104 | 105 | // post filter is applied client side after the server side filter 106 | // the server side filter is applied on the broker side 107 | // In real scenarios, the chunk could contain messages that do not match the filter 108 | // that's why we need to apply the filter client side 109 | // NOTE: This code _must_ be simple and fast. 
Don't introduce complex logic here with possible bugs 110 | // Post filter is mandatory as function even you can return always true 111 | postFilter := func(message *amqp.Message) bool { 112 | // you can use any amqp.Message field to filter 113 | // be sure the field is set ann valid before sending the message 114 | return message.ApplicationProperties["state"] == "New York" 115 | } 116 | 117 | // Here we create a consumer with a filter 118 | // the filter is applied server side 119 | // with "New York" as a filter 120 | filter := stream.NewConsumerFilter([]string{"New York"}, true, postFilter) 121 | 122 | handleMessages := func(consumerContext stream.ConsumerContext, message *amqp.Message) { 123 | // Here you should process received only messages that match the filter 124 | // "New York" messages should be received 125 | // "Alabama" messages should not be received 126 | fmt.Printf("consumer name: %s, data: %s, message offset %d, chunk entities count: %d \n ", 127 | consumerContext.Consumer.GetName(), message.Data, consumerContext.Consumer.GetOffset(), consumerContext.GetEntriesCount()) 128 | } 129 | 130 | consumer, err := env.NewConsumer( 131 | streamName, 132 | handleMessages, 133 | stream.NewConsumerOptions(). 134 | SetOffset(stream.OffsetSpecification{}.First()). 
// start consuming from the beginning 135 | SetFilter(filter)) // set the filter 136 | CheckErr(err) 137 | channelClose := consumer.NotifyClose() 138 | defer consumerClose(channelClose) 139 | 140 | fmt.Println("Press any key to stop ") 141 | _, _ = reader.ReadString('\n') 142 | err = consumer.Close() 143 | time.Sleep(200 * time.Millisecond) 144 | CheckErr(err) 145 | err = env.DeleteStream(streamName) 146 | CheckErr(err) 147 | err = env.Close() 148 | CheckErr(err) 149 | } 150 | 151 | func send(producer *stream.Producer, state string) { 152 | for i := range 100 { 153 | msg := amqp.NewMessage([]byte(fmt.Sprintf("message %d, state %s", i, state))) 154 | msg.ApplicationProperties = map[string]any{"state": state} 155 | err := producer.Send(msg) 156 | CheckErr(err) 157 | } 158 | } 159 | -------------------------------------------------------------------------------- /examples/getting_started/getting_started.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "os" 7 | "strconv" 8 | "time" 9 | 10 | "github.com/google/uuid" 11 | "github.com/rabbitmq/rabbitmq-stream-go-client/pkg/amqp" 12 | "github.com/rabbitmq/rabbitmq-stream-go-client/pkg/stream" 13 | ) 14 | 15 | func CheckErr(err error) { 16 | if err != nil { 17 | fmt.Printf("%s ", err) 18 | os.Exit(1) 19 | } 20 | } 21 | 22 | func handlePublishConfirm(confirms stream.ChannelPublishConfirm) { 23 | go func() { 24 | for confirmed := range confirms { 25 | for _, msg := range confirmed { 26 | if msg.IsConfirmed() { 27 | fmt.Printf("message %s stored \n ", msg.GetMessage().GetData()) 28 | } else { 29 | fmt.Printf("message %s failed \n ", msg.GetMessage().GetData()) 30 | } 31 | } 32 | } 33 | }() 34 | } 35 | 36 | func consumerClose(channelClose stream.ChannelClose) { 37 | event := <-channelClose 38 | fmt.Printf("Consumer: %s closed on the stream: %s, reason: %s \n", event.Name, event.StreamName, event.Reason) 39 | } 40 | 41 | func main() { 
42 | reader := bufio.NewReader(os.Stdin) 43 | // Set log level, not mandatory by default is INFO 44 | // you cn set DEBUG for more information 45 | // stream.SetLevelInfo(logs.DEBUG) 46 | 47 | fmt.Println("Getting started with Streaming client for RabbitMQ") 48 | fmt.Println("Connecting to RabbitMQ streaming ...") 49 | 50 | // Connect to the broker ( or brokers ) 51 | env, err := stream.NewEnvironment( 52 | stream.NewEnvironmentOptions(). 53 | SetHost("localhost"). 54 | SetPort(5552). 55 | SetUser("guest"). 56 | SetPassword("guest")) 57 | CheckErr(err) 58 | // Create a stream, you can create streams without any option like: 59 | // err = env.DeclareStream(streamName, nil) 60 | // it is a best practise to define a size, 1GB for example: 61 | 62 | streamName := uuid.New().String() 63 | err = env.DeclareStream(streamName, 64 | &stream.StreamOptions{ 65 | MaxLengthBytes: stream.ByteCapacity{}.GB(2), 66 | }, 67 | ) 68 | 69 | CheckErr(err) 70 | 71 | // Get a new producer for a stream 72 | producer, err := env.NewProducer(streamName, nil) 73 | CheckErr(err) 74 | 75 | // optional publish confirmation channel 76 | chPublishConfirm := producer.NotifyPublishConfirmation() 77 | handlePublishConfirm(chPublishConfirm) 78 | 79 | // the send method automatically aggregates the messages 80 | // based on batch size 81 | for i := range 10000 { 82 | err := producer.Send(amqp.NewMessage([]byte("hello_world_" + strconv.Itoa(i)))) 83 | CheckErr(err) 84 | } 85 | 86 | // this sleep is not mandatory, just to show the confirmed messages 87 | time.Sleep(1 * time.Second) 88 | err = producer.Close() 89 | CheckErr(err) 90 | 91 | // Define a consumer per stream, there are different offset options to define a consumer, default is 92 | // env.NewConsumer(streamName, func(Context streaming.ConsumerContext, message *amqp.message) { 93 | // 94 | // }, nil) 95 | // if you need to track the offset you need a consumer name like: 96 | handleMessages := func(consumerContext stream.ConsumerContext, message 
*amqp.Message) { 97 | fmt.Printf("consumer name: %s, data: %s, message offset %d, chunk entities count: %d \n ", 98 | consumerContext.Consumer.GetName(), message.Data, consumerContext.Consumer.GetOffset(), consumerContext.GetEntriesCount()) 99 | } 100 | 101 | consumer, err := env.NewConsumer( 102 | streamName, 103 | handleMessages, 104 | stream.NewConsumerOptions(). 105 | SetClientProvidedName("my_consumer"). // connection name 106 | SetConsumerName("my_consumer"). // set a consumer name 107 | SetOffset(stream.OffsetSpecification{}.First()). // start consuming from the beginning 108 | SetCRCCheck(false)) // Disable crc control, increase the performances 109 | CheckErr(err) 110 | channelClose := consumer.NotifyClose() 111 | // channelClose receives all the closing events, here you can handle the 112 | // client reconnection or just log 113 | go func() { 114 | consumerClose(channelClose) 115 | }() 116 | 117 | fmt.Println("Press any key to stop ") 118 | _, _ = reader.ReadString('\n') 119 | err = consumer.Close() 120 | time.Sleep(200 * time.Millisecond) 121 | CheckErr(err) 122 | err = env.DeleteStream(streamName) 123 | CheckErr(err) 124 | err = env.Close() 125 | CheckErr(err) 126 | } 127 | -------------------------------------------------------------------------------- /examples/offsetStart/offset.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "os" 7 | "strconv" 8 | "sync/atomic" 9 | "time" 10 | 11 | "github.com/google/uuid" 12 | "github.com/rabbitmq/rabbitmq-stream-go-client/pkg/amqp" 13 | "github.com/rabbitmq/rabbitmq-stream-go-client/pkg/stream" 14 | ) 15 | 16 | func CheckErr(err error) { 17 | if err != nil { 18 | fmt.Printf("%s ", err) 19 | os.Exit(1) 20 | } 21 | } 22 | 23 | func main() { 24 | reader := bufio.NewReader(os.Stdin) 25 | 26 | fmt.Println("Start Offset example") 27 | fmt.Println("Connecting to RabbitMQ streaming ...") 28 | 29 | env, err := 
stream.NewEnvironment( 30 | stream.NewEnvironmentOptions(). 31 | SetHost("localhost"). 32 | SetPort(5552). 33 | SetUser("guest"). 34 | SetPassword("guest"). 35 | SetMaxConsumersPerClient(1)) 36 | CheckErr(err) 37 | streamName := uuid.New().String() 38 | err = env.DeclareStream(streamName, 39 | &stream.StreamOptions{ 40 | MaxLengthBytes: stream.ByteCapacity{}.GB(2), 41 | }, 42 | ) 43 | 44 | CheckErr(err) 45 | 46 | producer, err := env.NewProducer(streamName, nil) 47 | CheckErr(err) 48 | 49 | go func() { 50 | for i := range 200 { 51 | err := producer.Send(amqp.NewMessage([]byte("hello_world_" + strconv.Itoa(i)))) 52 | CheckErr(err) 53 | } 54 | }() 55 | 56 | var counter int32 57 | handleMessages := func(_ stream.ConsumerContext, _ *amqp.Message) { 58 | fmt.Printf("messages consumed: %d \n ", atomic.AddInt32(&counter, 1)) 59 | } 60 | 61 | consumerOffsetNumber, err := env.NewConsumer(streamName, 62 | handleMessages, 63 | stream.NewConsumerOptions(). 64 | SetConsumerName("my_consumer"). // set a consumerOffsetNumber name 65 | SetOffset(stream.OffsetSpecification{}.Offset(100))) 66 | // start specific offset, in this case we start from the 100 so it will consume 100 messages 67 | // see the others stream.OffsetSpecification{}.XXX 68 | CheckErr(err) 69 | 70 | /// wait a bit just for demo and reset the counters 71 | time.Sleep(2 * time.Second) 72 | atomic.StoreInt32(&counter, 0) 73 | 74 | consumerNext, err := env.NewConsumer(streamName, 75 | handleMessages, 76 | stream.NewConsumerOptions(). 77 | SetConsumerName("my_consumer_1"). 
// set a consumerOffsetNumber name 78 | SetOffset(stream.OffsetSpecification{}.First())) // with first() the the stream is loaded from the beginning 79 | CheckErr(err) 80 | 81 | fmt.Println("Press any key to stop ") 82 | _, _ = reader.ReadString('\n') 83 | err = producer.Close() 84 | CheckErr(err) 85 | err = consumerOffsetNumber.Close() 86 | CheckErr(err) 87 | err = consumerNext.Close() 88 | CheckErr(err) 89 | err = env.DeleteStream(streamName) 90 | CheckErr(err) 91 | } 92 | -------------------------------------------------------------------------------- /examples/offsetTracking/offsetTracking.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "os" 7 | "strconv" 8 | "sync/atomic" 9 | "time" 10 | 11 | "github.com/google/uuid" 12 | "github.com/rabbitmq/rabbitmq-stream-go-client/pkg/amqp" 13 | "github.com/rabbitmq/rabbitmq-stream-go-client/pkg/stream" 14 | ) 15 | 16 | func CheckErr(err error) { 17 | if err != nil { 18 | fmt.Printf("%s ", err) 19 | os.Exit(1) 20 | } 21 | } 22 | 23 | func main() { 24 | reader := bufio.NewReader(os.Stdin) 25 | 26 | fmt.Println("Tracking offset example") 27 | fmt.Println("Connecting to RabbitMQ streaming ...") 28 | 29 | env, err := stream.NewEnvironment( 30 | stream.NewEnvironmentOptions(). 31 | SetHost("localhost"). 32 | SetPort(5552). 33 | SetUser("guest"). 
34 | SetPassword("guest")) 35 | CheckErr(err) 36 | 37 | streamName := uuid.New().String() 38 | err = env.DeclareStream(streamName, 39 | &stream.StreamOptions{ 40 | MaxLengthBytes: stream.ByteCapacity{}.MB(500), 41 | }, 42 | ) 43 | CheckErr(err) 44 | 45 | producer, err := env.NewProducer(streamName, nil) 46 | CheckErr(err) 47 | 48 | go func() { 49 | for i := range 2000 { 50 | err := producer.Send(amqp.NewMessage([]byte("hello_world_" + strconv.Itoa(i)))) 51 | CheckErr(err) 52 | time.Sleep(100 * time.Millisecond) 53 | } 54 | }() 55 | 56 | var count int32 57 | 58 | handleMessages := func(consumerContext stream.ConsumerContext, _ *amqp.Message) { 59 | if atomic.AddInt32(&count, 1)%1000 == 0 { 60 | fmt.Printf("cousumed %d messages \n", atomic.LoadInt32(&count)) 61 | // AVOID to store for each single message, it will reduce the performances 62 | // The server keeps the consume tracking using the consumer name 63 | err := consumerContext.Consumer.StoreOffset() 64 | if err != nil { 65 | CheckErr(err) 66 | } 67 | } 68 | } 69 | 70 | consumer, err := env.NewConsumer( 71 | streamName, 72 | handleMessages, 73 | stream.NewConsumerOptions(). 74 | SetManualCommit(). // disable auto commit 75 | SetConsumerName("my_consumer"). 
// set a consumer name 76 | SetOffset(stream.OffsetSpecification{}.First())) // start consuming from the beginning 77 | CheckErr(err) 78 | 79 | fmt.Println("Press any key to stop ") 80 | _, _ = reader.ReadString('\n') 81 | err = producer.Close() 82 | CheckErr(err) 83 | err = consumer.Close() 84 | CheckErr(err) 85 | err = env.DeleteStream(streamName) 86 | CheckErr(err) 87 | } 88 | -------------------------------------------------------------------------------- /examples/proxy/proxy.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "crypto/tls" 6 | "fmt" 7 | "os" 8 | "strconv" 9 | 10 | "github.com/google/uuid" 11 | "github.com/rabbitmq/rabbitmq-stream-go-client/pkg/amqp" 12 | "github.com/rabbitmq/rabbitmq-stream-go-client/pkg/stream" 13 | ) 14 | 15 | func CheckErr(err error) { 16 | if err != nil { 17 | fmt.Printf("%s ", err) 18 | os.Exit(1) 19 | } 20 | } 21 | 22 | func main() { 23 | reader := bufio.NewReader(os.Stdin) 24 | 25 | // stream.SetLevelInfo(logs.DEBUG) 26 | fmt.Println("Configure a load-balancer TLS for RabbitMQ") 27 | fmt.Println("Connecting to RabbitMQ streaming ...") 28 | 29 | // load balancer address in TLS 30 | addressResolver := stream.AddressResolver{ 31 | Host: "localhost", 32 | Port: 5554, 33 | } 34 | //nolint:gosec 35 | conf := &tls.Config{InsecureSkipVerify: true} 36 | 37 | env, err := stream.NewEnvironment( 38 | stream.NewEnvironmentOptions(). 39 | SetHost(addressResolver.Host). 40 | SetPort(addressResolver.Port). 41 | IsTLS(true). 42 | SetTLSConfig(conf). 43 | SetAddressResolver(addressResolver). 
44 | SetMaxProducersPerClient(5)) 45 | 46 | CheckErr(err) 47 | 48 | /// We create a few streams, in order to distribute the streams across the cluster 49 | streamsName := make([]string, 0, 3) 50 | for range 3 { 51 | streamsName = append(streamsName, uuid.New().String()) 52 | } 53 | 54 | for _, streamName := range streamsName { 55 | fmt.Printf("Create stream %s ...\n", streamName) 56 | err = env.DeclareStream(streamName, 57 | &stream.StreamOptions{ 58 | MaxLengthBytes: stream.ByteCapacity{}.GB(2), 59 | }, 60 | ) 61 | } 62 | 63 | CheckErr(err) 64 | producers := make([]*stream.Producer, 0, len(streamsName)) 65 | // The producer MUST connect to the leader stream 66 | // here the AddressResolver try to get the leader 67 | // if fails retry 68 | for _, streamName := range streamsName { 69 | fmt.Printf("Create producer for %s ...\n", streamName) 70 | producer, err := env.NewProducer(streamName, nil) 71 | producers = append(producers, producer) 72 | CheckErr(err) 73 | } 74 | 75 | // just publish some message 76 | for i := 0; i < 50; i++ { 77 | for _, producer := range producers { 78 | err := producer.Send(amqp.NewMessage([]byte("hello_world_" + strconv.Itoa(i)))) 79 | CheckErr(err) 80 | } 81 | } 82 | 83 | handleMessages := func(consumerContext stream.ConsumerContext, message *amqp.Message) { 84 | fmt.Printf("consumer name: %s, text: %s \n ", consumerContext.Consumer.GetName(), message.Data) 85 | } 86 | 87 | // the consumer can connect to the leader o follower 88 | // the AddressResolver just resolve the ip 89 | for _, streamName := range streamsName { 90 | fmt.Printf("Create consumer for %s ...\n", streamName) 91 | _, err := env.NewConsumer( 92 | streamName, 93 | handleMessages, 94 | stream.NewConsumerOptions(). 95 | SetConsumerName(uuid.New().String()). 
// set a random name 96 | SetOffset(stream.OffsetSpecification{}.First())) // start consuming from the beginning 97 | CheckErr(err) 98 | } 99 | 100 | /// check on the UI http://localhost:15673/#/stream/connections 101 | // the producers are connected to the leader node 102 | /// the consumers random nodes it doesn't matter 103 | 104 | fmt.Println("Press any key to stop ") 105 | _, _ = reader.ReadString('\n') 106 | for _, streamName := range streamsName { 107 | fmt.Printf("Delete stream %s ...\n", streamName) 108 | err = env.DeleteStream(streamName) 109 | } 110 | CheckErr(err) 111 | err = env.Close() 112 | CheckErr(err) 113 | } 114 | -------------------------------------------------------------------------------- /examples/reliable/README.md: -------------------------------------------------------------------------------- 1 | ### Reliable Producer/Consumer example 2 | 3 | This example demonstrates how to use reliable producers and consumers to send and receive messages. 4 | The `ReliableProducer` and `ReliableConsumer` are in the `ha` package and use the disconnection event to reconnect to the broker. 5 | You can write your own `ReliableProducer` and `ReliableConsumer` by using the `Close` channel. 6 | 7 | The `ReliableProducer` blocks the sending of messages when the broker is disconnected and resumes sending when the broker is reconnected. 8 | 9 | In this example, we use `unConfirmedMessages` to re-send the messages that were not confirmed by the broker, for instance, in case of a disconnection. 10 | Then, the `unConfirmedMessages` are sent to the broker again. 11 | Note: 12 | - The `unConfirmedMessages` are not persisted, so if the application is restarted, the `unConfirmedMessages` will be lost. 13 | - The `unConfirmedMessages` order is not guaranteed 14 | - The `unConfirmedMessages` can grow indefinitely if the broker is unavailable for a long time. 15 | - The `re-send` in an option that can be enabled by setting `enableResend` to `true`. 
16 | 17 | The example enables golang `pprof` you can check the url: localhost:6060/debug/pprof/.
The goal is to check the resources used by the application in case of reconnection.
30 | SetMaxLengthBytes(stream.ByteCapacity{}.GB(2))) 31 | 32 | // ignore the error if the stream already exists 33 | if err != nil && !errors.Is(err, stream.StreamAlreadyExists) { 34 | fmt.Printf("Error declaring stream: %v\n", err) 35 | return 36 | } 37 | 38 | // declare the reliable consumer using the package ha 39 | consumer, err := ha.NewReliableConsumer(env, streamName, 40 | // start from the beginning of the stream 41 | stream.NewConsumerOptions(). 42 | SetOffset(stream.OffsetSpecification{}.First()), 43 | // handler where the messages will be processed 44 | func(_ stream.ConsumerContext, message *amqp.Message) { 45 | fmt.Printf("Message received: %s\n", message.GetData()) 46 | }) 47 | 48 | if err != nil { 49 | fmt.Printf("Error creating consumer: %v\n", err) 50 | return 51 | } 52 | 53 | // Create the reliable producer using the package ha 54 | producer, err := ha.NewReliableProducer(env, streamName, 55 | // we leave the default options 56 | stream.NewProducerOptions(), 57 | // handler for the confirmation of the messages 58 | func(messageConfirm []*stream.ConfirmationStatus) { 59 | for _, msg := range messageConfirm { 60 | if msg.IsConfirmed() { 61 | fmt.Printf("message %s confirmed \n", msg.GetMessage().GetData()) 62 | } else { 63 | fmt.Printf("message %s failed \n", msg.GetMessage().GetData()) 64 | } 65 | } 66 | }) 67 | 68 | if err != nil { 69 | fmt.Printf("Error creating producer: %v\n", err) 70 | return 71 | } 72 | 73 | // Send a message 74 | for i := range 10 { 75 | err = producer.Send(amqp.NewMessage([]byte(fmt.Sprintf("Hello stream:%d", i)))) 76 | if err != nil { 77 | fmt.Printf("Error sending message: %v\n", err) 78 | return 79 | } 80 | } 81 | 82 | // press any key to exit 83 | fmt.Printf("Press any close the producer, consumer and environment\n") 84 | _, _ = fmt.Scanln() 85 | 86 | //// Close the producer 87 | err = producer.Close() 88 | if err != nil { 89 | fmt.Printf("Error closing producer: %v\n", err) 90 | } 91 | 92 | // Close the consumer 93 | 
err = consumer.Close() 94 | if err != nil { 95 | fmt.Printf("Error closing consumer: %v\n", err) 96 | } 97 | 98 | err = env.DeleteStream(streamName) 99 | if err != nil { 100 | fmt.Printf("Error deleting stream: %v\n", err) 101 | } 102 | 103 | // Close the environment 104 | err = env.Close() 105 | if err != nil { 106 | fmt.Printf("Error closing environment: %s\n", err) 107 | } 108 | } 109 | -------------------------------------------------------------------------------- /examples/single_active_consumer/README.md: -------------------------------------------------------------------------------- 1 | ## Single Active Consumer Example 2 | 3 | This example demonstrates how to use the `Single Active Consumer` pattern to ensure that only one consumer processes messages from a stream at a time. 4 | 5 | ### Run the example 6 | 7 | 8 | 1. Start the producer: 9 | ```bash 10 | go run producer.go 11 | ``` 12 | The producer will start sending messages to the stream. It is voluntary slow to make the example easy. 13 | You should see the following output: 14 | 15 | ```bash 16 | go run producer/producer.go 17 | Producer for Single Active Consumer example 18 | Connecting to RabbitMQ streaming ... 19 | [08:16:20] sending message hello_world_0 ... 20 | [08:16:20] message [hello_world_0] stored 21 | [08:16:23] sending message hello_world_1 ... 22 | [08:16:23] message [hello_world_1] stored 23 | [08:16:26] sending message hello_world_2 ... 24 | [08:16:26] message [hello_world_2] stored 25 | [08:16:29] sending message hello_world_3 ... 26 | [08:16:29] message [hello_world_3] stored 27 | [08:16:32] sending message hello_world_4 ... 28 | [08:16:32] message [hello_world_4] stored 29 | [08:16:35] sending message hello_world_5 ... 30 | [08:16:35] message [hello_world_5] stored 31 | [08:16:38] sending message hello_world_6 ... 32 | [08:16:38] message [hello_world_6] stored 33 | ``` 34 | 2. 
Start the consumer: 35 | In a new terminal, run the consumer: 36 | ```bash 37 | go run single_active_consumer.go myFirstConsumer 38 | ``` 39 | 40 | You should see the following output: 41 | 42 | ```bash 43 | Single Active Consumer example. 44 | Connecting to RabbitMQ streaming ... 45 | Press any key to stop 46 | Single Active Consumer example. 47 | Connecting to RabbitMQ streaming ... 48 | Press any key to stop 49 | [08:16:32] - Consumer promoted. Active status: true 50 | [08:16:32] - [ myFirstConsumer ] - consumer name: MyApplication, data: [hello_world_0], message offset 0, 51 | [08:16:32] - [ myFirstConsumer ] - consumer name: MyApplication, data: [hello_world_1], message offset 1, 52 | [08:16:32] - [ myFirstConsumer ] - consumer name: MyApplication, data: [hello_world_2], message offset 2, 53 | [08:16:32] - [ myFirstConsumer ] - consumer name: MyApplication, data: [hello_world_3], message offset 3, 54 | [08:16:32] - [ myFirstConsumer ] - consumer name: MyApplication, data: [hello_world_4], message offset 4, 55 | [08:16:35] - [ myFirstConsumer ] - consumer name: MyApplication, data: [hello_world_5], message offset 6, 56 | [08:16:38] - [ myFirstConsumer ] - consumer name: MyApplication, data: [hello_world_6], message offset 8, 57 | [08:16:41] - [ myFirstConsumer ] - consumer name: MyApplication, data: [hello_world_7], message offset 10, 58 | [08:16:44] - [ myFirstConsumer ] - consumer name: MyApplication, data: [hello_world_8], message offset 12, 59 | [08:16:47] - [ myFirstConsumer ] - consumer name: MyApplication, data: [hello_world_9], message offset 14, 60 | [08:16:50] - [ myFirstConsumer ] - consumer name: MyApplication, data: [hello_world_10], message offset 16, 61 | [08:16:53] - [ myFirstConsumer ] - consumer name: MyApplication, data: [hello_world_11], message offset 18, 62 | [08:16:56] - [ myFirstConsumer ] - consumer name: MyApplication, data: [hello_world_12], message offset 20, 63 | [08:16:59] - [ myFirstConsumer ] - consumer name: MyApplication, data: 
[hello_world_13], message offset 22, 64 | [08:17:02] - [ myFirstConsumer ] - consumer name: MyApplication, data: [hello_world_14], message offset 24, 65 | [08:17:05] - [ myFirstConsumer ] - consumer name: MyApplication, data: [hello_world_15], message offset 26, 66 | [08:17:08] - [ myFirstConsumer ] - consumer name: MyApplication, data: [hello_world_16], message offset 28, 67 | [08:17:11] - [ myFirstConsumer ] - consumer name: MyApplication, data: [hello_world_17], message offset 30, 68 | ``` 69 | 70 | 3. Start a second consumer: 71 | In a new terminal, run the consumer: 72 | ```bash 73 | go run single_active_consumer.go mySecondConsumer 74 | ``` 75 | 76 | You should see the following output: 77 | 78 | ```bash 79 | Single Active Consumer example. 80 | Connecting to RabbitMQ streaming ... 81 | Press any key to stop 82 | ``` 83 | 84 | 4. Stop the first consumer: 85 | 86 | In the first terminal, press any key to stop the first consumer.
87 | The second consumer should be promoted to active status and restart processing messages from the last stored offset. 88 | 89 | You should see: 90 | ```bash 91 | [08:17:11] - Consumer promoted. Active status: true 92 | [08:17:14] - [ mySecondConsumer ] - consumer name: MyApplication, data: [hello_world_18], message offset 32, 93 | [08:17:17] - [ mySecondConsumer ] - consumer name: MyApplication, data: [hello_world_19], message offset 34, 94 | [08:17:20] - [ mySecondConsumer ] - consumer name: MyApplication, data: [hello_world_20], message offset 36, 95 | [08:17:23] - [ mySecondConsumer ] - consumer name: MyApplication, data: [hello_world_21], message offset 38, 96 | [08:17:26] - [ mySecondConsumer ] - consumer name: MyApplication, data: [hello_world_22], message offset 40, 97 | [08:17:29] - [ mySecondConsumer ] - consumer name: MyApplication, data: [hello_world_23], message offset 42, 98 | [08:17:32] - [ mySecondConsumer ] - consumer name: MyApplication, data: [hello_world_24], message offset 44, 99 | [08:17:35] - [ mySecondConsumer ] - consumer name: MyApplication, data: [hello_world_25], message offset 46, 100 | [08:17:38] - [ mySecondConsumer ] - consumer name: MyApplication, data: [hello_world_26], message offset 48, 101 | [08:17:41] - [ mySecondConsumer ] - consumer name: MyApplication, data: [hello_world_27], message offset 50, 102 | [08:17:44] - [ mySecondConsumer ] - consumer name: MyApplication, data: [hello_world_28], message offset 52, 103 | [08:17:47] - [ mySecondConsumer ] - consumer name: MyApplication, data: [hello_world_29], message offset 54, 104 | [08:17:50] - [ mySecondConsumer ] - consumer name: MyApplication, data: [hello_world_30], message offset 56, 105 | ``` 106 | 107 | -------------------------------------------------------------------------------- /examples/single_active_consumer/producer/producer.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "errors" 5 | 
"fmt" 6 | "os" 7 | "time" 8 | 9 | "github.com/rabbitmq/rabbitmq-stream-go-client/pkg/amqp" 10 | "github.com/rabbitmq/rabbitmq-stream-go-client/pkg/stream" 11 | ) 12 | 13 | func CheckErr(err error) { 14 | if err != nil { 15 | fmt.Printf("%s ", err) 16 | os.Exit(1) 17 | } 18 | } 19 | 20 | func handlePublishConfirm(confirms stream.ChannelPublishConfirm) { 21 | go func() { 22 | for confirmed := range confirms { 23 | for _, msg := range confirmed { 24 | if msg.IsConfirmed() { 25 | fmt.Printf("[%s] message %s stored \n", time.Now().Format(time.TimeOnly), msg.GetMessage().GetData()) 26 | } else { 27 | fmt.Printf("[%s] message %s failed \n", time.Now().Format(time.TimeOnly), msg.GetMessage().GetData()) 28 | } 29 | } 30 | } 31 | }() 32 | } 33 | 34 | func main() { 35 | fmt.Println("Producer for Single Active Consumer example") 36 | fmt.Println("Connecting to RabbitMQ streaming ...") 37 | 38 | // Connect to the broker ( or brokers ) 39 | env, err := stream.NewEnvironment( 40 | stream.NewEnvironmentOptions(). 41 | SetHost("localhost"). 42 | SetPort(5552). 43 | SetUser("guest"). 44 | SetPassword("guest")) 45 | CheckErr(err) 46 | streamName := "SingleActiveConsumer" 47 | err = env.DeleteStream(streamName) 48 | if err != nil && errors.Is(err, stream.StreamDoesNotExist) { 49 | // we can ignore the error if the stream does not exist 50 | // it will be created later 51 | fmt.Println("Stream does not exist. 
") 52 | } else { 53 | CheckErr(err) 54 | } 55 | 56 | err = env.DeclareStream(streamName, 57 | &stream.StreamOptions{ 58 | MaxLengthBytes: stream.ByteCapacity{}.GB(2), 59 | }, 60 | ) 61 | CheckErr(err) 62 | 63 | // Get a new producer for a stream 64 | producer, err := env.NewProducer(streamName, nil) 65 | CheckErr(err) 66 | 67 | // optional publish confirmation channel 68 | chPublishConfirm := producer.NotifyPublishConfirmation() 69 | handlePublishConfirm(chPublishConfirm) 70 | 71 | // Put some sleep to make the example easy 72 | for i := range 10000 { 73 | var body = fmt.Sprintf("hello_world_%d", i) 74 | fmt.Printf("[%s] sending message %s ...\n", time.Now().Format(time.TimeOnly), body) 75 | err := producer.Send(amqp.NewMessage([]byte(body))) 76 | CheckErr(err) 77 | time.Sleep(3 * time.Second) 78 | } 79 | 80 | err = producer.Close() 81 | CheckErr(err) 82 | 83 | err = env.Close() 84 | CheckErr(err) 85 | } 86 | -------------------------------------------------------------------------------- /examples/single_active_consumer/single_active_consumer.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "os" 7 | "time" 8 | 9 | "github.com/rabbitmq/rabbitmq-stream-go-client/pkg/amqp" 10 | "github.com/rabbitmq/rabbitmq-stream-go-client/pkg/stream" 11 | ) 12 | 13 | func CheckErrConsumer(err error) { 14 | if err != nil { 15 | fmt.Printf("%s ", err) 16 | os.Exit(1) 17 | } 18 | } 19 | func main() { 20 | if len(os.Args) != 2 { 21 | fmt.Printf("You need to specify the Name\n") 22 | os.Exit(1) 23 | } 24 | appName := os.Args[1] 25 | reader := bufio.NewReader(os.Stdin) 26 | 27 | // You need RabbitMQ 3.11.0 or later to run this example 28 | fmt.Println("Single Active Consumer example.") 29 | fmt.Println("Connecting to RabbitMQ streaming ...") 30 | 31 | // Connect to the broker ( or brokers ) 32 | env, err := stream.NewEnvironment( 33 | stream.NewEnvironmentOptions(). 34 | SetHost("localhost"). 
35 | SetPort(5552). 36 | SetUser("guest"). 37 | SetPassword("guest")) 38 | CheckErrConsumer(err) 39 | 40 | streamName := "SingleActiveConsumer" 41 | // you need to set the same name. 42 | // The name indicates the group of consumers 43 | // to make the single active consumer work 44 | consumerName := "MyApplication" 45 | 46 | handleMessages := func(consumerContext stream.ConsumerContext, message *amqp.Message) { 47 | fmt.Printf("[%s] - [ %s ] - consumer name: %s, data: %s, message offset %d, \n ", 48 | time.Now().Format(time.TimeOnly), 49 | appName, 50 | consumerContext.Consumer.GetName(), message.Data, consumerContext.Consumer.GetOffset()) 51 | // This is only for the example, in a real application you should not store the offset 52 | // for each message, it is better to store the offset for a batch of messages 53 | err := consumerContext.Consumer.StoreOffset() 54 | 55 | CheckErrConsumer(err) 56 | } 57 | 58 | consumerUpdate := func(streamName string, isActive bool) stream.OffsetSpecification { 59 | // This function is called when the consumer is promoted to active 60 | // be careful with the logic here, it is called in the consumer thread 61 | // the code here should be fast, non-blocking and without side effects 62 | fmt.Printf("[%s] - Consumer promoted for: %s. 
Active status: %t\n", time.Now().Format(time.TimeOnly), 63 | streamName, isActive) 64 | 65 | // In this example, we store the offset server side and we retrieve it 66 | // when the consumer is promoted to active 67 | offset, err := env.QueryOffset(consumerName, streamName) 68 | if err != nil { 69 | // If the offset is not found, we start from the beginning 70 | return stream.OffsetSpecification{}.First() 71 | } 72 | 73 | // If the offset is found, we start from the last offset 74 | // we add 1 to the offset to start from the next message 75 | return stream.OffsetSpecification{}.Offset(offset + 1) 76 | } 77 | 78 | consumer, err := env.NewConsumer( 79 | streamName, 80 | handleMessages, 81 | stream.NewConsumerOptions(). 82 | SetConsumerName(consumerName). 83 | // It is not needed to set the SetOffset() when the SingleActiveConsumer is active 84 | // the `consumerUpdate` function replaces it 85 | SetSingleActiveConsumer( 86 | stream.NewSingleActiveConsumer(consumerUpdate))) 87 | 88 | CheckErrConsumer(err) 89 | 90 | fmt.Println("Press any key to stop ") 91 | _, _ = reader.ReadString('\n') 92 | err = consumer.Close() 93 | CheckErrConsumer(err) 94 | fmt.Printf("[%s] Consumer stopped.... 
in 5 seconds the environment will be closed", time.Now().Format(time.TimeOnly)) 95 | time.Sleep(5 * time.Second) 96 | err = env.Close() 97 | CheckErrConsumer(err) 98 | } 99 | -------------------------------------------------------------------------------- /examples/sub-entries-batching/sub_entries_batching.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "os" 7 | "strconv" 8 | "time" 9 | 10 | "github.com/google/uuid" 11 | "github.com/rabbitmq/rabbitmq-stream-go-client/pkg/amqp" 12 | "github.com/rabbitmq/rabbitmq-stream-go-client/pkg/stream" 13 | ) 14 | 15 | func CheckErr(err error) { 16 | if err != nil { 17 | fmt.Printf("%s ", err) 18 | os.Exit(1) 19 | } 20 | } 21 | 22 | func handlePublishConfirm(confirms stream.ChannelPublishConfirm) { 23 | go func() { 24 | for confirmed := range confirms { 25 | for _, msg := range confirmed { 26 | if msg.IsConfirmed() { 27 | fmt.Printf("message %s stored \n ", msg.GetMessage().GetData()) 28 | } else { 29 | fmt.Printf("message %s failed \n ", msg.GetMessage().GetData()) 30 | } 31 | } 32 | } 33 | }() 34 | } 35 | 36 | func consumerClose(channelClose stream.ChannelClose) { 37 | event := <-channelClose 38 | fmt.Printf("Consumer: %s closed on the stream: %s, reason: %s \n", event.Name, event.StreamName, event.Reason) 39 | } 40 | 41 | func main() { 42 | reader := bufio.NewReader(os.Stdin) 43 | 44 | fmt.Println("Sub Entry batch example") 45 | fmt.Println("Connecting to RabbitMQ streaming ...") 46 | 47 | // Connect to the broker ( or brokers ) 48 | env, err := stream.NewEnvironment( 49 | stream.NewEnvironmentOptions(). 50 | SetHost("localhost"). 51 | SetPort(5552). 52 | SetUser("guest"). 
53 | SetPassword("guest")) 54 | CheckErr(err) 55 | // Create a stream, you can create streams without any option like: 56 | // err = env.DeclareStream(streamName, nil) 57 | // it is a best practise to define a size, 1GB for example: 58 | 59 | streamName := uuid.New().String() 60 | err = env.DeclareStream(streamName, 61 | &stream.StreamOptions{ 62 | MaxLengthBytes: stream.ByteCapacity{}.GB(2), 63 | }, 64 | ) 65 | CheckErr(err) 66 | 67 | // Get a new producer for a stream 68 | // define the sub entry batch using producer options 69 | // ex: stream.NewProducerOptions().SetSubEntrySize(100) 70 | // set compression: SetCompression(stream.Compression{}.Gzip() 71 | // or SetCompression(stream.Compression{}.None() <<-- Default value 72 | producer, err := env.NewProducer(streamName, stream.NewProducerOptions(). 73 | SetSubEntrySize(100). 74 | SetCompression(stream.Compression{}.Gzip())) 75 | CheckErr(err) 76 | 77 | // optional publish confirmation channel 78 | chPublishConfirm := producer.NotifyPublishConfirmation() 79 | handlePublishConfirm(chPublishConfirm) 80 | 81 | // the send method automatically aggregates the messages 82 | // based on batch size 83 | for i := range 10000 { 84 | err := producer.Send(amqp.NewMessage([]byte("hello_world_" + strconv.Itoa(i)))) 85 | CheckErr(err) 86 | } 87 | 88 | // this sleep is not mandatory, just to show the confirmed messages 89 | time.Sleep(1 * time.Second) 90 | err = producer.Close() 91 | CheckErr(err) 92 | 93 | // Consumer side don't need to specify anything 94 | // 95 | handleMessages := func(consumerContext stream.ConsumerContext, message *amqp.Message) { 96 | fmt.Printf("consumer name: %s, text: %s \n ", consumerContext.Consumer.GetName(), message.Data) 97 | } 98 | 99 | consumer, err := env.NewConsumer( 100 | streamName, 101 | handleMessages, 102 | stream.NewConsumerOptions(). 103 | SetConsumerName("my_consumer"). 
// set a consumer name 104 | SetOffset(stream.OffsetSpecification{}.First())) // start consuming from the beginning 105 | CheckErr(err) 106 | channelClose := consumer.NotifyClose() 107 | // channelClose receives all the closing events, here you can handle the 108 | // client reconnection or just log 109 | defer consumerClose(channelClose) 110 | 111 | fmt.Println("Press any key to stop ") 112 | _, _ = reader.ReadString('\n') 113 | err = consumer.Close() 114 | time.Sleep(200 * time.Millisecond) 115 | CheckErr(err) 116 | err = env.DeleteStream(streamName) 117 | CheckErr(err) 118 | err = env.Close() 119 | CheckErr(err) 120 | } 121 | -------------------------------------------------------------------------------- /examples/super_stream/README.md: -------------------------------------------------------------------------------- 1 | ## Super Stream Example 2 | 3 | This example demonstrates how to use the [`Super Stream` feature](https://www.rabbitmq.com/blog/2022/07/13/rabbitmq-3-11-feature-preview-super-streams). 4 | 5 | 6 | ### Run the example 7 | 8 | 1. Start the producer: 9 | ```bash 10 | go run producer/producer.go 11 | ``` 12 | 13 | The producer will start sending messages to the stream. It is voluntary slow to make the example easy. 14 | 15 | You should see the following output: 16 | 17 | ```bash 18 | Super stream example - partitions 19 | Connecting to RabbitMQ streaming ... 20 | Message with key: key_0 stored in partition invoices-0, total: 1 21 | Message with key: key_1 stored in partition invoices-1, total: 2 22 | Message with key: key_2 stored in partition invoices-2, total: 3 23 | ``` 24 | 25 | 2. 
Start three consumers in three different terminals: 26 | 27 | and you should see the following output: 28 | ```bash 29 | [15:19:38] - [ partition: invoices-0] consumer name: MyApplication, data: [hello_super_stream_3], message offset 8398, 30 | [15:19:39] - [ partition: invoices-0] consumer name: MyApplication, data: [hello_super_stream_4], message offset 8400, 31 | [15:19:40] - [ partition: invoices-0] consumer name: MyApplication, data: [hello_super_stream_7], message offset 8402, 32 | [15:19:42] - [ partition: invoices-0] consumer name: MyApplication, data: [hello_super_stream_10], message offset 8404, 33 | [15:19:42] - [ partition: invoices-0] consumer name: MyApplication, data: [hello_super_stream_11], message offset 8406, 34 | ``` 35 | 36 | ```bash 37 | [15:19:29] - Consumer update for: invoices-1. The cosumer is now active ....Restarting from offset: offset, value: 8628 38 | [15:19:37] - [ partition: invoices-1] consumer name: MyApplication, data: [hello_super_stream_1], message offset 8638, 39 | [15:19:41] - [ partition: invoices-1] consumer name: MyApplication, data: [hello_super_stream_8], message offset 8640, 40 | [15:19:41] - [ partition: invoices-1] consumer name: MyApplication, data: [hello_super_stream_9], message offset 8642, 41 | [15:19:43] - [ partition: invoices-1] consumer name: MyApplication, data: [hello_super_stream_12], message offset 8644, 42 | ``` 43 | 44 | 45 | ```bash 46 | [15:19:38] - [ partition: invoices-2] consumer name: MyApplication, data: [hello_super_stream_2], message offset 8501, 47 | [15:19:39] - [ partition: invoices-2] consumer name: MyApplication, data: [hello_super_stream_5], message offset 8503, 48 | [15:19:40] - [ partition: invoices-2] consumer name: MyApplication, data: [hello_super_stream_6], message offset 8505, 49 | [15:19:43] - [ partition: invoices-2] consumer name: MyApplication, data: [hello_super_stream_13], message offset 8507, 50 | [15:19:44] - [ partition: invoices-2] consumer name: MyApplication, data: 
[hello_super_stream_15], message offset 8509, 51 | ``` 52 | 53 | 54 | Stop random consumers and see how the system rebalances the partitions. 55 | -------------------------------------------------------------------------------- /examples/super_stream/producer/super_stream_producer.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "errors" 6 | "fmt" 7 | "math/rand" 8 | "os" 9 | "strings" 10 | "sync/atomic" 11 | "time" 12 | 13 | "github.com/rabbitmq/rabbitmq-stream-go-client/pkg/amqp" 14 | "github.com/rabbitmq/rabbitmq-stream-go-client/pkg/message" 15 | "github.com/rabbitmq/rabbitmq-stream-go-client/pkg/stream" 16 | ) 17 | 18 | func CheckErr(err error) { 19 | if err != nil { 20 | fmt.Printf("%s ", err) 21 | os.Exit(1) 22 | } 23 | } 24 | func main() { 25 | // Example for super stream with partitions 26 | fmt.Println("Super stream example - partitions") 27 | fmt.Println("Connecting to RabbitMQ streaming ...") 28 | 29 | // Set the log level to DEBUG. 30 | // Enable it only for debugging purposes or to 31 | // have more information about the client behavior 32 | // stream.SetLevelInfo(logs.DEBUG) 33 | 34 | // Connect to the broker ( or brokers ) 35 | env, err := stream.NewEnvironment( 36 | stream.NewEnvironmentOptions()) 37 | CheckErr(err) 38 | superStreamName := "invoices" 39 | 40 | // Create a super stream 41 | err = env.DeclareSuperStream(superStreamName, 42 | // the partitions strategy is mandatory 43 | // can be partition or by key ( see BindingsOptions) 44 | // In this case we create a super stream with 3 partitions 45 | stream.NewPartitionsOptions(3). 
46 | SetMaxLengthBytes(stream.ByteCapacity{}.GB(3))) 47 | CheckErr(err) 48 | 49 | // Create a superStreamProducer 50 | superStreamProducer, err := env.NewSuperStreamProducer(superStreamName, 51 | stream.NewSuperStreamProducerOptions( 52 | stream.NewHashRoutingStrategy(func(message message.StreamMessage) string { 53 | // here the code _must_ be fast and safe 54 | // The code evaluation is before sending the message 55 | return message.GetMessageProperties().MessageID.(string) 56 | })).SetClientProvidedName("my-super-stream-producer")) 57 | CheckErr(err) 58 | 59 | // HandlePartitionClose it not mandatory, but it is a good practice to handle it 60 | go func(ch <-chan stream.PPartitionClose) { 61 | // Here we deal with the partition close event 62 | // in case the connection is dropped due of network issues or metadata update 63 | // we can reconnect using context 64 | for partitionCloseEvent := range ch { 65 | // important to check the event Reason. SocketClosed and MetaDataUpdate 66 | // are usually unexpected reasons 67 | if strings.EqualFold(partitionCloseEvent.Event.Reason, stream.SocketClosed) || strings.EqualFold(partitionCloseEvent.Event.Reason, stream.MetaDataUpdate) { 68 | // A random sleep is recommended to avoid to try too often. 69 | // avoid to reconnect in the same time in case there are multiple clients 70 | sleepValue := rand.Intn(5) + 2 71 | fmt.Printf("Partition %s closed unexpectedly! Reconnecting in %v seconds..\n", partitionCloseEvent.Partition, sleepValue) 72 | time.Sleep(time.Duration(sleepValue) * time.Second) 73 | err := partitionCloseEvent.Context.ConnectPartition(partitionCloseEvent.Partition) 74 | // tries only one time. 
Good for testing not enough for real use case 75 | CheckErr(err) 76 | fmt.Printf("Partition %s reconnected.\n", partitionCloseEvent.Partition) 77 | } 78 | } 79 | }(superStreamProducer.NotifyPartitionClose(1)) 80 | 81 | var confirmed int32 82 | var failed int32 83 | go func(ch <-chan stream.PartitionPublishConfirm) { 84 | for superStreamPublishConfirm := range ch { 85 | for _, confirm := range superStreamPublishConfirm.ConfirmationStatus { 86 | if confirm.IsConfirmed() { 87 | fmt.Printf("Message with key: %s stored in partition %s, total: %d\n", 88 | confirm.GetMessage().GetMessageProperties().MessageID, 89 | superStreamPublishConfirm.Partition, 90 | atomic.AddInt32(&confirmed, 1)) 91 | } else { 92 | // here you should store the message in another list and try again 93 | // like unConfirmed.append(msg...) messages ... 94 | // In this example we won't handle it to leave it simple 95 | // the messages can't be stored for different reasons ( see the ConfirmationStatus for more details) 96 | atomic.AddInt32(&failed, 1) 97 | fmt.Printf("Message failed to be stored in partition %s\n", superStreamPublishConfirm.Partition) 98 | } 99 | } 100 | } 101 | }(superStreamProducer.NotifyPublishConfirmation(1)) 102 | 103 | // Publish messages 104 | loop: 105 | for i := range 5_000 { 106 | msg := amqp.NewMessage([]byte(fmt.Sprintf("hello_super_stream_%d", i))) 107 | msg.Properties = &amqp.MessageProperties{ 108 | MessageID: fmt.Sprintf("key_%d", i), 109 | } 110 | err = superStreamProducer.Send(msg) 111 | switch { 112 | case errors.Is(err, stream.ErrProducerNotFound): 113 | atomic.AddInt32(&failed, 1) 114 | 115 | // that's can be a temp situation. 116 | // maybe the producer is in reconnection due of unexpected disconnection 117 | // it is up to the user to decide what to do. 118 | // In this can we can ignore the log and continue to send messages 119 | fmt.Printf("can't send the message ... 
the producer was not found") 120 | // here you should store the message in another list and try again 121 | // like unConfirmed.append(msg...) messages ... 122 | // In this example we won't handle it to leave it simple 123 | // like the superStreamPublishConfirm event for messages 124 | break loop 125 | case errors.Is(err, stream.ErrMessageRouteNotFound): 126 | atomic.AddInt32(&failed, 1) 127 | // the message can't be routed to a partition 128 | // this error can happen if the routing strategy can't find a partition 129 | // in this specific case the routing strategy is a hash routing strategy so won't happen 130 | // if the strategy is based on key routing strategy it can happen if the key is not found 131 | fmt.Printf("can't send the message ... the message route was not found") 132 | break loop 133 | default: 134 | CheckErr(err) 135 | } 136 | time.Sleep(500 * time.Millisecond) 137 | } 138 | 139 | reader := bufio.NewReader(os.Stdin) 140 | fmt.Println("Press enter to close the producer") 141 | _, _ = reader.ReadString('\n') 142 | err = superStreamProducer.Close() 143 | CheckErr(err) 144 | fmt.Printf("Producer closed. 
Total confirmed: %d, total failed: %d total messages: %d\n", 145 | confirmed, failed, confirmed+failed) 146 | 147 | fmt.Println("Press enter to exit") 148 | _, _ = reader.ReadString('\n') 149 | } 150 | -------------------------------------------------------------------------------- /examples/super_stream/super_stream_sac.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "math/rand" 7 | "os" 8 | "strings" 9 | "time" 10 | 11 | "github.com/rabbitmq/rabbitmq-stream-go-client/pkg/amqp" 12 | "github.com/rabbitmq/rabbitmq-stream-go-client/pkg/stream" 13 | ) 14 | 15 | func CheckErr(err error) { 16 | if err != nil { 17 | fmt.Printf("%s ", err) 18 | os.Exit(1) 19 | } 20 | } 21 | func main() { 22 | // *** RUN the producer first ***** 23 | // Example for super stream with single active consumer 24 | // with EnableSingleActiveConsumer is possible to enable/disable the single active consumer feature 25 | const EnableSingleActiveConsumer = true 26 | 27 | // stream.SetLevelInfo(logs.DEBUG) 28 | appName := "MyApplication" 29 | 30 | fmt.Printf("Super stream consumer example - Single Active Consumer active: %t\n", EnableSingleActiveConsumer) 31 | fmt.Println("Connecting to RabbitMQ streaming ...") 32 | 33 | // Connect to the broker ( or brokers ) 34 | env, err := stream.NewEnvironment( 35 | stream.NewEnvironmentOptions()) 36 | CheckErr(err) 37 | superStreamName := "invoices" 38 | 39 | // Create a super stream 40 | err = env.DeclareSuperStream(superStreamName, 41 | // the partitions strategy is mandatory 42 | // can be partition or by key ( see BindingsOptions) 43 | // In this case we create a super stream with 3 partitions 44 | stream.NewPartitionsOptions(3). 
45 | SetMaxLengthBytes(stream.ByteCapacity{}.GB(3))) 46 | CheckErr(err) 47 | 48 | handleMessages := func(consumerContext stream.ConsumerContext, message *amqp.Message) { 49 | fmt.Printf("[%s] - [ partition: %s] consumer name: %s, data: %s, message offset %d, \n ", 50 | time.Now().Format(time.TimeOnly), 51 | consumerContext.Consumer.GetStreamName(), 52 | consumerContext.Consumer.GetName(), message.Data, consumerContext.Consumer.GetOffset()) 53 | // This is only for the example, in a real application you should not store the offset 54 | // for each message, it is better to store the offset for a batch of messages 55 | err := consumerContext.Consumer.StoreOffset() 56 | CheckErr(err) 57 | } 58 | 59 | // Create a single active consumer struct 60 | // In this example, we set the offset to the last one stored 61 | sac := stream.NewSingleActiveConsumer( 62 | func(partition string, isActive bool) stream.OffsetSpecification { 63 | // This function is called when the consumer is promoted to active 64 | // or not active anymore 65 | restart := stream.OffsetSpecification{}.First() 66 | offset, err := env.QueryOffset(appName, partition) 67 | if err == nil { 68 | restart = stream.OffsetSpecification{}.Offset(offset + 1) 69 | } 70 | 71 | addInfo := fmt.Sprintf("The cosumer is now active ....Restarting from offset: %s", restart) 72 | if !isActive { 73 | addInfo = "The consumer is not active anymore for this partition." 74 | } 75 | 76 | fmt.Printf("[%s] - Consumer update for: %s. %s\n", time.Now().Format(time.TimeOnly), 77 | partition, addInfo) 78 | 79 | return restart 80 | }, 81 | ) 82 | 83 | // Create a superStreamConsumer 84 | superStreamConsumer, err := env.NewSuperStreamConsumer(superStreamName, handleMessages, 85 | stream.NewSuperStreamConsumerOptions(). 86 | SetSingleActiveConsumer(sac. 
87 | // by default the single active consumer is enabled 88 | // This flag is used to enable/disable the single active consumer feature 89 | // In normal use cases, it is not needed to disable it 90 | SetEnabled(EnableSingleActiveConsumer)). 91 | 92 | // The consumer name is mandatory to enable the single active consumer 93 | // and _must_ be the same for all the consumers in the same group 94 | SetConsumerName(appName). 95 | SetOffset(stream.OffsetSpecification{}.First())) 96 | 97 | CheckErr(err) 98 | 99 | // HandlePartitionClose it not mandatory, but it is a good practice to handle it 100 | go func(ch <-chan stream.CPartitionClose) { 101 | // Here we deal with the partition close event 102 | // in case the connection is dropped due of network issues or metadata update 103 | // we can reconnect using context 104 | for partitionCloseEvent := range ch { 105 | // important to check the event Reason. SocketClosed and MetaDataUpdate 106 | // are usually unexpected reasons 107 | if strings.EqualFold(partitionCloseEvent.Event.Reason, stream.SocketClosed) || strings.EqualFold(partitionCloseEvent.Event.Reason, stream.MetaDataUpdate) { 108 | // A random sleep is recommended to avoid to try too often. 109 | // avoid to reconnect in the same time in case there are multiple clients 110 | sleepValue := rand.Intn(5) + 2 111 | fmt.Printf("Partition %s closed unexpectedly! Reconnecting in %v seconds..\n", partitionCloseEvent.Partition, sleepValue) 112 | time.Sleep(time.Duration(sleepValue) * time.Second) 113 | 114 | restart := stream.OffsetSpecification{}.First() 115 | offset, err := env.QueryOffset(appName, partitionCloseEvent.Partition) 116 | if err == nil { 117 | restart = stream.OffsetSpecification{}.Offset(offset + 1) 118 | } 119 | 120 | err = partitionCloseEvent.Context.ConnectPartition(partitionCloseEvent.Partition, restart) 121 | // tries only one time. 
Good for testing not enough for real use case 122 | CheckErr(err) 123 | fmt.Printf("Partition %s reconnected.\n", partitionCloseEvent.Partition) 124 | } 125 | } 126 | }(superStreamConsumer.NotifyPartitionClose(1)) 127 | 128 | reader := bufio.NewReader(os.Stdin) 129 | fmt.Println("Press enter to close the consumer") 130 | _, _ = reader.ReadString('\n') 131 | err = superStreamConsumer.Close() 132 | CheckErr(err) 133 | } 134 | -------------------------------------------------------------------------------- /examples/tail/Readme.md: -------------------------------------------------------------------------------- 1 | Stream Tail 2 | === 3 | 4 | Arguments: 5 | 6 | 1. Stream URI 7 | 2. Stream name 8 | 3. Start position (first, last, next) 9 | ``` 10 | "rabbitmq-stream://guest:guest@localhost:5552/" my_stream first 11 | ``` -------------------------------------------------------------------------------- /examples/tail/stream_tail.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "os" 7 | "sync/atomic" 8 | 9 | "github.com/rabbitmq/rabbitmq-stream-go-client/pkg/amqp" 10 | "github.com/rabbitmq/rabbitmq-stream-go-client/pkg/stream" 11 | ) 12 | 13 | func CheckErr(err error) { 14 | if err != nil { 15 | fmt.Printf("%s ", err) 16 | os.Exit(1) 17 | } 18 | } 19 | 20 | func main() { 21 | reader := bufio.NewReader(os.Stdin) 22 | serverUri := os.Args[1] 23 | streamName := os.Args[2] 24 | offsetStart := os.Args[3] 25 | offsetSpec := stream.OffsetSpecification{}.First() 26 | 27 | switch offsetStart { 28 | case "first": 29 | offsetSpec = stream.OffsetSpecification{}.First() 30 | case "last": 31 | offsetSpec = stream.OffsetSpecification{}.Last() 32 | case "next": 33 | offsetSpec = stream.OffsetSpecification{}.Next() 34 | } 35 | 36 | fmt.Printf("Stream Tail, serverUri: %s, streamName: %s, offsetStart: %s \n", 37 | serverUri, streamName, offsetStart) 38 | fmt.Println("Connecting to RabbitMQ 
streaming ...") 39 | 40 | env, err := stream.NewEnvironment( 41 | stream.NewEnvironmentOptions().SetUri(serverUri). 42 | SetMaxConsumersPerClient(1)) 43 | CheckErr(err) 44 | 45 | var counter int32 46 | handleMessages := func(_ stream.ConsumerContext, message *amqp.Message) { 47 | prop := message.Properties 48 | appProp := message.ApplicationProperties 49 | fmt.Printf("\n") 50 | fmt.Printf("message body: %s properties: %v, app properties: %s, consumed: %d \n ", 51 | message.GetData(), prop, appProp, atomic.AddInt32(&counter, 1)) 52 | } 53 | 54 | consumerTail, err := env.NewConsumer(streamName, 55 | handleMessages, 56 | stream.NewConsumerOptions(). 57 | SetOffset(offsetSpec)) 58 | CheckErr(err) 59 | 60 | fmt.Println("Press any key to stop ") 61 | _, _ = reader.ReadString('\n') 62 | CheckErr(err) 63 | err = consumerTail.Close() 64 | CheckErr(err) 65 | CheckErr(err) 66 | } 67 | -------------------------------------------------------------------------------- /examples/tls/getting_started_tls.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "crypto/tls" 6 | "fmt" 7 | "os" 8 | "strconv" 9 | "time" 10 | 11 | "github.com/google/uuid" 12 | "github.com/rabbitmq/rabbitmq-stream-go-client/pkg/amqp" 13 | "github.com/rabbitmq/rabbitmq-stream-go-client/pkg/logs" 14 | "github.com/rabbitmq/rabbitmq-stream-go-client/pkg/stream" 15 | ) 16 | 17 | func CheckErr(err error) { 18 | if err != nil { 19 | fmt.Printf("%s ", err) 20 | os.Exit(1) 21 | } 22 | } 23 | 24 | func handlePublishConfirm(confirms stream.ChannelPublishConfirm) { 25 | go func() { 26 | for confirmed := range confirms { 27 | for _, msg := range confirmed { 28 | if msg.IsConfirmed() { 29 | fmt.Printf("message %s stored \n ", msg.GetMessage().GetData()) 30 | } else { 31 | fmt.Printf("message %s failed \n ", msg.GetMessage().GetData()) 32 | } 33 | } 34 | } 35 | }() 36 | } 37 | 38 | func consumerClose(channelClose stream.ChannelClose) { 39 | 
event := <-channelClose 40 | fmt.Printf("Consumer: %s closed on the stream: %s, reason: %s \n", event.Name, event.StreamName, event.Reason) 41 | } 42 | 43 | func main() { 44 | reader := bufio.NewReader(os.Stdin) 45 | // Set log level, not mandatory by default is INFO 46 | stream.SetLevelInfo(logs.DEBUG) 47 | 48 | fmt.Println("Getting started with Streaming TLS client for RabbitMQ") 49 | fmt.Println("Connecting to RabbitMQ streaming ...") 50 | 51 | // Connect to the broker ( or brokers ) 52 | env, err := stream.NewEnvironment( 53 | stream.NewEnvironmentOptions(). 54 | SetHost("localhost"). 55 | SetPort(5551). // standard TLS port 56 | SetUser("guest"). 57 | SetPassword("guest"). 58 | IsTLS(true). 59 | // use tls.Config to customize the TLS configuration 60 | // for tests you may need InsecureSkipVerify: true 61 | //nolint:gosec 62 | SetTLSConfig(&tls.Config{InsecureSkipVerify: true}), 63 | ) 64 | /// TLS connection 65 | // it is also possible to configure the TLS connection using the URI 66 | // env, err := stream.NewEnvironment( 67 | // stream.NewEnvironmentOptions(). 68 | // SetUri("rabbitmq-stream+tls://guest:guest@localhost:5551/"). 
69 | // SetTLSConfig(&tls.Config{}), 70 | //) 71 | 72 | CheckErr(err) 73 | // Create a stream, you can create streams without any option like: 74 | // err = env.DeclareStream(streamName, nil) 75 | // it is the best practise to define a size, 1GB for example: 76 | 77 | streamName := uuid.New().String() 78 | err = env.DeclareStream(streamName, 79 | &stream.StreamOptions{ 80 | MaxLengthBytes: stream.ByteCapacity{}.GB(2), 81 | }, 82 | ) 83 | 84 | CheckErr(err) 85 | 86 | // Get a new producer for a stream 87 | producer, err := env.NewProducer(streamName, nil) 88 | CheckErr(err) 89 | 90 | // optional publish confirmation channel 91 | chPublishConfirm := producer.NotifyPublishConfirmation() 92 | handlePublishConfirm(chPublishConfirm) 93 | 94 | // the send method automatically aggregates the messages 95 | // based on batch size 96 | for i := range 1000 { 97 | err := producer.Send(amqp.NewMessage([]byte("hello_world_" + strconv.Itoa(i)))) 98 | CheckErr(err) 99 | } 100 | 101 | // this sleep is not mandatory, just to show the confirmed messages 102 | time.Sleep(1 * time.Second) 103 | err = producer.Close() 104 | CheckErr(err) 105 | 106 | // Define a consumer per stream, there are different offset options to define a consumer, default is 107 | // env.NewConsumer(streamName, func(Context streaming.ConsumerContext, message *amqp.message) { 108 | // 109 | // }, nil) 110 | // if you need to track the offset you need a consumer name like: 111 | handleMessages := func(consumerContext stream.ConsumerContext, message *amqp.Message) { 112 | fmt.Printf("consumer name: %s, text: %s \n ", consumerContext.Consumer.GetName(), message.Data) 113 | } 114 | 115 | consumer, err := env.NewConsumer( 116 | streamName, 117 | handleMessages, 118 | stream.NewConsumerOptions(). 119 | SetConsumerName("my_consumer"). 
// set a consumer name 120 | SetOffset(stream.OffsetSpecification{}.First())) // start consuming from the beginning 121 | CheckErr(err) 122 | channelClose := consumer.NotifyClose() 123 | // channelClose receives all the closing events, here you can handle the 124 | // client reconnection or just log 125 | defer consumerClose(channelClose) 126 | 127 | fmt.Println("Press any key to stop ") 128 | _, _ = reader.ReadString('\n') 129 | err = consumer.Close() 130 | time.Sleep(200 * time.Millisecond) 131 | CheckErr(err) 132 | err = env.DeleteStream(streamName) 133 | CheckErr(err) 134 | err = env.Close() 135 | CheckErr(err) 136 | } 137 | -------------------------------------------------------------------------------- /generate/.gitignore: -------------------------------------------------------------------------------- 1 | files/* -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/rabbitmq/rabbitmq-stream-go-client 2 | 3 | go 1.23.0 4 | 5 | require ( 6 | github.com/golang/snappy v1.0.0 7 | github.com/google/uuid v1.6.0 8 | github.com/klauspost/compress v1.18.0 9 | github.com/onsi/ginkgo/v2 v2.22.2 10 | github.com/onsi/gomega v1.36.2 11 | github.com/pierrec/lz4 v2.6.1+incompatible 12 | github.com/pkg/errors v0.9.1 13 | github.com/spaolacci/murmur3 v1.1.0 14 | github.com/spf13/cobra v1.9.1 15 | golang.org/x/text v0.27.0 16 | ) 17 | 18 | require ( 19 | github.com/frankban/quicktest v1.14.6 // indirect 20 | github.com/go-logr/logr v1.4.2 // indirect 21 | github.com/go-task/slim-sprig/v3 v3.0.0 // indirect 22 | github.com/google/go-cmp v0.6.0 // indirect 23 | github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect 24 | github.com/inconshreveable/mousetrap v1.1.0 // indirect 25 | github.com/rogpeppe/go-internal v1.11.0 // indirect 26 | github.com/spf13/pflag v1.0.6 // indirect 27 | golang.org/x/net v0.41.0 // indirect 28 | 
golang.org/x/sys v0.33.0 // indirect 29 | golang.org/x/tools v0.34.0 // indirect 30 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect 31 | gopkg.in/yaml.v3 v3.0.1 // indirect 32 | ) 33 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= 2 | github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= 3 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 4 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 5 | github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= 6 | github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= 7 | github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= 8 | github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= 9 | github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= 10 | github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= 11 | github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs= 12 | github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 13 | github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 14 | github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= 15 | github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 16 | github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= 17 | github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= 18 | github.com/google/uuid 
v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= 19 | github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 20 | github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= 21 | github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= 22 | github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= 23 | github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= 24 | github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= 25 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 26 | github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= 27 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 28 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 29 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 30 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 31 | github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU= 32 | github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk= 33 | github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= 34 | github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= 35 | github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= 36 | github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= 37 | github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= 38 | github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= 39 | github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 40 | 
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 41 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 42 | github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= 43 | github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= 44 | github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= 45 | github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= 46 | github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= 47 | github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= 48 | github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= 49 | github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= 50 | github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= 51 | github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= 52 | github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= 53 | github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= 54 | golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= 55 | golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= 56 | golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= 57 | golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= 58 | golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4= 59 | golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU= 60 | golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo= 61 | golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg= 62 | google.golang.org/protobuf v1.36.1 
h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= 63 | google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= 64 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 65 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 66 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= 67 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 68 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 69 | -------------------------------------------------------------------------------- /perfTest/.gitignore: -------------------------------------------------------------------------------- 1 | 2 | dist/ 3 | -------------------------------------------------------------------------------- /perfTest/REAMDE.md: -------------------------------------------------------------------------------- 1 | Go stream performances test 2 | === 3 | 4 | This test is to measure the performance of the stream package. 5 | 6 | #### Install the performance test tool 7 | To install you can download the version from GitHub: 8 | 9 | Mac: 10 | ``` 11 | https://github.com/rabbitmq/rabbitmq-stream-go-client/releases/latest/download/stream-perf-test_darwin_amd64.tar.gz 12 | ``` 13 | 14 | Linux: 15 | ``` 16 | https://github.com/rabbitmq/rabbitmq-stream-go-client/releases/latest/download/stream-perf-test_linux_amd64.tar.gz 17 | ``` 18 | 19 | Windows 20 | ``` 21 | https://github.com/rabbitmq/rabbitmq-stream-go-client/releases/latest/download/stream-perf-test_windows_amd64.zip 22 | ``` 23 | 24 | execute `stream-perf-test --help` to see the parameters. By default it executes a test with one producer, one consumer in `BatchSend` mode. 
25 | 26 | here an example: 27 | ```shell 28 | stream-perf-test --publishers 3 --consumers 2 --streams my_stream --max-length-bytes 2GB --uris rabbitmq-stream://guest:guest@localhost:5552/ --fixed-body 400 --time 10 29 | ``` 30 | 31 | ### Performance test tool Docker 32 | A docker image is available: `pivotalrabbitmq/go-stream-perf-test`, to test it: 33 | 34 | Run the server is host mode: 35 | ```shell 36 | docker run -it --rm --name rabbitmq --network host \ 37 | rabbitmq:4-management 38 | ``` 39 | enable the plugin: 40 | ``` 41 | docker exec rabbitmq rabbitmq-plugins enable rabbitmq_stream 42 | ``` 43 | then run the docker image: 44 | ```shell 45 | docker run -it --network host pivotalrabbitmq/go-stream-perf-test 46 | ``` 47 | 48 | To see all the parameters: 49 | ```shell 50 | docker run -it --network host pivotalrabbitmq/go-stream-perf-test --help 51 | ``` 52 | 53 | ### Examples 54 | 55 | #### 1. Simple test 56 | 1 producer, 1 consumer, 1 stream, 1GB max length 57 | ```shell 58 | stream-perf-test --streams my_stream --max-length-bytes 1GB 59 | ``` 60 | 61 | #### 2. Multiple producers and consumers 62 | 3 producers, 2 consumers, 1 stream, 2GB max length 63 | ```shell 64 | stream-perf-test --publishers 3 --consumers 2 --streams my_stream --max-length-bytes 2GB 65 | ``` 66 | 67 | #### 3. Fixed body size 68 | 1 producer, 1 consumer, 1 stream, 1GB max length, 400 bytes body 69 | ```shell 70 | stream-perf-test --streams my_stream --max-length-bytes 1GB --fixed-body 400 71 | ``` 72 | 73 | #### 4. Test async-send 74 | By default, the test uses the `BatchSend` mode, to test the `Send` mode: 75 | ```shell 76 | stream-perf-test --streams my_stream --max-length-bytes 1GB --async-send 77 | ``` 78 | 79 | #### 5. Test fixed rate and async-send 80 | This test is useful to test the latency, the producer sends messages at a fixed rate. 81 | ```shell 82 | stream-perf-test --streams my_stream --max-length-bytes 1GB --async-send --rate 100 83 | ``` 84 | 85 | #### 6. 
// logInfo logs a printf-style message prefixed with "[info] - ".
// The caller's format string and arguments are forwarded to log.Printf;
// concatenating the prefix avoids the original double formatting pass
// (fmt.Sprintf feeding log.Printf) while producing identical output.
func logInfo(message string, v ...any) {
	log.Printf("[info] - "+message, v...)
}

// logError logs a printf-style message prefixed with "[error] - ".
func logError(message string, v ...any) {
	log.Printf("[error] - "+message, v...)
}
baseCmd.PersistentFlags().IntVarP(&batchSize, "batch-size", "", 200, "Batch Size, from 1 to 300") 63 | baseCmd.PersistentFlags().IntVarP(&queueSize, "queue-size", "", 50_000, "Queue Size for the server back pressure = messages send - messages confirmed") 64 | baseCmd.PersistentFlags().IntVarP(&subEntrySize, "sub-entry-size", "", 1, "SubEntry size, default 1. > 1 Enable the subEntryBatch") 65 | baseCmd.PersistentFlags().StringVarP(&compression, "compression", "", "", "Compression for sub batching, none,gzip,lz4,snappy,zstd") 66 | baseCmd.PersistentFlags().IntVarP(&consumers, "consumers", "", 1, "Number of Consumers") 67 | baseCmd.PersistentFlags().IntVarP(&publishersPerClient, "publishers-per-client", "", 3, "Publishers Per Client") 68 | baseCmd.PersistentFlags().IntVarP(&consumersPerClient, "consumers-per-client", "", 3, "Consumers Per Client") 69 | baseCmd.PersistentFlags().IntVarP(&rate, "rate", "", 0, "Limit publish rate") 70 | baseCmd.PersistentFlags().IntVarP(&variableRate, "variable-rate", "", 0, "Variable rate to value") 71 | baseCmd.PersistentFlags().IntVarP(&variableBody, "variable-body", "", 0, "Variable body size") 72 | baseCmd.PersistentFlags().IntVarP(&fixedBody, "fixed-body", "", 0, "Body size") 73 | baseCmd.PersistentFlags().IntVarP(&runDuration, "time", "", 0, "Run Duration in seconds ( stop the test)") 74 | baseCmd.PersistentFlags().BoolVarP(&exitOnError, "exit-on-error", "", true, "Close the app in case of error") 75 | baseCmd.PersistentFlags().BoolVarP(&printStatsV, "print-stats", "", true, "Print stats") 76 | baseCmd.PersistentFlags().BoolVarP(&debugLogs, "debug-logs", "", false, "Enable debug logs") 77 | baseCmd.PersistentFlags().BoolVarP(&crcCheck, "crc-check", "", false, "Enable crc control") 78 | baseCmd.PersistentFlags().StringSliceVarP(&streams, "streams", "", []string{"perf-test-go"}, "Stream names") 79 | baseCmd.PersistentFlags().StringVarP(&maxLengthBytes, "max-length-bytes", "", "0", "Stream max length bytes, e.g. 
10MB, 50GB, etc.") 80 | baseCmd.PersistentFlags().IntVarP(&maxAge, "max-age", "", 0, "Stream Age in hours, e.g. 1,2.. 24 , etc.") 81 | baseCmd.PersistentFlags().StringVarP(&maxSegmentSizeBytes, "stream-max-segment-size-bytes", "", "500MB", "Stream segment size bytes, e.g. 10MB, 1GB, etc.") 82 | baseCmd.PersistentFlags().StringVarP(&consumerOffset, "consumer-offset", "", "first", "Staring consuming, ex: first,last,next or random") 83 | baseCmd.PersistentFlags().IntVarP(&initialCredits, "initial-credits", "", 10, "Consumer initial credits") 84 | baseCmd.PersistentFlags().BoolVarP(&isAsyncSend, "async-send", "", false, "Enable the async send. By default it uses batchSend in this case is faster") 85 | baseCmd.PersistentFlags().StringVarP(&clientProvidedName, "client-provided-name", "", "", "Client provided name") 86 | baseCmd.AddCommand(versionCmd) 87 | baseCmd.AddCommand(newSilent()) 88 | } 89 | 90 | // Execute is the entrypoint of the commands 91 | func Execute() { 92 | cmd, _, err := rootCmd.Find(os.Args[1:]) 93 | if err == nil && cmd.Use == rootCmd.Use { 94 | args := append([]string{"silent"}, os.Args[1:]...) 95 | rootCmd.SetArgs(args) 96 | } 97 | 98 | if err := rootCmd.Execute(); err != nil { 99 | _, _ = fmt.Fprintln(os.Stderr, err) 100 | os.Exit(1) 101 | } 102 | } 103 | -------------------------------------------------------------------------------- /perfTest/cmd/version.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/spf13/cobra" 7 | ) 8 | 9 | // These values are overridden at build time. 
10 | // Please update ldflags aptly when renaming these vars or moving packages 11 | var ( 12 | version = "dev" 13 | commit = "dev" 14 | goVersion = "unknown" 15 | ) 16 | var versionCmd = &cobra.Command{ 17 | Use: "version", 18 | Short: "Print CLI version", 19 | Run: func(_ *cobra.Command, _ []string) { 20 | printVersion() 21 | }, 22 | } 23 | 24 | func printVersion() { 25 | fmt.Printf("version: %s\ngo version: %s\ncommit: %s\n", 26 | version, 27 | goVersion, 28 | commit, 29 | ) 30 | } 31 | -------------------------------------------------------------------------------- /perfTest/perftest.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/rabbitmq/rabbitmq-stream-go-client/perfTest/cmd" 5 | ) 6 | 7 | func main() { 8 | cmd.Execute() 9 | } 10 | -------------------------------------------------------------------------------- /pkg/amqp/buffer.go: -------------------------------------------------------------------------------- 1 | // MIT License 2 | // 3 | // Copyright (C) 2017 Kale Blankenship 4 | // Portions Copyright (C) Microsoft Corporation 5 | // 6 | // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // of this software and associated documentation files (the "Software"), to deal 8 | // in the Software without restriction, including without limitation the rights 9 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | // copies of the Software, and to permit persons to whom the Software is 11 | // furnished to do so, subject to the following conditions: 12 | // 13 | // The above copyright notice and this permission notice shall be included in all 14 | // copies or substantial portions of the Software. 
15 | // 16 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | // SOFTWARE 23 | 24 | package amqp 25 | 26 | import ( 27 | "encoding/binary" 28 | "io" 29 | ) 30 | 31 | // buffer is similar to bytes.Buffer but specialized for this package 32 | type buffer struct { 33 | b []byte 34 | i int 35 | } 36 | 37 | func (b *buffer) next(n int64) ([]byte, bool) { 38 | if b.readCheck(n) { 39 | buf := b.b[b.i:len(b.b)] 40 | b.i = len(b.b) 41 | return buf, false 42 | } 43 | 44 | buf := b.b[b.i : b.i+int(n)] 45 | b.i += int(n) 46 | return buf, true 47 | } 48 | 49 | func (b *buffer) skip(n int) { 50 | b.i += n 51 | } 52 | 53 | func (b *buffer) readCheck(n int64) bool { 54 | return int64(b.i)+n > int64(len(b.b)) 55 | } 56 | 57 | func (b *buffer) readByte() (byte, error) { 58 | if b.readCheck(1) { 59 | return 0, io.EOF 60 | } 61 | 62 | byte_ := b.b[b.i] 63 | b.i++ 64 | return byte_, nil 65 | } 66 | 67 | func (b *buffer) readType() (amqpType, error) { 68 | n, err := b.readByte() 69 | return amqpType(n), err 70 | } 71 | 72 | func (b *buffer) peekType() (amqpType, error) { 73 | if b.readCheck(1) { 74 | return 0, io.EOF 75 | } 76 | 77 | return amqpType(b.b[b.i]), nil 78 | } 79 | 80 | func (b *buffer) readUint16() (uint16, error) { 81 | if b.readCheck(2) { 82 | return 0, io.EOF 83 | } 84 | 85 | n := binary.BigEndian.Uint16(b.b[b.i:]) 86 | b.i += 2 87 | return n, nil 88 | } 89 | 90 | func (b *buffer) readUint32() (uint32, error) { 91 | if b.readCheck(4) { 92 | return 0, io.EOF 93 | } 94 | 95 | n := binary.BigEndian.Uint32(b.b[b.i:]) 96 | b.i += 4 97 
| return n, nil 98 | } 99 | 100 | func (b *buffer) readUint64() (uint64, error) { 101 | if b.readCheck(8) { 102 | return 0, io.EOF 103 | } 104 | 105 | n := binary.BigEndian.Uint64(b.b[b.i : b.i+8]) 106 | b.i += 8 107 | return n, nil 108 | } 109 | 110 | func (b *buffer) write(p []byte) { 111 | b.b = append(b.b, p...) 112 | } 113 | 114 | func (b *buffer) writeByte(byte_ byte) { 115 | b.b = append(b.b, byte_) 116 | } 117 | 118 | func (b *buffer) writeString(s string) { 119 | b.b = append(b.b, s...) 120 | } 121 | 122 | func (b *buffer) len() int { 123 | return len(b.b) - b.i 124 | } 125 | 126 | func (b *buffer) bytes() []byte { 127 | return b.b[b.i:] 128 | } 129 | 130 | func (b *buffer) writeUint16(n uint16) { 131 | b.b = append(b.b, 132 | byte(n>>8), 133 | byte(n), 134 | ) 135 | } 136 | 137 | func (b *buffer) writeUint32(n uint32) { 138 | b.b = append(b.b, 139 | byte(n>>24), 140 | byte(n>>16), 141 | byte(n>>8), 142 | byte(n), 143 | ) 144 | } 145 | 146 | func (b *buffer) writeUint64(n uint64) { 147 | b.b = append(b.b, 148 | byte(n>>56), 149 | byte(n>>48), 150 | byte(n>>40), 151 | byte(n>>32), 152 | byte(n>>24), 153 | byte(n>>16), 154 | byte(n>>8), 155 | byte(n), 156 | ) 157 | } 158 | -------------------------------------------------------------------------------- /pkg/amqp/error_stdlib.go: -------------------------------------------------------------------------------- 1 | // MIT License 2 | // 3 | // Copyright (C) 2017 Kale Blankenship 4 | // Portions Copyright (C) Microsoft Corporation 5 | // 6 | // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // of this software and associated documentation files (the "Software"), to deal 8 | // in the Software without restriction, including without limitation the rights 9 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | // copies of the Software, and to permit persons to whom the Software is 11 | // furnished to do so, subject to the following conditions: 12 | 
// Default stdlib-based error functions.
var (
	// errorNew builds a plain error from a message (alias of errors.New).
	errorNew = errors.New
	// errorErrorf builds a formatted error (alias of fmt.Errorf).
	errorErrorf = fmt.Errorf
	// errorWrapf returns err unchanged: in this stdlib build (the
	// !pkgerrors build tag) the extra context format and args are
	// intentionally discarded.
	errorWrapf = func(err error, _ string, _ ...any) error { return err }
)
It is needed in case of restart 26 | 27 | //bootstrap: if true the consumer will start from the user offset. 28 | // If false it will start from the last offset consumed (currentPosition) 29 | bootstrap bool 30 | } 31 | 32 | func (c *ReliableConsumer) GetStatusAsString() string { 33 | switch c.GetStatus() { 34 | case StatusOpen: 35 | return "Open" 36 | case StatusClosed: 37 | return "Closed" 38 | case StatusStreamDoesNotExist: 39 | return "StreamDoesNotExist" 40 | case StatusReconnecting: 41 | return "Reconnecting" 42 | default: 43 | return "Unknown" 44 | } 45 | } 46 | 47 | func (c *ReliableConsumer) handleNotifyClose(channelClose stream.ChannelClose) { 48 | go func() { 49 | event := <-channelClose 50 | if strings.EqualFold(event.Reason, stream.SocketClosed) || strings.EqualFold(event.Reason, stream.MetaDataUpdate) || strings.EqualFold(event.Reason, stream.ZombieConsumer) { 51 | c.setStatus(StatusReconnecting) 52 | logs.LogWarn("[Reliable] - %s closed unexpectedly %s.. Reconnecting..", c.getInfo(), event.Reason) 53 | c.bootstrap = false 54 | err, reconnected := retry(1, c) 55 | if err != nil { 56 | logs.LogInfo(""+ 57 | "[Reliable] - %s won't be reconnected. Error: %s", c.getInfo(), err) 58 | } 59 | if reconnected { 60 | c.setStatus(StatusOpen) 61 | } else { 62 | c.setStatus(StatusClosed) 63 | } 64 | } else { 65 | logs.LogInfo("[Reliable] - %s closed normally. 
// getTimeOut returns the timeout value exposed through the IReliable
// interface used by the retry machinery.
// NOTE(review): time.Duration(3) is 3 *nanoseconds*, not 3 seconds.
// Confirm whether callers scale this value (e.g. multiply by time.Second)
// or whether 3*time.Second was intended here — TODO verify against the
// call sites of IReliable.getTimeOut before changing it.
func (c *ReliableConsumer) getTimeOut() time.Duration {
	return time.Duration(3)
}
133 | defer c.mutexConnection.Unlock() 134 | offset := stream.OffsetSpecification{}.Offset(c.currentPosition + 1) 135 | if c.bootstrap { 136 | offset = c.consumerOptions.Offset 137 | } 138 | logs.LogDebug("[Reliable] - creating %s. Boot: %s. StartOffset: %s", c.getInfo(), 139 | c.bootstrap, offset) 140 | consumer, err := c.env.NewConsumer(c.streamName, func(consumerContext stream.ConsumerContext, message *amqp.Message) { 141 | c.mutexConnection.Lock() 142 | c.currentPosition = consumerContext.Consumer.GetOffset() 143 | c.mutexConnection.Unlock() 144 | 145 | c.messagesHandler(consumerContext, message) 146 | }, c.consumerOptions.SetOffset(offset)) 147 | if err != nil { 148 | return err 149 | } 150 | 151 | channelNotifyClose := consumer.NotifyClose() 152 | c.handleNotifyClose(channelNotifyClose) 153 | c.consumer = consumer 154 | return err 155 | } 156 | 157 | func (c *ReliableConsumer) Close() error { 158 | c.setStatus(StatusClosed) 159 | err := c.consumer.Close() 160 | if err != nil { 161 | return err 162 | } 163 | return nil 164 | } 165 | 166 | func (c *ReliableConsumer) GetInfo() string { 167 | return c.getInfo() 168 | } 169 | -------------------------------------------------------------------------------- /pkg/ha/ha_consumer_test.go: -------------------------------------------------------------------------------- 1 | package ha 2 | 3 | import ( 4 | "sync/atomic" 5 | "time" 6 | 7 | "github.com/google/uuid" 8 | . "github.com/onsi/ginkgo/v2" 9 | . "github.com/onsi/gomega" 10 | "github.com/rabbitmq/rabbitmq-stream-go-client/pkg/amqp" 11 | . 
// Ginkgo spec for ReliableConsumer. Each spec creates a fresh environment
// and a uniquely named stream, and tears the stream down afterwards.
// The reconnection specs require a reachable broker with the management
// plugin on port 15672 (used to locate and drop connections).
var _ = Describe("Reliable Consumer", func() {

	var (
		envForRConsumer    *Environment
		streamForRConsumer string
	)
	BeforeEach(func() {
		testEnv, err := NewEnvironment(nil)
		envForRConsumer = testEnv
		Expect(err).NotTo(HaveOccurred())
		// Unique stream name per spec to keep specs independent.
		streamForRConsumer = uuid.New().String()
		err = envForRConsumer.DeclareStream(streamForRConsumer, nil)
		Expect(err).NotTo(HaveOccurred())
	})
	AfterEach(func() {
		// The stream may already be gone (e.g. the delete-stream spec).
		exists, err := envForRConsumer.StreamExists(streamForRConsumer)
		Expect(err).NotTo(HaveOccurred())
		if exists {
			Expect(envForRConsumer.DeleteStream(streamForRConsumer)).NotTo(HaveOccurred())
		}
	})

	// Both the messages handler and the consumer options are mandatory.
	It("Validate mandatory fields", func() {
		_, err := NewReliableConsumer(envForRConsumer,
			streamForRConsumer, &ConsumerOptions{}, nil)
		Expect(err).To(HaveOccurred())
		_, err = NewReliableConsumer(envForRConsumer, streamForRConsumer, nil, func(_ ConsumerContext, _ *amqp.Message) {
		})
		Expect(err).To(HaveOccurred())
	})

	// Publishes 10 messages with a reliable producer, waits for all the
	// confirmations, then consumes the same 10 messages from the start.
	It("Create/Confirm and close a Reliable Producer / Consumer", func() {
		signal := make(chan struct{})
		var confirmed int32
		producer, err := NewReliableProducer(envForRConsumer,
			streamForRConsumer, NewProducerOptions(), func(messageConfirm []*ConfirmationStatus) {
				for _, confirm := range messageConfirm {
					Expect(confirm.IsConfirmed()).To(BeTrue())
				}
				if atomic.AddInt32(&confirmed, int32(len(messageConfirm))) == 10 {
					signal <- struct{}{}
				}
			})
		Expect(err).NotTo(HaveOccurred())
		for range 10 {
			msg := amqp.NewMessage([]byte("ha"))
			err := producer.Send(msg)
			Expect(err).NotTo(HaveOccurred())
		}
		<-signal
		Expect(producer.Close()).NotTo(HaveOccurred())

		signal = make(chan struct{})
		var consumed int32
		consumer, err := NewReliableConsumer(envForRConsumer, streamForRConsumer, NewConsumerOptions().SetOffset(OffsetSpecification{}.First()), func(_ ConsumerContext, _ *amqp.Message) {
			atomic.AddInt32(&consumed, 1)
			if atomic.LoadInt32(&consumed) == 10 {
				signal <- struct{}{}
			}
		})

		Expect(err).NotTo(HaveOccurred())
		<-signal
		Expect(consumed).To(Equal(int32(10)))
		Expect(consumer.Close()).NotTo(HaveOccurred())
	})

	// Drops the consumer's TCP connection through the management API and
	// expects the reliable consumer to reconnect on its own.
	It("restart Reliable Consumer in case of killing connection", func() {

		clientProvidedName := uuid.New().String()
		consumer, err := NewReliableConsumer(envForRConsumer, streamForRConsumer, NewConsumerOptions().SetOffset(OffsetSpecification{}.First()).SetClientProvidedName(clientProvidedName),
			func(_ ConsumerContext, _ *amqp.Message) {})
		Expect(err).NotTo(HaveOccurred())
		Expect(consumer).NotTo(BeNil())
		time.Sleep(1 * time.Second)
		Expect(consumer.GetStatus()).To(Equal(StatusOpen))
		connectionToDrop := ""
		// Find the broker-side connection by its client-provided name.
		Eventually(func() bool {
			connections, err := test_helper.Connections("15672")
			if err != nil {
				return false
			}
			for _, connection := range connections {
				if connection.ClientProperties.Connection_name == clientProvidedName {
					connectionToDrop = connection.Name
					return true
				}
			}
			return false
		}, time.Second*5).Should(BeTrue())

		Expect(connectionToDrop).NotTo(BeEmpty())
		// kill the connection
		errDrop := test_helper.DropConnection(connectionToDrop, "15672")
		Expect(errDrop).NotTo(HaveOccurred())
		/// just give some time to raise the event
		time.Sleep(1200 * time.Millisecond)
		Eventually(func() int { return consumer.GetStatus() }, "15s").WithPolling(300 * time.Millisecond).Should(Equal(StatusOpen))
		Expect(consumer.GetStatusAsString()).To(Equal("Open"))
		Expect(consumer.Close()).NotTo(HaveOccurred())
		Expect(consumer.GetStatus()).To(Equal(StatusClosed))
		Expect(consumer.GetStatusAsString()).To(Equal("Closed"))
	})

	// Deleting the stream must eventually move the consumer to Closed.
	It("Delete the stream should close the consumer", func() {
		consumer, err := NewReliableConsumer(envForRConsumer, streamForRConsumer,
			NewConsumerOptions(),
			func(_ ConsumerContext, _ *amqp.Message) {
			})
		Expect(err).NotTo(HaveOccurred())
		Expect(consumer).NotTo(BeNil())
		Expect(consumer.GetStatus()).To(Equal(StatusOpen))
		Expect(consumer.GetStatusAsString()).To(Equal("Open"))
		Expect(envForRConsumer.DeleteStream(streamForRConsumer)).NotTo(HaveOccurred())
		Eventually(func() int {
			return consumer.GetStatus()
		}).WithPolling(300 * time.Millisecond).WithTimeout(20 * time.Second).Should(Equal(StatusClosed))

	})
})
"github.com/onsi/gomega" 8 | ) 9 | 10 | func TestHa(t *testing.T) { 11 | RegisterFailHandler(Fail) 12 | RunSpecs(t, "Ha Suite") 13 | } 14 | -------------------------------------------------------------------------------- /pkg/ha/reliable_common.go: -------------------------------------------------------------------------------- 1 | package ha 2 | 3 | import ( 4 | "errors" 5 | "math/rand" 6 | "time" 7 | 8 | "github.com/rabbitmq/rabbitmq-stream-go-client/pkg/logs" 9 | "github.com/rabbitmq/rabbitmq-stream-go-client/pkg/stream" 10 | ) 11 | 12 | const ( 13 | StatusOpen = 1 14 | StatusClosed = 2 15 | StatusStreamDoesNotExist = 3 16 | StatusReconnecting = 4 17 | ) 18 | 19 | type newEntityInstance func() error 20 | 21 | type IReliable interface { 22 | setStatus(value int) 23 | GetStatus() int 24 | getInfo() string 25 | getEnv() *stream.Environment 26 | getNewInstance() newEntityInstance 27 | getTimeOut() time.Duration 28 | GetStreamName() string 29 | GetStatusAsString() string 30 | } 31 | 32 | // Retry is a function that retries the IReliable to the stream 33 | // The first step is to set the status to reconnecting 34 | // Then it sleeps for a random time between 2 and the timeout to avoid overlapping with other reconnecting 35 | // Then it checks if the stream exists. During the restart the stream could be deleted 36 | // If the stream does not exist it returns a StreamDoesNotExist error 37 | // If the stream exists it tries to create a new instance of the IReliable 38 | 39 | // 40 | // The stream could be in a `StreamNotAvailable` status or the `LeaderNotReady` 41 | // `StreamNotAvailable` is a server side error: Stream exists but is not available for the producer and consumer 42 | // `LeaderNotReady` is a client side error: Stream exists it is Ready but the leader is not elected yet. 
It is mandatory for the Producer 43 | // In both cases it retries the reconnection 44 | 45 | func retry(backoff int, reliable IReliable) (error, bool) { 46 | waitTime := randomWaitWithBackoff(backoff) 47 | logs.LogInfo("[Reliable] - The %s for the stream %s is in reconnection in %d milliseconds", reliable.getInfo(), reliable.GetStreamName(), waitTime) 48 | time.Sleep(time.Duration(waitTime) * time.Millisecond) 49 | streamMetaData, errS := reliable.getEnv().StreamMetaData(reliable.GetStreamName()) 50 | if errors.Is(errS, stream.StreamDoesNotExist) { 51 | logs.LogInfo("[Reliable] - The stream %s does not exist for %s. Stopping it", reliable.GetStreamName(), reliable.getInfo()) 52 | return errS, false 53 | } 54 | if errors.Is(errS, stream.StreamNotAvailable) { 55 | logs.LogInfo("[Reliable] - The stream %s is not available for %s. Trying to reconnect", reliable.GetStreamName(), reliable.getInfo()) 56 | return retry(backoff+1, reliable) 57 | } 58 | if errors.Is(errS, stream.LeaderNotReady) { 59 | logs.LogInfo("[Reliable] - The leader for the stream %s is not ready for %s. Trying to reconnect", reliable.GetStreamName(), reliable.getInfo()) 60 | return retry(backoff+1, reliable) 61 | } 62 | 63 | if errors.Is(errS, stream.StreamMetadataFailure) { 64 | logs.LogInfo("[Reliable] - Fail to retrieve the %s metadata for %s. Trying to reconnect", reliable.GetStreamName(), reliable.getInfo()) 65 | return retry(backoff+1, reliable) 66 | } 67 | 68 | var result error 69 | if streamMetaData != nil { 70 | logs.LogInfo("[Reliable] - The stream %s exists. Reconnecting the %s.", reliable.GetStreamName(), reliable.getInfo()) 71 | result = reliable.getNewInstance()() 72 | if result == nil { 73 | logs.LogInfo("[Reliable] - The stream %s exists. %s reconnected.", reliable.getInfo(), reliable.GetStreamName()) 74 | } else { 75 | logs.LogInfo("[Reliable] - error %s creating %s for the stream %s. 
Trying to reconnect", result, reliable.getInfo(), reliable.GetStreamName()) 76 | return retry(backoff+1, reliable) 77 | } 78 | } else { 79 | logs.LogError("[Reliable] - The stream %s does not exist for %s. Closing..", reliable.GetStreamName(), reliable.getInfo()) 80 | return stream.StreamDoesNotExist, false 81 | } 82 | 83 | return result, true 84 | } 85 | 86 | func randomWaitWithBackoff(attempt int) int { 87 | r := rand.New(rand.NewSource(time.Now().UnixNano())) 88 | baseWait := 3_000 + r.Intn(8_000) 89 | 90 | // Calculate the wait time considering the number of attempts 91 | waitTime := min(baseWait*(1<<(attempt-1)), 15_000) 92 | 93 | return waitTime 94 | } 95 | -------------------------------------------------------------------------------- /pkg/integration_test/integration_test_suite_test.go: -------------------------------------------------------------------------------- 1 | package integration_test 2 | 3 | import ( 4 | "testing" 5 | 6 | . "github.com/onsi/ginkgo/v2" 7 | . "github.com/onsi/gomega" 8 | ) 9 | 10 | func TestIntegrationTest(t *testing.T) { 11 | RegisterFailHandler(Fail) 12 | RunSpecs(t, "IntegrationTest Suite") 13 | } 14 | -------------------------------------------------------------------------------- /pkg/logs/log.go: -------------------------------------------------------------------------------- 1 | package logs 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | ) 7 | 8 | const ( 9 | INFO = 0 10 | DEBUG = 1 11 | ) 12 | 13 | var LogLevel int8 14 | 15 | func LogInfo(message string, v ...any) { 16 | log.Printf(fmt.Sprintf("[info] - %s", message), v...) 17 | } 18 | 19 | func LogError(message string, v ...any) { 20 | log.Printf(fmt.Sprintf("[error] - %s", message), v...) 21 | } 22 | 23 | func LogDebug(message string, v ...any) { 24 | if LogLevel > INFO { 25 | log.Printf(fmt.Sprintf("[debug] - %s", message), v...) 26 | } 27 | } 28 | 29 | func LogWarn(message string, v ...any) { 30 | log.Printf(fmt.Sprintf("[warn] - %s", message), v...) 
// StreamMessage is the interface that wraps the basic methods to interact with a message
// in the context of a stream.
// Currently, the StreamMessage interface is implemented by the amqp.Message struct.
// The implementations are not meant to be thread-safe.
type StreamMessage interface {
	// MarshalBinary serializes the message to bytes; UnmarshalBinary is its
	// inverse (presumably AMQP 1.0 encoding, as implemented by amqp.Message).
	MarshalBinary() ([]byte, error)
	UnmarshalBinary(data []byte) error
	// SetPublishingId stores a publishing id on the message;
	// HasPublishingId reports whether one was explicitly set.
	SetPublishingId(id int64)
	GetPublishingId() int64
	HasPublishingId() bool
	// GetData returns the message body as one or more byte segments.
	GetData() [][]byte
	// Accessors for the AMQP sections of the message.
	GetMessageProperties() *amqp.MessageProperties
	GetMessageAnnotations() amqp.Annotations
	GetApplicationProperties() map[string]any

	// GetMessageHeader GetAMQPValue read only values see: rabbitmq-stream-go-client/issues/128
	GetMessageHeader() *amqp.MessageHeader
	GetAMQPValue() any
}
"github.com/onsi/gomega" 9 | ) 10 | 11 | var _ = Describe("Compression algorithms", func() { 12 | var entries *subEntries 13 | 14 | BeforeEach(func() { 15 | messagePayload := make([]byte, 4096) 16 | for i := range messagePayload { 17 | messagePayload[i] = 99 18 | } 19 | 20 | message := &messageSequence{ 21 | messageBytes: messagePayload, 22 | publishingId: 0, 23 | } 24 | 25 | entries = &subEntries{ 26 | items: []*subEntry{{ 27 | messages: []*messageSequence{message}, 28 | publishingId: 0, 29 | unCompressedSize: len(messagePayload) + 4, 30 | sizeInBytes: 0, 31 | dataInBytes: nil, 32 | }}, 33 | totalSizeInBytes: 0, 34 | } 35 | }) 36 | 37 | It("NONE", func() { 38 | err := compressNONE{}.Compress(entries) 39 | Expect(err).NotTo(HaveOccurred()) 40 | Expect(entries.totalSizeInBytes).To(Equal(entries.items[0].sizeInBytes)) 41 | Expect(entries.totalSizeInBytes).To(Equal(entries.items[0].unCompressedSize)) 42 | }) 43 | 44 | It("GZIP", func() { 45 | gzip := compressGZIP{} 46 | err := gzip.Compress(entries) 47 | Expect(err).NotTo(HaveOccurred()) 48 | verifyCompression(gzip, entries) 49 | }) 50 | 51 | It("SNAPPY", func() { 52 | snappy := compressSnappy{} 53 | err := snappy.Compress(entries) 54 | Expect(err).NotTo(HaveOccurred()) 55 | verifyCompression(snappy, entries) 56 | }) 57 | 58 | It("LZ4", func() { 59 | lz4 := compressLZ4{} 60 | err := lz4.Compress(entries) 61 | Expect(err).NotTo(HaveOccurred()) 62 | verifyCompression(lz4, entries) 63 | }) 64 | 65 | It("ZSTD", func() { 66 | zstd := compressZSTD{} 67 | err := zstd.Compress(entries) 68 | Expect(err).NotTo(HaveOccurred()) 69 | verifyCompression(zstd, entries) 70 | }) 71 | }) 72 | 73 | func verifyCompression(algo iCompress, subEntries *subEntries) { 74 | Expect(subEntries.totalSizeInBytes).To(SatisfyAll(BeNumerically("<", subEntries.items[0].unCompressedSize))) 75 | Expect(subEntries.totalSizeInBytes).To(Equal(subEntries.items[0].sizeInBytes)) 76 | 77 | bufferReader := bytes.NewReader(subEntries.items[0].dataInBytes) 78 | _, 
err := algo.UnCompress(bufio.NewReader(bufferReader), 79 | uint32(subEntries.totalSizeInBytes), uint32(subEntries.items[0].unCompressedSize)) 80 | 81 | Expect(err).NotTo(HaveOccurred()) 82 | } 83 | -------------------------------------------------------------------------------- /pkg/stream/available_features.go: -------------------------------------------------------------------------------- 1 | package stream 2 | 3 | import ( 4 | "fmt" 5 | "regexp" 6 | "strconv" 7 | "strings" 8 | "sync" 9 | ) 10 | 11 | var lock = &sync.Mutex{} 12 | 13 | type availableFeatures struct { 14 | is313OrMore bool 15 | is311OrMore bool 16 | brokerFilterEnabled bool 17 | brokerVersion string 18 | brokerSingleActiveConsumerEnabled bool 19 | } 20 | 21 | func newAvailableFeatures() *availableFeatures { 22 | lock.Lock() 23 | defer lock.Unlock() 24 | return &availableFeatures{} 25 | } 26 | 27 | func (a *availableFeatures) Is311OrMore() bool { 28 | lock.Lock() 29 | defer lock.Unlock() 30 | return a.is311OrMore 31 | } 32 | 33 | func (a *availableFeatures) Is313OrMore() bool { 34 | lock.Lock() 35 | defer lock.Unlock() 36 | return a.is313OrMore 37 | } 38 | 39 | func (a *availableFeatures) BrokerFilterEnabled() bool { 40 | lock.Lock() 41 | defer lock.Unlock() 42 | return a.brokerFilterEnabled 43 | } 44 | 45 | func (a *availableFeatures) IsBrokerSingleActiveConsumerEnabled() bool { 46 | lock.Lock() 47 | defer lock.Unlock() 48 | return a.brokerSingleActiveConsumerEnabled 49 | } 50 | 51 | func (a *availableFeatures) SetVersion(version string) error { 52 | lock.Lock() 53 | defer lock.Unlock() 54 | if extractVersion(version) == "" { 55 | return fmt.Errorf("invalid version format: %s", version) 56 | } 57 | a.brokerVersion = version 58 | a.is311OrMore = IsVersionGreaterOrEqual(extractVersion(version), "3.11.0") 59 | a.is313OrMore = IsVersionGreaterOrEqual(extractVersion(version), "3.13.0") 60 | a.brokerSingleActiveConsumerEnabled = a.is311OrMore 61 | return nil 62 | } 63 | 64 | func (a *availableFeatures) 
GetCommands() []commandVersion { 65 | lock.Lock() 66 | defer lock.Unlock() 67 | return []commandVersion{ 68 | &PublishFilter{}, 69 | } 70 | } 71 | 72 | func (a *availableFeatures) ParseCommandVersions(commandVersions []commandVersion) { 73 | lock.Lock() 74 | defer lock.Unlock() 75 | for _, commandVersion := range commandVersions { 76 | if commandVersion.GetCommandKey() == commandPublish { 77 | a.brokerFilterEnabled = commandVersion.GetMinVersion() <= PublishFilter{}.GetMinVersion() && 78 | commandVersion.GetMaxVersion() >= PublishFilter{}.GetMaxVersion() 79 | } 80 | } 81 | } 82 | 83 | func (a *availableFeatures) String() string { 84 | return fmt.Sprintf("brokerVersion: %s, is311OrMore: %t, is313OrMore: %t, brokerFilterEnabled: %t", a.brokerVersion, a.is311OrMore, a.is313OrMore, a.brokerFilterEnabled) 85 | } 86 | 87 | func extractVersion(fullVersion string) string { 88 | pattern := `(\d+\.\d+\.\d+)` 89 | regex := regexp.MustCompile(pattern) 90 | match := regex.FindStringSubmatch(fullVersion) 91 | 92 | if len(match) > 1 { 93 | return match[1] 94 | } 95 | return "" 96 | } 97 | 98 | func IsVersionGreaterOrEqual(version, target string) bool { 99 | v1, err := parseVersion(version) 100 | if err != nil { 101 | return false 102 | } 103 | 104 | v2, err := parseVersion(target) 105 | if err != nil { 106 | return false 107 | } 108 | return v1.Compare(v2) >= 0 109 | } 110 | 111 | func parseVersion(version string) (Version, error) { 112 | parts := strings.Split(version, ".") 113 | if len(parts) != 3 { 114 | return Version{}, fmt.Errorf("invalid version format: %s", version) 115 | } 116 | 117 | major, err := strconv.Atoi(parts[0]) 118 | if err != nil { 119 | return Version{}, fmt.Errorf("invalid major version: %s", parts[0]) 120 | } 121 | 122 | minor, err := strconv.Atoi(parts[1]) 123 | if err != nil { 124 | return Version{}, fmt.Errorf("invalid minor version: %s", parts[1]) 125 | } 126 | 127 | patch, err := strconv.Atoi(parts[2]) 128 | if err != nil { 129 | return Version{}, 
fmt.Errorf("invalid patch version: %s", parts[2]) 130 | } 131 | 132 | return Version{Major: major, Minor: minor, Patch: patch}, nil 133 | } 134 | 135 | type Version struct { 136 | Major int 137 | Minor int 138 | Patch int 139 | } 140 | 141 | func (v Version) Compare(other Version) int { 142 | if v.Major != other.Major { 143 | return v.Major - other.Major 144 | } 145 | if v.Minor != other.Minor { 146 | return v.Minor - other.Minor 147 | } 148 | return v.Patch - other.Patch 149 | } 150 | -------------------------------------------------------------------------------- /pkg/stream/available_features_test.go: -------------------------------------------------------------------------------- 1 | package stream 2 | 3 | import ( 4 | "fmt" 5 | 6 | . "github.com/onsi/ginkgo/v2" 7 | . "github.com/onsi/gomega" 8 | ) 9 | 10 | var _ = Describe("Available Features", func() { 11 | 12 | It("Parse Version", func() { 13 | v, err := parseVersion("1.2.3") 14 | Expect(err).NotTo(HaveOccurred()) 15 | Expect(v).To(Equal(Version{Major: 1, Minor: 2, Patch: 3})) 16 | 17 | _, err = parseVersion("1.2") 18 | Expect(err).To(HaveOccurred()) 19 | Expect(fmt.Sprintf("%s", err)).To(ContainSubstring("invalid version format: 1.2")) 20 | 21 | _, err = parseVersion("error.3.3") 22 | Expect(err).To(HaveOccurred()) 23 | Expect(fmt.Sprintf("%s", err)).To(ContainSubstring("invalid major version: error")) 24 | 25 | _, err = parseVersion("1.error.3") 26 | Expect(err).To(HaveOccurred()) 27 | Expect(fmt.Sprintf("%s", err)).To(ContainSubstring("invalid minor version: error")) 28 | 29 | _, err = parseVersion("1.2.error") 30 | Expect(err).To(HaveOccurred()) 31 | Expect(fmt.Sprintf("%s", err)).To(ContainSubstring("invalid patch version: error")) 32 | 33 | v, err = parseVersion(extractVersion("3.12.1-rc1")) 34 | Expect(err).NotTo(HaveOccurred()) 35 | Expect(v).To(Equal(Version{Major: 3, Minor: 12, Patch: 1})) 36 | 37 | v, err = parseVersion(extractVersion("3.13.1-alpha.234")) 38 | Expect(err).NotTo(HaveOccurred()) 39 
| Expect(v).To(Equal(Version{Major: 3, Minor: 13, Patch: 1})) 40 | }) 41 | 42 | It("Is Version Greater Or Equal", func() { 43 | Expect(IsVersionGreaterOrEqual("1.2.3", "1.2.3")).To(BeTrue()) 44 | Expect(IsVersionGreaterOrEqual("1.2.3", "1.2.2")).To(BeTrue()) 45 | Expect(IsVersionGreaterOrEqual("1.2.3", "1.2.4")).To(BeFalse()) 46 | Expect(IsVersionGreaterOrEqual("1.2.3", "1.3.3")).To(BeFalse()) 47 | Expect(IsVersionGreaterOrEqual("1.2.3", "2.2.3")).To(BeFalse()) 48 | Expect(IsVersionGreaterOrEqual("3.1.3-alpha.1", "2.2.3")).To(BeFalse()) 49 | Expect(IsVersionGreaterOrEqual("3.3.3-rc.1", "2.2.3")).To(BeFalse()) 50 | 51 | Expect(IsVersionGreaterOrEqual("error.3.2", "2.2.3")).To(BeFalse()) 52 | Expect(IsVersionGreaterOrEqual("4.3.2", "2.error.3")).To(BeFalse()) 53 | 54 | }) 55 | 56 | It("Available Features check Version", func() { 57 | var availableFeatures = newAvailableFeatures() 58 | Expect(availableFeatures).NotTo(BeNil()) 59 | Expect(availableFeatures.SetVersion("error")).NotTo(BeNil()) 60 | Expect(availableFeatures.SetVersion("3.9.0")).To(BeNil()) 61 | Expect(availableFeatures.Is311OrMore()).To(BeFalse()) 62 | Expect(availableFeatures.Is313OrMore()).To(BeFalse()) 63 | Expect(availableFeatures.SetVersion("3.11.0")).To(BeNil()) 64 | Expect(availableFeatures.Is311OrMore()).To(BeTrue()) 65 | Expect(availableFeatures.Is313OrMore()).To(BeFalse()) 66 | Expect(availableFeatures.SetVersion("3.13.0")).To(BeNil()) 67 | Expect(availableFeatures.Is311OrMore()).To(BeTrue()) 68 | Expect(availableFeatures.Is313OrMore()).To(BeTrue()) 69 | Expect(availableFeatures.SetVersion("3.13.1-alpha.234")).To(BeNil()) 70 | Expect(availableFeatures.Is311OrMore()).To(BeTrue()) 71 | Expect(availableFeatures.Is313OrMore()).To(BeTrue()) 72 | }) 73 | It("Available Features parse command", func() { 74 | var availableFeatures = newAvailableFeatures() 75 | 76 | Expect(availableFeatures.SetVersion("3.13.0")).To(BeNil()) 77 | availableFeatures.ParseCommandVersions( 78 | []commandVersion{ 79 | 
PublishFilter{}, 80 | }, 81 | ) 82 | Expect(availableFeatures.BrokerFilterEnabled()).To(BeTrue()) 83 | }) 84 | }) 85 | -------------------------------------------------------------------------------- /pkg/stream/blocking_queue.go: -------------------------------------------------------------------------------- 1 | package stream 2 | 3 | import ( 4 | "errors" 5 | "sync/atomic" 6 | "time" 7 | 8 | "github.com/rabbitmq/rabbitmq-stream-go-client/pkg/logs" 9 | ) 10 | 11 | var ErrBlockingQueueStopped = errors.New("blocking queue stopped") 12 | 13 | type BlockingQueue[T any] struct { 14 | queue chan T 15 | status int32 16 | } 17 | 18 | // NewBlockingQueue initializes a new BlockingQueue with the given capacity 19 | func NewBlockingQueue[T any](capacity int) *BlockingQueue[T] { 20 | return &BlockingQueue[T]{ 21 | queue: make(chan T, capacity), 22 | status: 0, 23 | } 24 | } 25 | 26 | // Enqueue adds an item to the queue, blocking if the queue is full 27 | func (bq *BlockingQueue[T]) Enqueue(item T) error { 28 | if bq.IsStopped() { 29 | return ErrBlockingQueueStopped 30 | } 31 | bq.queue <- item 32 | return nil 33 | } 34 | 35 | func (bq *BlockingQueue[T]) GetChannel() chan T { 36 | return bq.queue 37 | } 38 | 39 | func (bq *BlockingQueue[T]) Size() int { 40 | return len(bq.queue) 41 | } 42 | 43 | func (bq *BlockingQueue[T]) IsEmpty() bool { 44 | return len(bq.queue) == 0 45 | } 46 | 47 | // Stop stops the queue from accepting new items 48 | // but allows some pending items. 49 | // Stop is different from Close in that it allows the 50 | // existing items to be processed. 51 | // Drain the queue to be sure there are not pending messages 52 | func (bq *BlockingQueue[T]) Stop() []T { 53 | atomic.StoreInt32(&bq.status, 1) 54 | // drain the queue. 
To be sure there are not pending messages 55 | // in the queue and return to the caller the remaining pending messages 56 | msgInQueue := make([]T, 0, len(bq.queue)) 57 | outer: 58 | for { 59 | select { 60 | case msg := <-bq.queue: 61 | msgInQueue = append(msgInQueue, msg) 62 | case <-time.After(10 * time.Millisecond): 63 | break outer 64 | } 65 | } 66 | logs.LogDebug("BlockingQueue stopped") 67 | return msgInQueue 68 | } 69 | 70 | func (bq *BlockingQueue[T]) Close() { 71 | if bq.IsStopped() { 72 | atomic.StoreInt32(&bq.status, 2) 73 | close(bq.queue) 74 | } 75 | } 76 | 77 | func (bq *BlockingQueue[T]) IsStopped() bool { 78 | return atomic.LoadInt32(&bq.status) == 1 || atomic.LoadInt32(&bq.status) == 2 79 | } 80 | -------------------------------------------------------------------------------- /pkg/stream/brokers.go: -------------------------------------------------------------------------------- 1 | package stream 2 | 3 | import ( 4 | "crypto/tls" 5 | "fmt" 6 | "strconv" 7 | "strings" 8 | "sync" 9 | "time" 10 | ) 11 | 12 | type AddressResolver struct { 13 | Host string 14 | Port int 15 | } 16 | 17 | type TCPParameters struct { 18 | tlsConfig *tls.Config 19 | RequestedHeartbeat time.Duration 20 | RequestedMaxFrameSize int 21 | WriteBuffer int 22 | ReadBuffer int 23 | NoDelay bool 24 | } 25 | 26 | type Broker struct { 27 | Host string 28 | Port string 29 | User string 30 | Vhost string 31 | Uri string 32 | Password string 33 | Scheme string 34 | 35 | advHost string 36 | advPort string 37 | } 38 | 39 | func newBrokerDefault() *Broker { 40 | return &Broker{ 41 | Scheme: "rabbitmq-stream", 42 | Host: "localhost", 43 | Port: StreamTcpPort, 44 | User: "guest", 45 | Password: "guest", 46 | Vhost: "/", 47 | } 48 | } 49 | 50 | func newTCPParameterDefault() *TCPParameters { 51 | return &TCPParameters{ 52 | RequestedHeartbeat: defaultHeartbeat, 53 | RequestedMaxFrameSize: 1048576, 54 | WriteBuffer: defaultWriteSocketBuffer, 55 | ReadBuffer: defaultReadSocketBuffer, 56 | 
NoDelay: true, 57 | tlsConfig: nil, 58 | } 59 | } 60 | 61 | func (br *Broker) isTLS() bool { 62 | return strings.Index(br.Scheme, "+tls") > 0 63 | } 64 | 65 | func (br *Broker) mergeWithDefault() { 66 | broker := newBrokerDefault() 67 | if br.Host == "" { 68 | br.Host = broker.Host 69 | } 70 | if br.Vhost == "" { 71 | br.Vhost = broker.Vhost 72 | } 73 | 74 | if br.User == "" { 75 | br.User = broker.User 76 | } 77 | if br.User == "" { 78 | br.User = broker.User 79 | } 80 | if br.Password == "" { 81 | br.Password = broker.Password 82 | } 83 | if br.Port == "" || br.Port == "0" { 84 | br.Port = broker.Port 85 | } 86 | if br.Scheme == "" { 87 | br.Scheme = broker.Scheme 88 | } 89 | } 90 | 91 | func (br *Broker) cloneFrom(broker *Broker, resolver *AddressResolver) { 92 | br.User = broker.User 93 | br.Password = broker.Password 94 | br.Vhost = broker.Vhost 95 | br.Scheme = broker.Scheme 96 | if resolver != nil { 97 | br.Host = resolver.Host 98 | br.Port = strconv.Itoa(resolver.Port) 99 | } 100 | } 101 | 102 | func (br *Broker) GetUri() string { 103 | if br.Uri == "" { 104 | br.Uri = fmt.Sprintf("%s://%s:%s@%s:%s/%s", 105 | br.Scheme, 106 | br.User, br.Password, 107 | br.Host, br.Port, br.Vhost) 108 | } 109 | return br.Uri 110 | } 111 | 112 | func newBroker(host string, port string) *Broker { 113 | return &Broker{ 114 | Host: host, 115 | Port: port, 116 | } 117 | } 118 | 119 | type Brokers struct { 120 | items *sync.Map 121 | } 122 | 123 | func newBrokers() *Brokers { 124 | return &Brokers{items: &sync.Map{}} 125 | } 126 | 127 | func (brs *Brokers) Add(brokerReference int16, host string, port uint32) *Broker { 128 | broker := newBroker(host, strconv.Itoa(int(port))) 129 | brs.items.Store(brokerReference, broker) 130 | return broker 131 | } 132 | 133 | func (brs *Brokers) Get(brokerReference int16) *Broker { 134 | value, ok := brs.items.Load(brokerReference) 135 | if !ok { 136 | return nil 137 | } 138 | 139 | return value.(*Broker) 140 | } 141 | 142 | func (br *Broker) 
hostPort() string { 143 | return fmt.Sprintf("%s:%s", br.Host, br.Port) 144 | } 145 | 146 | type StreamMetadata struct { 147 | stream string 148 | responseCode uint16 149 | Leader *Broker 150 | Replicas []*Broker 151 | } 152 | 153 | func (sm StreamMetadata) String() string { 154 | replicas := "" 155 | for _, replica := range sm.Replicas { 156 | replicas += fmt.Sprintf(" - %s:%s", replica.Host, replica.Port) 157 | } 158 | return fmt.Sprintf("leader %s:%s, followers %s ", sm.Leader.Host, sm.Leader.Port, replicas) 159 | } 160 | 161 | func (StreamMetadata) New(stream string, responseCode uint16, 162 | leader *Broker, replicas []*Broker) *StreamMetadata { 163 | return &StreamMetadata{stream: stream, responseCode: responseCode, 164 | Leader: leader, Replicas: replicas} 165 | } 166 | 167 | type StreamsMetadata struct { 168 | items *sync.Map 169 | } 170 | 171 | func (StreamsMetadata) New() *StreamsMetadata { 172 | return &StreamsMetadata{&sync.Map{}} 173 | } 174 | 175 | func (smd *StreamsMetadata) Add(stream string, responseCode uint16, 176 | leader *Broker, replicas []*Broker) *StreamMetadata { 177 | streamMetadata := StreamMetadata{}.New(stream, responseCode, 178 | leader, replicas) 179 | smd.items.Store(stream, streamMetadata) 180 | return streamMetadata 181 | } 182 | 183 | func (smd *StreamsMetadata) Get(stream string) *StreamMetadata { 184 | value, ok := smd.items.Load(stream) 185 | if !ok { 186 | return nil 187 | } 188 | return value.(*StreamMetadata) 189 | } 190 | -------------------------------------------------------------------------------- /pkg/stream/buffer_reader.go: -------------------------------------------------------------------------------- 1 | package stream 2 | 3 | import ( 4 | "bufio" 5 | "encoding/binary" 6 | "io" 7 | ) 8 | 9 | func readUShort(readerStream io.Reader) uint16 { 10 | var res uint16 11 | _ = binary.Read(readerStream, binary.BigEndian, &res) 12 | return res 13 | } 14 | 15 | func readShort(readerStream io.Reader) int16 { 16 | var res int16 
17 | _ = binary.Read(readerStream, binary.BigEndian, &res) 18 | return res 19 | } 20 | 21 | func readUInt(readerStream io.Reader) (uint32, error) { 22 | var res uint32 23 | err := binary.Read(readerStream, binary.BigEndian, &res) 24 | return res, err 25 | } 26 | 27 | func peekByte(readerStream *bufio.Reader) (uint8, error) { 28 | res, err := readerStream.Peek(1) 29 | if err != nil { 30 | return 0, err 31 | } 32 | return res[0], nil 33 | } 34 | 35 | func readInt64(readerStream io.Reader) int64 { 36 | var res int64 37 | _ = binary.Read(readerStream, binary.BigEndian, &res) 38 | return res 39 | } 40 | 41 | func readByte(readerStream io.Reader) uint8 { 42 | var res uint8 43 | _ = binary.Read(readerStream, binary.BigEndian, &res) 44 | return res 45 | } 46 | 47 | func readByteError(readerStream io.Reader) (uint8, error) { 48 | var res uint8 49 | err := binary.Read(readerStream, binary.BigEndian, &res) 50 | return res, err 51 | } 52 | 53 | func readString(readerStream io.Reader) string { 54 | lenString := readUShort(readerStream) 55 | buff := make([]byte, lenString) 56 | _ = binary.Read(readerStream, binary.BigEndian, &buff) 57 | return string(buff) 58 | } 59 | 60 | func readUint8Array(readerStream io.Reader, size uint32) []uint8 { 61 | var res = make([]uint8, size) 62 | _, _ = io.ReadFull(readerStream, res) 63 | return res 64 | } 65 | -------------------------------------------------------------------------------- /pkg/stream/buffer_writer.go: -------------------------------------------------------------------------------- 1 | package stream 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "encoding/binary" 7 | ) 8 | 9 | func writeLong(inputBuff *bytes.Buffer, value int64) { 10 | writeULong(inputBuff, uint64(value)) 11 | } 12 | 13 | func writeULong(inputBuff *bytes.Buffer, value uint64) { 14 | var buff = make([]byte, 8) 15 | binary.BigEndian.PutUint64(buff, value) 16 | inputBuff.Write(buff) 17 | } 18 | 19 | func writeBLong(inputBuff *bufio.Writer, value int64) error { 20 | 
return writeBULong(inputBuff, uint64(value)) 21 | } 22 | 23 | func writeBULong(inputBuff *bufio.Writer, value uint64) error { 24 | var buff = make([]byte, 8) 25 | binary.BigEndian.PutUint64(buff, value) 26 | _, err := inputBuff.Write(buff) 27 | return err 28 | } 29 | 30 | func writeShort(inputBuff *bytes.Buffer, value int16) { 31 | writeUShort(inputBuff, uint16(value)) 32 | } 33 | 34 | func writeUShort(inputBuff *bytes.Buffer, value uint16) { 35 | var buff = make([]byte, 2) 36 | binary.BigEndian.PutUint16(buff, value) 37 | inputBuff.Write(buff) 38 | } 39 | 40 | func writeBShort(inputBuff *bufio.Writer, value int16) error { 41 | return writeBUShort(inputBuff, uint16(value)) 42 | } 43 | func writeBUShort(inputBuff *bufio.Writer, value uint16) error { 44 | var buff = make([]byte, 2) 45 | binary.BigEndian.PutUint16(buff, value) 46 | _, err := inputBuff.Write(buff) 47 | return err 48 | } 49 | 50 | func writeBString(inputBuff *bufio.Writer, value string) error { 51 | err := writeBUShort(inputBuff, uint16(len(value))) 52 | if err != nil { 53 | return err 54 | } 55 | 56 | _, err = inputBuff.Write([]byte(value)) 57 | return err 58 | } 59 | 60 | func writeByte(inputBuff *bytes.Buffer, value byte) { 61 | var buff = make([]byte, 1) 62 | buff[0] = value 63 | inputBuff.Write(buff) 64 | } 65 | 66 | func writeBByte(inputBuff *bufio.Writer, value byte) error { 67 | var buff = make([]byte, 1) 68 | buff[0] = value 69 | _, err := inputBuff.Write(buff) 70 | return err 71 | } 72 | 73 | func writeInt(inputBuff *bytes.Buffer, value int) { 74 | writeUInt(inputBuff, uint32(value)) 75 | } 76 | func writeUInt(inputBuff *bytes.Buffer, value uint32) { 77 | var buff = make([]byte, 4) 78 | binary.BigEndian.PutUint32(buff, value) 79 | inputBuff.Write(buff) 80 | } 81 | 82 | func writeBInt(inputBuff *bufio.Writer, value int) error { 83 | return writeBUInt(inputBuff, uint32(value)) 84 | } 85 | 86 | func writeBUInt(inputBuff *bufio.Writer, value uint32) error { 87 | var buff = make([]byte, 4) 88 | 
binary.BigEndian.PutUint32(buff, value) 89 | _, err := inputBuff.Write(buff) 90 | return err 91 | } 92 | 93 | func writeString(inputBuff *bytes.Buffer, value string) { 94 | writeUShort(inputBuff, uint16(len(value))) 95 | inputBuff.Write([]byte(value)) 96 | } 97 | 98 | func writeStringArray(inputBuff *bytes.Buffer, array []string) { 99 | writeInt(inputBuff, len(array)) 100 | for _, s := range array { 101 | writeString(inputBuff, s) 102 | } 103 | } 104 | 105 | func writeMapStringString(inputBuff *bytes.Buffer, mapString map[string]string) { 106 | writeInt(inputBuff, len(mapString)) 107 | for k, v := range mapString { 108 | writeString(inputBuff, k) 109 | writeString(inputBuff, v) 110 | } 111 | } 112 | 113 | func writeBytes(inputBuff *bytes.Buffer, value []byte) { 114 | inputBuff.Write(value) 115 | } 116 | 117 | // writeProtocolHeader protocol utils functions 118 | func writeProtocolHeader(inputBuff *bytes.Buffer, 119 | length int, command uint16, 120 | correlationId ...int) { 121 | writeInt(inputBuff, length) 122 | writeUShort(inputBuff, command) 123 | writeShort(inputBuff, version1) 124 | if len(correlationId) > 0 { 125 | writeInt(inputBuff, correlationId[0]) 126 | } 127 | } 128 | 129 | func writeBProtocolHeader(inputBuff *bufio.Writer, 130 | length int, command int16, 131 | correlationId ...int) error { 132 | return writeBProtocolHeaderVersion(inputBuff, length, command, version1, correlationId...) 
133 | } 134 | 135 | func writeBProtocolHeaderVersion(inputBuff *bufio.Writer, length int, command int16, 136 | version int16, correlationId ...int) error { 137 | if err := writeBInt(inputBuff, length); err != nil { 138 | return err 139 | } 140 | if err := writeBShort(inputBuff, command); err != nil { 141 | return err 142 | } 143 | if err := writeBShort(inputBuff, version); err != nil { 144 | return err 145 | } 146 | 147 | if len(correlationId) > 0 { 148 | if err := writeBInt(inputBuff, correlationId[0]); err != nil { 149 | return err 150 | } 151 | } 152 | 153 | return nil 154 | } 155 | 156 | func sizeOfStringArray(array []string) int { 157 | size := 0 158 | for _, s := range array { 159 | size += 2 + len(s) 160 | } 161 | return size 162 | } 163 | 164 | func sizeOfMapStringString(mapString map[string]string) int { 165 | size := 0 166 | for k, v := range mapString { 167 | size += 2 + len(k) + 2 + len(v) 168 | } 169 | return size 170 | } 171 | 172 | func bytesLenghPrefixed(msg []byte) []byte { 173 | size := len(msg) 174 | buff := make([]byte, 4+size) 175 | 176 | binary.BigEndian.PutUint32(buff, uint32(size)) 177 | copy(buff[4:], msg) 178 | 179 | return buff 180 | } 181 | -------------------------------------------------------------------------------- /pkg/stream/converters.go: -------------------------------------------------------------------------------- 1 | package stream 2 | 3 | import ( 4 | "fmt" 5 | "regexp" 6 | "strconv" 7 | "strings" 8 | 9 | "github.com/pkg/errors" 10 | ) 11 | 12 | const ( 13 | UnitMb = "mb" 14 | UnitKb = "kb" 15 | UnitGb = "gb" 16 | UnitTb = "tb" 17 | kilobytesMultiplier = 1000 18 | megabytesMultiplier = 1000 * 1000 19 | gigabytesMultiplier = 1000 * 1000 * 1000 20 | terabytesMultiplier = 1000 * 1000 * 1000 * 1000 21 | ) 22 | 23 | type ByteCapacity struct { 24 | bytes int64 25 | error error 26 | } 27 | 28 | func (byteCapacity ByteCapacity) B(value int64) *ByteCapacity { 29 | return &ByteCapacity{bytes: value, error: nil} 30 | } 31 | 32 | func 
(byteCapacity ByteCapacity) KB(value int64) *ByteCapacity { 33 | return &ByteCapacity{bytes: value * kilobytesMultiplier, error: nil} 34 | } 35 | 36 | func (byteCapacity ByteCapacity) MB(value int64) *ByteCapacity { 37 | return &ByteCapacity{bytes: value * megabytesMultiplier, error: nil} 38 | } 39 | 40 | func (byteCapacity ByteCapacity) GB(value int64) *ByteCapacity { 41 | return &ByteCapacity{bytes: value * gigabytesMultiplier, error: nil} 42 | } 43 | func (byteCapacity ByteCapacity) TB(value int64) *ByteCapacity { 44 | return &ByteCapacity{bytes: value * terabytesMultiplier, error: nil} 45 | } 46 | 47 | func (byteCapacity ByteCapacity) From(value string) *ByteCapacity { 48 | if value == "" || value == "0" { 49 | return &ByteCapacity{bytes: 0, error: nil} 50 | } 51 | 52 | match := regexp.MustCompile("^((kb|mb|gb|tb))") 53 | foundUnitSize := strings.ToLower(value[len(value)-2:]) 54 | 55 | if match.MatchString(foundUnitSize) { 56 | size, err := strconv.Atoi(value[:len(value)-2]) 57 | if err != nil { 58 | return &ByteCapacity{bytes: 0, error: errors.New(fmt.Sprintf("Capacity, Invalid number format: %s", value))} 59 | } 60 | 61 | switch foundUnitSize { 62 | case UnitKb: 63 | return byteCapacity.KB(int64(size)) 64 | 65 | case UnitMb: 66 | return byteCapacity.MB(int64(size)) 67 | 68 | case UnitGb: 69 | return byteCapacity.GB(int64(size)) 70 | 71 | case UnitTb: 72 | return byteCapacity.TB(int64(size)) 73 | } 74 | } 75 | 76 | return &ByteCapacity{bytes: 0, 77 | error: errors.New(fmt.Sprintf("Capacity, Invalid unit size format: %s", value))} 78 | } 79 | -------------------------------------------------------------------------------- /pkg/stream/converters_test.go: -------------------------------------------------------------------------------- 1 | package stream 2 | 3 | import ( 4 | "fmt" 5 | 6 | . "github.com/onsi/ginkgo/v2" 7 | . 
"github.com/onsi/gomega" 8 | ) 9 | 10 | var _ = Describe("Converters", func() { 11 | 12 | It("Converter from number", func() { 13 | Expect(ByteCapacity{}.B(100).bytes).To(Equal(int64(100))) 14 | Expect(ByteCapacity{}.KB(1).bytes).To(Equal(int64(1000))) 15 | Expect(ByteCapacity{}.MB(1).bytes).To(Equal(int64(1000 * 1000))) 16 | Expect(ByteCapacity{}.GB(1).bytes).To(Equal(int64(1000 * 1000 * 1000))) 17 | Expect(ByteCapacity{}.TB(1).bytes).To(Equal(int64(1000 * 1000 * 1000 * 1000))) 18 | }) 19 | 20 | It("Converter from string", func() { 21 | v := ByteCapacity{}.From("1KB") 22 | Expect(v.error).NotTo(HaveOccurred()) 23 | Expect(v.bytes).To(Equal(int64(1000))) 24 | 25 | v = ByteCapacity{}.From("1MB") 26 | Expect(v.error).NotTo(HaveOccurred()) 27 | Expect(v.bytes).To(Equal(int64(1000 * 1000))) 28 | 29 | v = ByteCapacity{}.From("1GB") 30 | Expect(v.error).NotTo(HaveOccurred()) 31 | Expect(v.bytes).To(Equal(int64(1000 * 1000 * 1000))) 32 | 33 | v = ByteCapacity{}.From("1tb") 34 | Expect(v.error).NotTo(HaveOccurred()) 35 | Expect(v.bytes).To(Equal(int64(1000 * 1000 * 1000 * 1000))) 36 | }) 37 | 38 | It("Converter from string logError", func() { 39 | v := ByteCapacity{}.From("10LL") 40 | Expect(fmt.Sprintf("%s", v.error)). 41 | To(ContainSubstring("Invalid unit size format")) 42 | 43 | v = ByteCapacity{}.From("aGB") 44 | Expect(fmt.Sprintf("%s", v.error)). 45 | To(ContainSubstring("Invalid number format")) 46 | 47 | v = ByteCapacity{}.From("") 48 | Expect(v.bytes).To(Equal(int64(0))) 49 | Expect(v.error).To(BeNil()) 50 | 51 | v = ByteCapacity{}.From("0") 52 | Expect(v.bytes).To(Equal(int64(0))) 53 | Expect(v.error).To(BeNil()) 54 | }) 55 | 56 | }) 57 | -------------------------------------------------------------------------------- /pkg/stream/coordinator_test.go: -------------------------------------------------------------------------------- 1 | package stream 2 | 3 | import ( 4 | "fmt" 5 | 6 | . "github.com/onsi/ginkgo/v2" 7 | . 
"github.com/onsi/gomega" 8 | ) 9 | 10 | var _ = Describe("Coordinator", func() { 11 | 12 | Describe("Add/Remove Producers", func() { 13 | 14 | var ( 15 | client *Client 16 | ) 17 | BeforeEach(func() { 18 | client = newClient("test-client", nil, nil, nil, defaultSocketCallTimeout) 19 | 20 | }) 21 | AfterEach(func() { 22 | client = nil 23 | 24 | }) 25 | 26 | It("Add/Remove Producers ", func() { 27 | p, err := client.coordinator.NewProducer(nil, nil) 28 | Expect(err).NotTo(HaveOccurred()) 29 | Expect(p.id).To(Equal(uint8(0))) 30 | err = client.coordinator.RemoveProducerById(p.id, Event{ 31 | Command: 0, 32 | Reason: "UNIT_TEST", 33 | Err: nil, 34 | }) 35 | Expect(err).NotTo(HaveOccurred()) 36 | }) 37 | 38 | It("producer not found remove by id ", func() { 39 | err := client.coordinator.RemoveProducerById(200, Event{ 40 | Command: 0, 41 | Reason: "UNIT_TEST", 42 | Err: nil, 43 | }) 44 | Expect(err).To(HaveOccurred()) 45 | }) 46 | 47 | It("massive insert/delete coordinator ", func() { 48 | var producersId []uint8 49 | for range 100 { 50 | p, err := client.coordinator.NewProducer(nil, nil) 51 | producersId = append(producersId, p.id) 52 | Expect(err).NotTo(HaveOccurred()) 53 | } 54 | Expect(client.coordinator.ProducersCount()).To(Equal(100)) 55 | for _, pid := range producersId { 56 | err := client.coordinator.RemoveProducerById(pid, Event{ 57 | Command: 0, 58 | Reason: "UNIT_TEST", 59 | Err: nil, 60 | }) 61 | Expect(err).NotTo(HaveOccurred()) 62 | } 63 | Expect(client.coordinator.ProducersCount()).To(Equal(0)) 64 | }) 65 | 66 | It("Get next publisher id ", func() { 67 | // until reach 255 then start reusing the old 68 | // unused ids 69 | for i := 0; i < 250; i++ { 70 | p, err := client.coordinator.NewProducer(nil, nil) 71 | Expect(err).NotTo(HaveOccurred()) 72 | Expect(p.id).To(Equal(uint8(i))) 73 | err = client.coordinator.RemoveProducerById(p.id, Event{ 74 | Command: 0, 75 | StreamName: "", 76 | Name: "UNIT TEST", 77 | Reason: "", 78 | Err: nil, 79 | }) 80 | 
Expect(err).NotTo(HaveOccurred()) 81 | } 82 | }) 83 | 84 | It("To many publishers ", func() { 85 | var producersId []uint8 86 | for i := 0; i < 500; i++ { 87 | 88 | p, err := client.coordinator.NewProducer(nil, nil) 89 | if i >= int(^uint8(0)) { 90 | Expect(fmt.Sprintf("%s", err)). 91 | To(ContainSubstring("No more items available")) 92 | } else { 93 | Expect(err).NotTo(HaveOccurred()) 94 | producersId = append(producersId, p.id) 95 | } 96 | } 97 | 98 | // just some random remove, 99 | randomRemove := []uint8{5, 127, 250, 36, 57, 99, 102, 88} 100 | for _, v := range randomRemove { 101 | // remove an producer then recreate it and I must have the 102 | // missing item 103 | err := client.coordinator.RemoveProducerById(v, Event{ 104 | Reason: "UNIT_TEST", 105 | }) 106 | Expect(err).NotTo(HaveOccurred()) 107 | err = client.coordinator.RemoveProducerById(v, Event{ 108 | Reason: "UNIT_TEST", 109 | }) 110 | // raise an logError not found 111 | Expect(err).To(HaveOccurred()) 112 | 113 | p, err := client.coordinator.NewProducer(nil, nil) 114 | Expect(err).NotTo(HaveOccurred()) 115 | Expect(p.id).To(Equal(v)) 116 | } 117 | 118 | for _, pid := range producersId { 119 | err := client.coordinator.RemoveProducerById(pid, Event{ 120 | Reason: "UNIT_TEST", 121 | }) 122 | Expect(err).NotTo(HaveOccurred()) 123 | } 124 | 125 | }) 126 | 127 | }) 128 | 129 | Describe("Add/Remove consumers", func() { 130 | 131 | var ( 132 | client *Client 133 | ) 134 | BeforeEach(func() { 135 | client = newClient("test-client", nil, nil, nil, defaultSocketCallTimeout) 136 | 137 | }) 138 | AfterEach(func() { 139 | client = nil 140 | 141 | }) 142 | 143 | It("Add/Remove consumers ", func() { 144 | p, err := client.coordinator.NewProducer(nil, nil) 145 | Expect(err).NotTo(HaveOccurred()) 146 | Expect(p.id).To(Equal(uint8(0))) 147 | err = client.coordinator.RemoveProducerById(p.id, Event{ 148 | Reason: "UNIT_TEST", 149 | }) 150 | Expect(err).NotTo(HaveOccurred()) 151 | }) 152 | 153 | It("producer not found 
remove by id ", func() { 154 | err := client.coordinator.RemoveProducerById(200, Event{ 155 | Reason: "UNIT_TEST", 156 | }) 157 | Expect(err).To(HaveOccurred()) 158 | }) 159 | It("consumer not found get by id ", func() { 160 | _, err := client.coordinator.GetConsumerById(200) 161 | Expect(err).To(HaveOccurred()) 162 | }) 163 | 164 | It("massive insert/delete consumers ", func() { 165 | var consumersId []uint8 166 | for range 100 { 167 | p := client.coordinator.NewConsumer(nil, NewConsumerOptions(), nil) 168 | consumersId = append(consumersId, p.ID) 169 | } 170 | Expect(client.coordinator.ConsumersCount()).To(Equal(100)) 171 | for _, pid := range consumersId { 172 | err := client.coordinator.RemoveConsumerById(pid, Event{ 173 | Command: 0, 174 | StreamName: "UNIT_TESTS", 175 | Name: "", 176 | Reason: "UNIT_TEST", 177 | Err: nil, 178 | }) 179 | Expect(err).NotTo(HaveOccurred()) 180 | } 181 | Expect(client.coordinator.ConsumersCount()).To(Equal(0)) 182 | }) 183 | }) 184 | 185 | Describe("Add/Remove Response", func() { 186 | var ( 187 | client *Client 188 | ) 189 | BeforeEach(func() { 190 | client = newClient("test-client", nil, nil, nil, defaultSocketCallTimeout) 191 | 192 | }) 193 | AfterEach(func() { 194 | client = nil 195 | 196 | }) 197 | 198 | It("Add/Remove Response ", func() { 199 | r := client.coordinator.NewResponse(commandUnitTest) 200 | Expect(r.correlationid).ToNot(Equal(0)) 201 | err := client.coordinator.RemoveResponseById(r.correlationid) 202 | Expect(err).NotTo(HaveOccurred()) 203 | }) 204 | It("not found Response by id ", func() { 205 | err := client.coordinator.RemoveResponseById(200) 206 | Expect(err).To(HaveOccurred()) 207 | 208 | err = client.coordinator.RemoveResponseByName("it does not exist") 209 | Expect(err).To(HaveOccurred()) 210 | 211 | _, err = client.coordinator.GetResponseById(255) 212 | Expect(err).To(HaveOccurred()) 213 | 214 | }) 215 | It("massive insert/delete Responses ", func() { 216 | var responsesId []int 217 | for range 100 { 218 
| r := client.coordinator.NewResponse(commandUnitTest) 219 | responsesId = append(responsesId, r.correlationid) 220 | } 221 | 222 | for _, pid := range responsesId { 223 | err := client.coordinator.RemoveResponseById(pid) 224 | Expect(err).NotTo(HaveOccurred()) 225 | } 226 | }) 227 | }) 228 | 229 | }) 230 | -------------------------------------------------------------------------------- /pkg/stream/environment_debug.go: -------------------------------------------------------------------------------- 1 | //go:build debug 2 | // +build debug 3 | 4 | package stream 5 | 6 | //type ProducersCoordinator = environmentCoordinator 7 | // 8 | //func (env *Environment) ClientCoordinator() map[string]*ProducersCoordinator { 9 | // return env.producers.producersCoordinator 10 | //} 11 | // 12 | //func (env *Environment) Nodes() []string { 13 | // var result []string 14 | // for s, _ := range env.producers.producersCoordinator { 15 | // result = append(result, s) 16 | // } 17 | // sort.Strings(result) 18 | // return result 19 | //} 20 | // 21 | //func (env *Environment) ProducerPerStream(streamName string) []*Producer { 22 | // var result []*Producer 23 | // for _, p := range env.producers.producersCoordinator { 24 | // for _, client := range p.getClientsPerContext() { 25 | // for _, prod := range client.coordinator.producers { 26 | // if prod.(*Producer).options.streamName == streamName { 27 | // result = append(result, prod.(*Producer)) 28 | // } 29 | // } 30 | // } 31 | // } 32 | // return result 33 | //} 34 | // 35 | //func (env *Environment) ClientsPerStream(streamName string) []*Client { 36 | // var result []*Client 37 | // for _, p := range env.producers.producersCoordinator { 38 | // for _, client := range p.getClientsPerContext() { 39 | // for _, prod := range client.coordinator.producers { 40 | // if prod.(*Producer).options.streamName == streamName { 41 | // result = append(result, client) 42 | // } 43 | // } 44 | // } 45 | // } 46 | // return result 47 | //} 48 | // 
49 | //func (env *Environment) Coordinators() []*Coordinator { 50 | // var result []*Coordinator 51 | // for _, p := range env.producers.producersCoordinator { 52 | // for _, client := range p.getClientsPerContext() { 53 | // result = append(result, client.coordinator) 54 | // } 55 | // } 56 | // return result 57 | //} 58 | -------------------------------------------------------------------------------- /pkg/stream/exchange_commands.go: -------------------------------------------------------------------------------- 1 | package stream 2 | 3 | type commandVersion interface { 4 | GetMinVersion() uint16 5 | GetMaxVersion() uint16 6 | GetCommandKey() uint16 7 | } 8 | 9 | type commandVersionResponse struct { 10 | minVersion uint16 11 | maxVersion uint16 12 | commandKey uint16 13 | } 14 | 15 | func (c commandVersionResponse) GetMinVersion() uint16 { 16 | return c.minVersion 17 | } 18 | 19 | func (c commandVersionResponse) GetMaxVersion() uint16 { 20 | return c.maxVersion 21 | } 22 | 23 | func (c commandVersionResponse) GetCommandKey() uint16 { 24 | return c.commandKey 25 | } 26 | 27 | func newCommandVersionResponse(minVersion, maxVersion, commandKey uint16) commandVersionResponse { 28 | return commandVersionResponse{ 29 | minVersion: minVersion, 30 | maxVersion: maxVersion, 31 | commandKey: commandKey, 32 | } 33 | } 34 | 35 | type PublishFilter struct { 36 | } 37 | 38 | func (p PublishFilter) GetMinVersion() uint16 { 39 | return version1 40 | } 41 | 42 | func (p PublishFilter) GetMaxVersion() uint16 { 43 | return version2 44 | } 45 | 46 | func (p PublishFilter) GetCommandKey() uint16 { 47 | return commandPublish 48 | } 49 | -------------------------------------------------------------------------------- /pkg/stream/listeners.go: -------------------------------------------------------------------------------- 1 | package stream 2 | 3 | type Event struct { 4 | Command uint16 5 | StreamName string 6 | Name string 7 | Reason string 8 | Err error 9 | } 10 | 11 | type 
ChannelClose = <-chan Event 12 | type ChannelPublishConfirm chan []*ConfirmationStatus 13 | -------------------------------------------------------------------------------- /pkg/stream/producer_unconfirmed.go: -------------------------------------------------------------------------------- 1 | package stream 2 | 3 | import ( 4 | "sync" 5 | "time" 6 | 7 | "github.com/rabbitmq/rabbitmq-stream-go-client/pkg/logs" 8 | ) 9 | 10 | // unConfirmed is a structure that holds unconfirmed messages 11 | // And unconfirmed message is a message that has been sent to the broker but not yet confirmed, 12 | // and it is added to the unConfirmed structure as soon is possible when 13 | // 14 | // the Send() or BatchSend() method is called 15 | // 16 | // The confirmation status is updated when the confirmation is received from the broker (see server_frame.go) 17 | // or due of timeout. The Timeout is configurable, and it is calculated client side. 18 | type unConfirmed struct { 19 | messages map[int64]*ConfirmationStatus 20 | mutexMessageMap sync.RWMutex 21 | maxSize int 22 | blockSignal *sync.Cond 23 | } 24 | 25 | func newUnConfirmed(maxSize int) *unConfirmed { 26 | r := &unConfirmed{ 27 | messages: make(map[int64]*ConfirmationStatus, maxSize), 28 | mutexMessageMap: sync.RWMutex{}, 29 | maxSize: maxSize, 30 | blockSignal: sync.NewCond(&sync.Mutex{}), 31 | } 32 | return r 33 | } 34 | 35 | func (u *unConfirmed) addFromSequences(messages []*messageSequence, producerID uint8) { 36 | if u.size() > u.maxSize { 37 | u.blockSignal.L.Lock() 38 | u.blockSignal.Wait() 39 | u.blockSignal.L.Unlock() 40 | } 41 | 42 | u.mutexMessageMap.Lock() 43 | for _, msgSeq := range messages { 44 | u.messages[msgSeq.publishingId] = &ConfirmationStatus{ 45 | inserted: time.Now(), 46 | message: msgSeq.sourceMsg, 47 | producerID: producerID, 48 | publishingId: msgSeq.publishingId, 49 | confirmed: false, 50 | } 51 | } 52 | u.mutexMessageMap.Unlock() 53 | } 54 | 55 | func (u *unConfirmed) link(from int64, to int64) 
{ 56 | u.mutexMessageMap.Lock() 57 | defer u.mutexMessageMap.Unlock() 58 | r := u.messages[from] 59 | if r != nil { 60 | r.linkedTo = append(r.linkedTo, u.messages[to]) 61 | } 62 | } 63 | 64 | func (u *unConfirmed) extractWithConfirms(ids []int64) []*ConfirmationStatus { 65 | u.mutexMessageMap.Lock() 66 | defer u.mutexMessageMap.Unlock() 67 | 68 | res := make([]*ConfirmationStatus, 0, len(ids)) 69 | for _, v := range ids { 70 | m := u.extract(v, 0, true) 71 | if m != nil { 72 | res = append(res, m) 73 | if m.linkedTo != nil { 74 | res = append(res, m.linkedTo...) 75 | } 76 | } 77 | } 78 | u.maybeUnLock() 79 | return res 80 | } 81 | 82 | func (u *unConfirmed) extractWithError(id int64, errorCode uint16) *ConfirmationStatus { 83 | u.mutexMessageMap.Lock() 84 | defer u.mutexMessageMap.Unlock() 85 | cs := u.extract(id, errorCode, false) 86 | u.maybeUnLock() 87 | return cs 88 | } 89 | 90 | func (u *unConfirmed) extract(id int64, errorCode uint16, confirmed bool) *ConfirmationStatus { 91 | rootMessage := u.messages[id] 92 | if rootMessage != nil { 93 | u.updateStatus(rootMessage, errorCode, confirmed) 94 | 95 | for _, linkedMessage := range rootMessage.linkedTo { 96 | u.updateStatus(linkedMessage, errorCode, confirmed) 97 | delete(u.messages, linkedMessage.publishingId) 98 | } 99 | delete(u.messages, id) 100 | } 101 | return rootMessage 102 | } 103 | 104 | func (u *unConfirmed) updateStatus(rootMessage *ConfirmationStatus, errorCode uint16, confirmed bool) { 105 | rootMessage.confirmed = confirmed 106 | if confirmed { 107 | return 108 | } 109 | rootMessage.errorCode = errorCode 110 | rootMessage.err = lookErrorCode(errorCode) 111 | } 112 | 113 | func (u *unConfirmed) extractWithTimeOut(timeout time.Duration) []*ConfirmationStatus { 114 | u.mutexMessageMap.Lock() 115 | defer u.mutexMessageMap.Unlock() 116 | var res []*ConfirmationStatus 117 | for _, v := range u.messages { 118 | if time.Since(v.inserted) > timeout { 119 | v := u.extract(v.publishingId, timeoutError, 
false) 120 | res = append(res, v) 121 | } 122 | } 123 | u.maybeUnLock() 124 | return res 125 | } 126 | 127 | func (u *unConfirmed) size() int { 128 | u.mutexMessageMap.Lock() 129 | defer u.mutexMessageMap.Unlock() 130 | return len(u.messages) 131 | } 132 | 133 | func (u *unConfirmed) maybeUnLock() { 134 | if len(u.messages) < u.maxSize { 135 | logs.LogDebug("unConfirmed size: %d back to normal, producer unblocked", u.maxSize) 136 | u.blockSignal.L.Lock() 137 | u.blockSignal.Broadcast() 138 | u.blockSignal.L.Unlock() 139 | } 140 | } 141 | -------------------------------------------------------------------------------- /pkg/stream/socket.go: -------------------------------------------------------------------------------- 1 | package stream 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "net" 7 | "sync" 8 | 9 | "github.com/rabbitmq/rabbitmq-stream-go-client/pkg/logs" 10 | ) 11 | 12 | type socket struct { 13 | connection net.Conn 14 | writer *bufio.Writer 15 | mutex *sync.Mutex 16 | closed int32 17 | destructor *sync.Once 18 | } 19 | 20 | func (sck *socket) setOpen() { 21 | sck.mutex.Lock() 22 | defer sck.mutex.Unlock() 23 | sck.closed = 1 24 | } 25 | 26 | func (sck *socket) isOpen() bool { 27 | sck.mutex.Lock() 28 | defer sck.mutex.Unlock() 29 | return sck.closed == 1 30 | } 31 | func (sck *socket) shutdown(_ error) { 32 | if !sck.isOpen() { 33 | return 34 | } 35 | sck.mutex.Lock() 36 | sck.closed = 0 37 | sck.mutex.Unlock() 38 | 39 | sck.destructor.Do(func() { 40 | sck.mutex.Lock() 41 | defer sck.mutex.Unlock() 42 | err := sck.connection.Close() 43 | if err != nil { 44 | logs.LogWarn("error during close socket: %s", err) 45 | } 46 | }) 47 | } 48 | 49 | func (sck *socket) writeAndFlush(buffer []byte) error { 50 | sck.mutex.Lock() 51 | defer sck.mutex.Unlock() 52 | _, err := sck.writer.Write(buffer) 53 | if err != nil { 54 | return err 55 | } 56 | err = sck.writer.Flush() 57 | if err != nil { 58 | return err 59 | } 60 | 61 | return nil 62 | } 63 | 64 | func (c *Client) 
handleWrite(buffer []byte, response *Response) responseError { 65 | return c.handleWriteWithResponse(buffer, response, true) 66 | } 67 | 68 | func (c *Client) handleWriteWithResponse(buffer []byte, response *Response, removeResponse bool) responseError { 69 | result := c.socket.writeAndFlush(buffer) 70 | resultCode := waitCodeWithTimeOut(response, c.socketCallTimeout) 71 | /// we need to remove the response before evaluate the 72 | // buffer errSocket 73 | if removeResponse { 74 | result = c.coordinator.RemoveResponseById(response.correlationid) 75 | } 76 | 77 | if result != nil { 78 | // we just log 79 | fmt.Printf("Error handleWrite %s", result) 80 | } 81 | 82 | return resultCode 83 | } 84 | -------------------------------------------------------------------------------- /pkg/stream/stream_options.go: -------------------------------------------------------------------------------- 1 | package stream 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | ) 7 | 8 | type StreamOptions struct { 9 | MaxAge time.Duration 10 | MaxLengthBytes *ByteCapacity 11 | MaxSegmentSizeBytes *ByteCapacity 12 | } 13 | 14 | func (s *StreamOptions) SetMaxAge(maxAge time.Duration) *StreamOptions { 15 | s.MaxAge = maxAge 16 | return s 17 | } 18 | 19 | func (s *StreamOptions) SetMaxLengthBytes(maxLength *ByteCapacity) *StreamOptions { 20 | s.MaxLengthBytes = maxLength 21 | return s 22 | } 23 | 24 | func (s *StreamOptions) SetMaxSegmentSizeBytes(segmentSize *ByteCapacity) *StreamOptions { 25 | s.MaxSegmentSizeBytes = segmentSize 26 | return s 27 | } 28 | 29 | func (s StreamOptions) buildParameters() (map[string]string, error) { 30 | res := map[string]string{"queue-leader-locator": "least-leaders"} 31 | 32 | if s.MaxLengthBytes != nil { 33 | if s.MaxLengthBytes.error != nil { 34 | return nil, s.MaxLengthBytes.error 35 | } 36 | 37 | if s.MaxLengthBytes.bytes > 0 { 38 | res["max-length-bytes"] = fmt.Sprintf("%d", s.MaxLengthBytes.bytes) 39 | } 40 | } 41 | 42 | if s.MaxSegmentSizeBytes != nil { 43 | if 
// StreamStats exposes the per-stream statistics returned by the broker.
// A stored value of -1 means the broker has not reported that statistic yet.
type StreamStats struct {
	stats      map[string]int64
	streamName string
}

func newStreamStats(stats map[string]int64, streamName string) *StreamStats {
	return &StreamStats{stats: stats, streamName: streamName}
}

// chunkID returns the statistic stored under key, or an error labelled with
// label when the broker has no value (-1) for it yet.
func (s *StreamStats) chunkID(key string, label string) (int64, error) {
	if s.stats[key] == -1 {
		return -1, fmt.Errorf("%s not found for %s", label, s.streamName)
	}
	return s.stats[key], nil
}

// FirstOffset - The first offset in the stream.
// return first offset in the stream /
// Error if there is no first offset yet
func (s *StreamStats) FirstOffset() (int64, error) {
	return s.chunkID("first_chunk_id", "FirstOffset")
}

// Deprecated: The method name may be misleading.
// It does not indicate the last offset of the stream. It indicates the last
// uncommitted chunk id. This information is not necessary. The user should
// use CommittedChunkId().
func (s *StreamStats) LastOffset() (int64, error) {
	return s.chunkID("last_chunk_id", "LastOffset")
}

// CommittedChunkId - The ID (offset) of the committed chunk (block of
// messages) in the stream.
//
// It is the offset of the first message in the last chunk confirmed by a
// quorum of the stream cluster members (leader and replicas).
//
// The committed chunk ID is a good indication of what the last offset of a
// stream can be at a given time. The value can be stale as soon as the
// application reads it though, as the committed chunk ID for a stream that
// is published to changes all the time.
//
// return committed offset in this stream
// Error if there is no committed chunk yet
func (s *StreamStats) CommittedChunkId() (int64, error) {
	return s.chunkID("committed_chunk_id", "CommittedChunkId")
}
// createVhost provisions the test vhost through the management API.
func createVhost(vhost string) error {
	return httpCall("PUT", vhostUrl(vhost))
}

// deleteVhost removes the test vhost through the management API.
func deleteVhost(vhost string) error {
	return httpCall("DELETE", vhostUrl(vhost))
}

// vhostUrl builds the management-API endpoint for the given vhost.
func vhostUrl(vhost string) string {
	return "http://guest:guest@localhost:15672/api/vhosts/" + vhost
}

// httpCall issues the request (credentials are embedded in the URL) and
// fails on any non-2xx status.
func httpCall(method, url string) error {
	request, err := http.NewRequestWithContext(context.Background(), method, url, nil)
	if err != nil {
		return err
	}

	response, err := http.DefaultClient.Do(request)
	if err != nil {
		return err
	}
	if err := response.Body.Close(); err != nil {
		return err
	}

	if response.StatusCode < 200 || response.StatusCode >= 300 {
		return fmt.Errorf("http error (%d): %s", response.StatusCode, response.Status)
	}
	return nil
}
CreateArrayMessagesForTestingWithPrefix("test_", numberOfMessages) 15 | } 16 | func CreateArrayMessagesForTestingWithPrefix(prefix string, numberOfMessages int) []message.StreamMessage { 17 | arr := make([]message.StreamMessage, numberOfMessages) 18 | for i := range numberOfMessages { 19 | arr[i] = CreateMessageForTesting(prefix, i) 20 | } 21 | return arr 22 | } 23 | func CreateMessageForTesting(prefix string, index int) message.StreamMessage { 24 | return amqp.NewMessage([]byte(prefix + strconv.Itoa(index))) 25 | } 26 | -------------------------------------------------------------------------------- /pkg/stream/super_stream.go: -------------------------------------------------------------------------------- 1 | package stream 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | ) 7 | 8 | // public TimeSpan MaxAge 9 | //{ 10 | // set => Args["max-age"] = $"{value.TotalSeconds}s"; 11 | //} 12 | // 13 | // public ulong MaxLengthBytes 14 | //{ 15 | // set => Args["max-length-bytes"] = $"{value}"; 16 | //} 17 | // 18 | // public LeaderLocator LeaderLocator 19 | //{ 20 | // set => Args["queue-leader-locator"] = $"{value.ToString()}"; 21 | //} 22 | // 23 | // public int MaxSegmentSizeBytes 24 | //{ 25 | // set => Args["stream-max-segment-size-bytes"] = $"{value}"; 26 | //} 27 | 28 | const maxAge = "max-age" 29 | const maxLengthBytes = "max-length-bytes" 30 | const queueLeaderLocator = "queue-leader-locator" 31 | const streamMaxSegmentSizeBytes = "stream-max-segment-size-bytes" 32 | 33 | type SuperStreamOptions interface { 34 | getPartitions(prefix string) []string 35 | getBindingKeys() []string 36 | getArgs() map[string]string 37 | } 38 | 39 | type PartitionsOptions struct { 40 | Partitions int 41 | MaxAge time.Duration 42 | MaxLengthBytes *ByteCapacity 43 | MaxSegmentSizeBytes *ByteCapacity 44 | LeaderLocator string 45 | args map[string]string 46 | } 47 | 48 | func NewPartitionsOptions(partitions int) *PartitionsOptions { 49 | return &PartitionsOptions{ 50 | Partitions: 
partitions, 51 | args: make(map[string]string), 52 | } 53 | } 54 | 55 | func (t *PartitionsOptions) SetMaxAge(maxAge time.Duration) *PartitionsOptions { 56 | t.MaxAge = maxAge 57 | return t 58 | } 59 | 60 | func (t *PartitionsOptions) SetMaxLengthBytes(maxLengthBytes *ByteCapacity) *PartitionsOptions { 61 | t.MaxLengthBytes = maxLengthBytes 62 | return t 63 | } 64 | 65 | func (t *PartitionsOptions) SetMaxSegmentSizeBytes(maxSegmentSizeBytes *ByteCapacity) *PartitionsOptions { 66 | t.MaxSegmentSizeBytes = maxSegmentSizeBytes 67 | return t 68 | } 69 | 70 | func (t *PartitionsOptions) SetBalancedLeaderLocator() *PartitionsOptions { 71 | t.LeaderLocator = LeaderLocatorBalanced 72 | return t 73 | } 74 | 75 | func (t *PartitionsOptions) SetClientLocalLocator() *PartitionsOptions { 76 | t.LeaderLocator = LeaderLocatorClientLocal 77 | return t 78 | } 79 | 80 | func (t *PartitionsOptions) getPartitions(prefix string) []string { 81 | partitions := make([]string, t.Partitions) 82 | for i := range t.Partitions { 83 | partitions[i] = fmt.Sprintf("%s-%d", prefix, i) 84 | } 85 | return partitions 86 | } 87 | 88 | func (t *PartitionsOptions) getBindingKeys() []string { 89 | var bindingKeys []string 90 | for i := 0; i < t.Partitions; i++ { 91 | bindingKeys = append(bindingKeys, fmt.Sprintf("%d", i)) 92 | } 93 | return bindingKeys 94 | } 95 | 96 | func (t *PartitionsOptions) getArgs() map[string]string { 97 | if t.MaxAge > 0 { 98 | t.args[maxAge] = fmt.Sprintf("%ds", int(t.MaxAge.Seconds())) 99 | } 100 | if t.MaxLengthBytes != nil { 101 | t.args[maxLengthBytes] = fmt.Sprintf("%d", t.MaxLengthBytes.bytes) 102 | } 103 | if t.MaxSegmentSizeBytes != nil { 104 | t.args[streamMaxSegmentSizeBytes] = fmt.Sprintf("%d", t.MaxSegmentSizeBytes.bytes) 105 | } 106 | if t.LeaderLocator != "" { 107 | t.args[queueLeaderLocator] = t.LeaderLocator 108 | } 109 | return t.args 110 | } 111 | 112 | type BindingsOptions struct { 113 | Bindings []string 114 | MaxAge time.Duration 115 | MaxLengthBytes 
*ByteCapacity 116 | MaxSegmentSizeBytes *ByteCapacity 117 | LeaderLocator string 118 | args map[string]string 119 | } 120 | 121 | func NewBindingsOptions(bindings []string) *BindingsOptions { 122 | return &BindingsOptions{ 123 | Bindings: bindings, 124 | args: make(map[string]string), 125 | } 126 | } 127 | 128 | func (t *BindingsOptions) SetMaxAge(maxAge time.Duration) *BindingsOptions { 129 | t.MaxAge = maxAge 130 | return t 131 | } 132 | 133 | func (t *BindingsOptions) SetMaxLengthBytes(maxLengthBytes *ByteCapacity) *BindingsOptions { 134 | t.MaxLengthBytes = maxLengthBytes 135 | return t 136 | } 137 | 138 | func (t *BindingsOptions) SetMaxSegmentSizeBytes(maxSegmentSizeBytes *ByteCapacity) *BindingsOptions { 139 | t.MaxSegmentSizeBytes = maxSegmentSizeBytes 140 | return t 141 | } 142 | 143 | func (t *BindingsOptions) SetBalancedLeaderLocator() *BindingsOptions { 144 | t.LeaderLocator = LeaderLocatorBalanced 145 | return t 146 | } 147 | 148 | func (t *BindingsOptions) SetClientLocalLocator() *BindingsOptions { 149 | t.LeaderLocator = LeaderLocatorClientLocal 150 | return t 151 | } 152 | 153 | func (t *BindingsOptions) getPartitions(prefix string) []string { 154 | partitions := make([]string, len(t.Bindings)) 155 | for i, bindingKey := range t.Bindings { 156 | partitions[i] = fmt.Sprintf("%s-%s", prefix, bindingKey) 157 | } 158 | return partitions 159 | } 160 | 161 | func (t *BindingsOptions) getBindingKeys() []string { 162 | return t.Bindings 163 | } 164 | 165 | func (t *BindingsOptions) getArgs() map[string]string { 166 | if t.MaxAge > 0 { 167 | t.args[maxAge] = fmt.Sprintf("%ds", int(t.MaxAge.Seconds())) 168 | } 169 | if t.MaxLengthBytes != nil { 170 | t.args[maxLengthBytes] = fmt.Sprintf("%d", t.MaxLengthBytes.bytes) 171 | } 172 | if t.MaxSegmentSizeBytes != nil { 173 | t.args[streamMaxSegmentSizeBytes] = fmt.Sprintf("%d", t.MaxSegmentSizeBytes.bytes) 174 | } 175 | if t.LeaderLocator != "" { 176 | t.args[queueLeaderLocator] = t.LeaderLocator 177 | } 178 | 
return t.args 179 | } 180 | -------------------------------------------------------------------------------- /pkg/stream/utils.go: -------------------------------------------------------------------------------- 1 | package stream 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | "time" 7 | 8 | "github.com/rabbitmq/rabbitmq-stream-go-client/pkg/logs" 9 | ) 10 | 11 | type responseError struct { 12 | Err error 13 | isTimeout bool 14 | } 15 | 16 | func newResponseError(err error, timeout bool) responseError { 17 | return responseError{ 18 | Err: err, 19 | isTimeout: timeout, 20 | } 21 | } 22 | 23 | func uShortExtractResponseCode(code uint16) uint16 { 24 | return code & 0b0111_1111_1111_1111 25 | } 26 | 27 | // func UIntExtractResponseCode(code int32) int32 { 28 | // return code & 0b0111_1111_1111_1111 29 | //} 30 | 31 | func uShortEncodeResponseCode(code uint16) uint16 { 32 | return code | 0b1000_0000_0000_0000 33 | } 34 | 35 | func waitCodeWithDefaultTimeOut(response *Response) responseError { 36 | return waitCodeWithTimeOut(response, defaultSocketCallTimeout) 37 | } 38 | func waitCodeWithTimeOut(response *Response, timeout time.Duration) responseError { 39 | select { 40 | case code := <-response.code: 41 | if code.id != responseCodeOk { 42 | return newResponseError(lookErrorCode(code.id), false) 43 | } 44 | return newResponseError(nil, false) 45 | case <-time.After(timeout): 46 | logs.LogError("timeout %d ns - waiting Code, operation: %s", timeout.Milliseconds(), response.commandDescription) 47 | 48 | return newResponseError( 49 | fmt.Errorf("timeout %d ms - waiting Code, operation: %s ", 50 | timeout.Milliseconds(), response.commandDescription), true) 51 | } 52 | } 53 | 54 | func SetLevelInfo(value int8) { 55 | logs.LogLevel = value 56 | } 57 | 58 | func containsOnlySpaces(input string) bool { 59 | return len(input) > 0 && len(strings.TrimSpace(input)) == 0 60 | } 61 | -------------------------------------------------------------------------------- 
/pkg/stream/utils_test.go: -------------------------------------------------------------------------------- 1 | package stream 2 | 3 | import ( 4 | "sync" 5 | "time" 6 | 7 | . "github.com/onsi/ginkgo/v2" 8 | . "github.com/onsi/gomega" 9 | ) 10 | 11 | var _ = Describe("Utils", func() { 12 | 13 | It("Timeout calls No Error", func() { 14 | response := newResponse(lookUpCommand(commandUnitTest)) 15 | response.correlationid = 9 16 | var wg sync.WaitGroup 17 | wg.Add(1) 18 | go func(res *Response) { 19 | defer GinkgoRecover() 20 | err := waitCodeWithDefaultTimeOut(res) 21 | Expect(err.Err).ToNot(HaveOccurred()) 22 | wg.Done() 23 | }(response) 24 | time.Sleep(200 * time.Millisecond) 25 | response.code <- Code{ 26 | id: responseCodeOk, 27 | } 28 | 29 | wg.Wait() 30 | }) 31 | 32 | It("Timeout calls No Error", func() { 33 | response := newResponse(lookUpCommand(commandUnitTest)) 34 | response.correlationid = 9 35 | var wg sync.WaitGroup 36 | wg.Add(1) 37 | go func(res *Response) { 38 | defer GinkgoRecover() 39 | err := waitCodeWithDefaultTimeOut(res) 40 | Expect(err.Err).To(HaveOccurred()) 41 | wg.Done() 42 | }(response) 43 | 44 | wg.Wait() 45 | }) 46 | 47 | }) 48 | -------------------------------------------------------------------------------- /pkg/test-helper/http_utils.go: -------------------------------------------------------------------------------- 1 | package test_helper 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "io" 7 | "net/http" 8 | "strconv" 9 | 10 | "github.com/pkg/errors" 11 | ) 12 | 13 | type client_properties struct { 14 | Connection_name string `json:"connection_name"` 15 | } 16 | 17 | type connection struct { 18 | Name string `json:"name"` 19 | ClientProperties client_properties `json:"client_properties"` 20 | } 21 | 22 | func Connections(port string) ([]connection, error) { 23 | bodyString, err := httpGet("http://localhost:"+port+"/api/connections/", "guest", "guest") 24 | if err != nil { 25 | return nil, err 26 | } 27 | 28 | var data 
[]connection 29 | err = json.Unmarshal([]byte(bodyString), &data) 30 | if err != nil { 31 | return nil, err 32 | } 33 | return data, nil 34 | } 35 | 36 | func DropConnectionClientProvidedName(clientProvidedName string, port string) error { 37 | connections, err := Connections(port) 38 | if err != nil { 39 | return err 40 | } 41 | connectionToDrop := "" 42 | for _, connection := range connections { 43 | if connection.ClientProperties.Connection_name == clientProvidedName { 44 | connectionToDrop = connection.Name 45 | break 46 | } 47 | } 48 | 49 | if connectionToDrop == "" { 50 | return errors.New("connection not found") 51 | } 52 | 53 | err = DropConnection(connectionToDrop, port) 54 | if err != nil { 55 | return err 56 | } 57 | 58 | return nil 59 | } 60 | 61 | func DropConnection(name string, port string) error { 62 | _, err := httpDelete("http://localhost:"+port+"/api/connections/"+name, "guest", "guest") 63 | if err != nil { 64 | return err 65 | } 66 | 67 | return nil 68 | } 69 | func httpGet(url, username, password string) (string, error) { 70 | return baseCall(url, username, password, "GET") 71 | } 72 | 73 | func httpDelete(url, username, password string) (string, error) { 74 | return baseCall(url, username, password, "DELETE") 75 | } 76 | 77 | func baseCall(url, username, password string, method string) (string, error) { 78 | ctx := context.Background() 79 | var client http.Client 80 | req, err := http.NewRequestWithContext(ctx, method, url, nil) 81 | if err != nil { 82 | return "", err 83 | } 84 | req.SetBasicAuth(username, password) 85 | 86 | resp, err3 := client.Do(req) 87 | 88 | if err3 != nil { 89 | return "", err3 90 | } 91 | 92 | //nolint:errcheck 93 | defer resp.Body.Close() 94 | 95 | if resp.StatusCode == 200 { // OK 96 | bodyBytes, err2 := io.ReadAll(resp.Body) 97 | if err2 != nil { 98 | return "", err2 99 | } 100 | return string(bodyBytes), nil 101 | } 102 | 103 | if resp.StatusCode == 204 { // No Content 104 | return "", nil 105 | } 106 | 107 | 
return "", errors.New(strconv.Itoa(resp.StatusCode)) 108 | } 109 | --------------------------------------------------------------------------------