├── .github └── workflows │ └── docker-image.yml ├── .gitignore ├── README.md ├── client_caos └── Caos │ ├── .gitignore │ ├── Caos.cs │ ├── Caos.csproj │ ├── CloseThreadsTest.cs │ ├── DeduplicationTest.cs │ ├── Dockerfile │ ├── ForceCloseTest.cs │ ├── ProducerConsumer.cs │ ├── RabbitMQStream.cs │ ├── SuperStreamRaw.cs │ ├── TestBase.cs │ ├── deploy │ └── deploy.yaml ├── command_line_tools ├── README.md ├── add_test └── command_line_tools ├── examples ├── client_sni │ ├── .gitignore │ └── rabbitmq │ │ ├── .gitignore │ │ ├── pom.xml │ │ ├── rabbitmq.jar │ │ └── src │ │ ├── main │ │ └── java │ │ │ └── gas │ │ │ └── App.java │ │ └── test │ │ └── java │ │ └── gas │ │ └── AppTest.java ├── java_cert │ ├── .gitignore │ ├── README.md │ ├── build │ ├── conf │ │ ├── enabled_plugins │ │ └── rabbitmq.conf │ ├── docker-compose.template │ ├── pom.xml │ └── src │ │ └── main │ │ └── java │ │ └── rabbitmq │ │ └── sasl │ │ └── example │ │ └── TLSConnection.java ├── mqtt_over_ws │ └── python_ws.py ├── python │ ├── pika_connections.py │ ├── pika_handle_errors.py │ ├── pika_handle_errors_dlx.py │ ├── pika_memory_limit.py │ ├── pika_produce_consume.py │ ├── pika_produce_consume_access_control.py │ ├── pika_publish_subscribe.py │ ├── pump.py │ └── stream.cs └── rabbitmq_docker_ssl │ ├── README.md │ ├── conf │ ├── enabled_plugins │ └── rabbitmq.conf │ ├── docker-compose.yaml │ ├── pom.xml │ └── src │ └── main │ └── java │ └── TLSConnection.java ├── http_utils ├── README.md ├── close_allconnections.py └── remove_all_queues.py ├── k8s ├── helm │ ├── dashboards │ │ └── rabbitmq-monitoring.json │ ├── rabbimq_ha │ │ ├── dashboards │ │ │ └── rabbitmq-monitoring.json │ │ ├── delete_exporter │ │ ├── delete_prometheus_operator │ │ ├── delete_rabbitmq_ha │ │ ├── img │ │ │ ├── dashoard.png │ │ │ └── rmq_clsuter.png │ │ ├── install_exporter │ │ ├── install_prometheus_operator │ │ ├── install_rabbitmq_ha │ │ ├── rabbitmq-ha_values.yaml │ │ ├── rabbitmq_exporter_values.yaml │ │ └── values.yaml │ ├── rabbitmq3.8 │ │ ├── 1_install_rabbitmq_ha │ │ ├── 2_describe_endpoint │ │ ├── 3_install_rabbitmq_ha │ │ ├── 4_scale_rabbitmq │ │ ├── 5_install_prometheus │ │ ├── 6_install_grafana │ │ ├── README.md │ │ ├── delete_grafana │ │ ├── delete_prometheus │ │ ├── delete_rabbitmq_ha │ │ ├── export_graf │ │ ├── export_mgm │ │ ├── export_prom │ │ ├── grafana_values.yaml │ │ ├── img │ │ │ ├── prom_rabbitmq_targets.png │ │ │ └── rabbitmq_grafana.png │ │ ├── load_kind_images │ │ ├── prometheus_values.yaml │ │ ├── rabbitmq-ha_values.yaml │ │ ├── rmq_definition.json │ │ └── run_test │ └── rabbitmq_asas │ │ ├── c │ │ ├── dashboards │ │ └── rabbitmq-monitoring.json │ │ ├── delete_exporter │ │ ├── delete_grafana │ │ ├── delete_prometheus │ │ ├── delete_rabbitmq_ha │ │ ├── grafana_values.yaml │ │ ├── install_exporter │ │ ├── install_grafana │ │ ├── install_prometheus │ │ ├── install_rabbitmq_ha │ │ ├── prometheus_values.yaml │ │ ├── rabbitmq-exporter_values.yaml │ │ └── rabbitmq-ha_values.yaml └── kind │ ├── dashboard │ ├── admin-role-binding.yml │ └── dashboard-adminuser.yaml │ ├── get_token │ └── setup ├── microservices_demo ├── 1_rabbitmq.py ├── 2_kafka.py └── common.py ├── openstack ├── mandatory_test │ ├── README.md │ ├── mandatory_client_fail.py │ └── mandatory_test.py ├── oslo-example │ └── oslo.messaging.example.py ├── pika-examples │ ├── pika-example.py │ └── py_pika_publish.py ├── py-amqp-examples │ ├── create_queues.py │ ├── py-amqp-example.py │ └── py_amqp_publish.py └── tox-func-rabbit │ ├── Dockerfile │ └── README.md ├── rabbitmq-suse ├── 
leap_15_ipv6 │ └── Vagrantfile └── vagrant_cluster │ ├── .gitignore │ ├── README.md │ ├── Vagrantfile │ └── img │ └── cluster.png ├── rabbitmq_stream ├── AMQP │ ├── nodejs │ │ ├── .gitignore │ │ ├── README.md │ │ ├── package-lock.json │ │ ├── package.json │ │ ├── receive_stream.js │ │ └── send_stream.js │ └── python │ │ └── stream │ │ ├── .gitignore │ │ ├── README.md │ │ ├── client.py │ │ ├── receive_stream.py │ │ ├── send_stream.py │ │ └── server.py └── README.md ├── raft_kv ├── .gitignore ├── LICENSE ├── README.md ├── rebar.config ├── rebar.lock └── src │ ├── raft_kv.app.src │ ├── raft_kv.erl │ ├── raft_kv_app.erl │ ├── raft_kv_http_rest.erl │ ├── raft_kv_http_rest_map.erl │ ├── raft_kv_sm.erl │ └── raft_kv_sup.erl ├── test └── problem.py ├── vagrant ├── leap_15_ipv6 │ └── Vagrantfile ├── ubuntu_cluster_empty │ └── Vagrantfile └── vagrant_cluster │ ├── .gitignore │ ├── README.md │ ├── Vagrantfile │ └── img │ └── cluster.png └── workshop ├── README.md └── envoyfilter ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── Makefile ├── README.md ├── docker-compose.yaml ├── envoy ├── .dockerignore ├── Dockerfile.proxy ├── envoy_tcp_v3.yaml ├── envoy_tcp_v3_empty.yaml └── envoy_tcp_v3_tls.yaml ├── examples └── istio │ ├── README.md │ ├── istio_rabbitmq_filter.yaml │ ├── istio_rabbitmq_vservice.yaml │ ├── rabbitmq-bugs-perftest.yaml │ └── rabbitmq.yaml ├── img └── envoy_tls_termination_initiator.png └── network ├── Cargo.lock ├── Cargo.toml └── src └── lib.rs /.github/workflows/docker-image.yml: -------------------------------------------------------------------------------- 1 | name: Docker Image CI 2 | 3 | on: 4 | push: 5 | branches: [ "master" ] 6 | pull_request: 7 | branches: [ "master" ] 8 | 9 | jobs: 10 | 11 | build: 12 | 13 | runs-on: ubuntu-latest 14 | 15 | steps: 16 | - uses: actions/checkout@v3 17 | - name: Build the Docker image 18 | run: docker build . 
--file Dockerfile --tag my-image-name:$(date +%s) 19 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea/ 2 | .venv*/ -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## Repository with utilities and RabbitMQ examples 2 | 3 | 1 - [How to create a RabbitMQ cluster on openSUSE with vagrant](https://github.com/Gsantomaggio/rabbitmq-utils/tree/master/rabbitmq-suse/vagrant_cluster) 4 | 5 | 2 - [Set of OpenStack examples/utils](https://github.com/Gsantomaggio/rabbitmq-utils/tree/master/openstack) 6 | 7 | 3 - [RabbitMQ command line tools](https://github.com/Gsantomaggio/rabbitmq-utils/tree/master/command_line_tools) 8 | 9 | 4 - [RabbitMQ http utils](https://github.com/Gsantomaggio/rabbitmq-utils/tree/master/http_utils) 10 | 11 | 5 - [Kafka and RabbitMQ example](https://github.com/Gsantomaggio/rabbitmq-utils/tree/master/microservices_demo) 12 | -------------------------------------------------------------------------------- /client_caos/Caos/.gitignore: -------------------------------------------------------------------------------- 1 | ## Misc files 2 | *.bak 3 | docs/api-guide.pdf 4 | docs/pyle.log 5 | docs/pyle.pid 6 | .DS_Store 7 | .idea 8 | InternalTrace* 9 | [Ll]ocal.dist 10 | [Ll]ocal.props 11 | *.lock.json 12 | nunit-agent* 13 | *.pyc 14 | test-output.log 15 | TestResults.xml 16 | TestResult.xml 17 | Tests/coverage.xml 18 | test.sh 19 | *.VisualState.xml 20 | .vscode 21 | 22 | ## Misc directories 23 | .fake/ 24 | gensrc/ 25 | .ionide/ 26 | NuGet/ 27 | tmp/ 28 | .vscode/ 29 | 30 | ################# 31 | ## Visual Studio 32 | ################# 33 | 34 | ## Ignore Visual Studio temporary files, build results, and 35 | ## files generated by popular Visual Studio add-ons. 36 | 37 | # User-specific files 38 | *.suo 39 | *.user 40 | *.sln.docstates 41 | 42 | # Build results 43 | [Dd]ebug/ 44 | [Rr]elease/ 45 | x64/ 46 | build/ 47 | [Bb]in/ 48 | [Oo]bj/ 49 | *.lock.json 50 | 51 | BenchmarkDotNet.Artifacts/* 52 | 53 | APIApproval.Approve.received.txt 54 | 55 | # Visual Studio 2015 cache/options directory 56 | .vs/ 57 | 58 | # Visual Studio profiler 59 | *.psess 60 | *.vsp 61 | *.vspx 62 | 63 | # ReSharper is a .NET coding add-in 64 | _ReSharper*/ 65 | *.[Rr]e[Ss]harper 66 | 67 | # DotCover is a Code Coverage Tool 68 | *.dotCover 69 | 70 | # NCrunch 71 | *.ncrunch* 72 | .*crunch*.local.xml 73 | 74 | # Installshield output folder 75 | [Ee]xpress/ 76 | 77 | # DocProject is a documentation generator add-in 78 | DocProject/buildhelp/ 79 | DocProject/Help/*.HxT 80 | DocProject/Help/*.HxC 81 | DocProject/Help/*.hhc 82 | DocProject/Help/*.hhk 83 | DocProject/Help/*.hhp 84 | DocProject/Help/Html2 85 | DocProject/Help/html 86 | 87 | # NuGet Packages Directory 88 | packages/ 89 | /packages 90 | 91 | # Windows Store app package directory 92 | AppPackages/ 93 | 94 | # Others 95 | sql/ 96 | *.Cache 97 | ClientBin/ 98 | [Ss]tyle[Cc]op.* 99 | ~$* 100 | *~ 101 | *.dbmdl 102 | *.[Pp]ublish.xml 103 | *.pfx 104 | *.publishsettings 105 | 106 | # Backup & report files from converting an old project file to a newer 107 | # Visual Studio version. 
Backup files are not needed, because we have git ;-)
108 | _UpgradeReport_Files/
109 | Backup*/
110 | UpgradeLog*.XML
111 | UpgradeLog*.htm
112 |
113 | # Unit tests
114 | projects/Unit*/TestResult.xml
115 |
116 | # Development scripts
117 | *.pcap
118 |
119 | # Vim
120 | .sw?
121 | .*.sw?
122 |
123 | # tests
124 | Tests/coverage.*
125 |
126 |
--------------------------------------------------------------------------------
/client_caos/Caos/Caos.cs:
--------------------------------------------------------------------------------
1 | namespace Caos;
2 |
3 | public class Caos
4 | {
5 |     public static void Main(string[] args)
6 |     {
7 |         Console.WriteLine("Caos RabbitMQ Stream Client Tester 2.1");
8 |         // check with rabbitmq-streams list_stream_group_consumers --stream invoices-1 --reference reference
9 |         // rabbitmq-streams delete_super_stream invoices
10 |         // rabbitmq-streams add_super_stream invoices --partitions 10
11 |         // rabbitmq-streams delete_replica --vhost "/" "invoices-1" "rabbit@rabbitmq-stream-server-0.rabbitmq-stream-nodes.stream-clients-test"
12 |         // new SuperStreamRaw("test-stream",args[0],args[1], args[2]).Start().Wait();
13 |
14 |         var streams = new List<string>();
15 |         for (var i = 0; i < 5; i++)
16 |         {
17 |             Console.WriteLine($"Adding test-stream-{i}");
18 |             streams.Add($"test-stream-{i}");
19 |         }
20 |
21 |         var id = new Random().Next(streams.Count);
22 |         Console.WriteLine($"Using stream {streams[id]}");
23 |         new ProducerConsumer(streams[id], args[0], args[1], args[2]).Start(1_000_000).Wait();
24 |
25 |
26 |         // Console.WriteLine("SuperStreamRaw Done");
27 |         // new DeduplicationTest("test-stream",args[0],args[1], args[2]).Start(6_000_000).Wait();
28 |         // Console.WriteLine("Deduplication Test Force close Done");
29 |         //
30 |         // // new ForceCloseTest("test-rabbitmq",args[0],args[1], args[2]).Start(3_000_000).Wait();
31 |         // Console.WriteLine("Test Force close Done");
32 |         //
33 |         // new CloseThreadsTest("test-stream",args[0],args[1], args[2]).Start().Wait();
34 |         Console.WriteLine("Press any key to close");
35 |         Console.ReadLine();
36 |     }
37 | }
--------------------------------------------------------------------------------
/client_caos/Caos/Caos.csproj:
--------------------------------------------------------------------------------
1 | <Project Sdk="Microsoft.NET.Sdk">
2 |
3 |     <PropertyGroup>
4 |         <OutputType>Exe</OutputType>
5 |         <TargetFramework>net7.0</TargetFramework>
6 |         <ImplicitUsings>enable</ImplicitUsings>
7 |         <Nullable>enable</Nullable>
8 |         <DockerDefaultTargetOS>Linux</DockerDefaultTargetOS>
9 |     </PropertyGroup>
10 |
11 |     <!-- the PackageReference items were stripped during extraction; the sources require
12 |          at least RabbitMQ.Stream.Client and Microsoft.Extensions.Logging.Console -->
13 |     <ItemGroup>
14 |     </ItemGroup>
15 |
16 |     <ItemGroup>
17 |         <Content Include=".dockerignore" />
18 |     </ItemGroup>
19 |
20 | </Project>
--------------------------------------------------------------------------------
/client_caos/Caos/CloseThreadsTest.cs:
--------------------------------------------------------------------------------
1 | using RabbitMQ.Stream.Client;
2 | using RabbitMQ.Stream.Client.Reliable;
3 |
4 | namespace Caos;
5 |
6 | public class CloseThreadsTest : TestBase
7 | {
8 |     public CloseThreadsTest(string streamName, string username, string password, string host)
9 |     {
10 |         _streamSystem = new RabbitMQStream(streamName, username, password, host);
11 |         this.streamName = streamName;
12 |         this.username = username;
13 |         this.password = password;
14 |         this.host = host;
15 |     }
16 |
17 |     public async Task Start()
18 |     {
19 |         await _streamSystem.DeleteStream();
20 |         await _streamSystem.CreateStream();
21 |
22 |
23 |         for (var z = 0; z < 100; z++)
24 |         {
25 |             Console.WriteLine($"Starting test {z}");
26 |             await Task.Delay(new Random().Next(100, 200));
27 |             var producer = await _streamSystem.CreateProducer("producer-thread-force-test",
28 |                 new Func<MessagesConfirmation, Task>(
29 |                     async confirmation => { await Task.CompletedTask; }
30 |                 ));
31 |
32 |             _ = Task.Run(async () =>
33 |                 {
34 |                     await Task.Delay(new Random().Next(500, 800));
35 |                     await producer.Close();
36 |                 }
37 |             );
38 |
39 |             for (var i = 0; i < 1_000_000; i++)
40 |             {
41 |                 await producer.Send(new Message(new byte[100]));
42 |                 if (!producer.IsOpen()) break;
43 |             }
44 |         }
45 |
46 |
47 |
48 |
49 |         for (var z = 0; z < 100; z++)
50 |         {
51 |             Console.WriteLine($"Starting consumer test {z}");
52 |             await Task.Delay(new Random().Next(100, 200));
53 |             var consumer = await _streamSystem.CreateConsumer("consumer-thread-force-test", async (s, consumer, arg3, arg4) => await Task.CompletedTask);
54 |
55 |             _ = Task.Run(async () =>
56 |                 {
57 |                     await Task.Delay(new Random().Next(500, 800));
58 |                     await consumer.Close();
59 |                 }
60 |             );
61 |
62 |
63 |         }
64 |     }
65 | }
--------------------------------------------------------------------------------
/client_caos/Caos/DeduplicationTest.cs:
--------------------------------------------------------------------------------
1 | using RabbitMQ.Stream.Client;
2 | using RabbitMQ.Stream.Client.Reliable;
3 |
4 | namespace Caos;
5 |
6 | public class DeduplicationTest : TestBase
7 | {
8 |     public DeduplicationTest(string streamName, string username, string password, string host)
9 |     {
10 |         _streamSystem = new RabbitMQStream(streamName, username, password, host);
11 |         this.streamName = streamName;
12 |         this.username = username;
13 |         this.password = password;
14 |         this.host = host;
15 |     }
16 |
17 |     public int MessagesSent { get; set; }
18 |     public int MessagesConfirmed { get; set; }
19 |     public int MessagesError { get; set; }
20 |
21 |
22 |     public async Task Start(ulong messagesToSend)
23 |     {
24 |         await _streamSystem.DeleteStream();
25 |         await _streamSystem.CreateStream();
26 |
27 |         _ = Task.Run((() =>
28 |         {
29 |             var n = 0;
30 |             while (n < 100)
31 |             {
32 |                 Console.WriteLine(
33 |                     $"Messages sent: {MessagesSent}, confirmed: {MessagesConfirmed}, error: {MessagesError} - Total: {MessagesConfirmed + MessagesError}");
34 |                 n++;
35 |                 Thread.Sleep(1000);
36 |             }
37 |         }));
38 |
39 |         var d = await _streamSystem.CreateDeduplicationProducer("deduplication-test",
40 |             new Func<MessagesConfirmation, Task>(async confirmation =>
41 |             {
42 |                 if (confirmation.Status == ConfirmationStatus.Confirmed)
43 |                 {
44 |                     MessagesConfirmed += confirmation.Messages.Count;
45 |                 }
46 |                 else
47 |                 {
48 |                     MessagesError += confirmation.Messages.Count;
49 |                 }
50 |
51 |                 await Task.CompletedTask;
52 |             }));
53 |
54 |         for (ulong i = 0; i < messagesToSend; i++)
55 |         {
56 |             await d.Send(i, new Message(new byte[100]));
57 |             MessagesSent++;
58 |         }
59 |     }
60 | }
--------------------------------------------------------------------------------
/client_caos/Caos/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM mcr.microsoft.com/dotnet/runtime:7.0 AS base
2 | WORKDIR /app
3 | COPY ["bin/Release/net7.0", "/Caos/"]
4 | ENTRYPOINT ["dotnet", "/Caos/Caos.dll", "test","test","rabbitmq-stream.stream-clients-test.svc.cluster.local"]
5 |
--------------------------------------------------------------------------------
/client_caos/Caos/ProducerConsumer.cs:
--------------------------------------------------------------------------------
1 | using RabbitMQ.Stream.Client;
2 | using RabbitMQ.Stream.Client.Reliable;
3 |
4 | namespace Caos;
5 |
6 | public class ProducerConsumer : TestBase
7 | {
8 |     public ProducerConsumer(string streamName, string username, string password, string host)
9 |     {
10 |         _streamSystem = new RabbitMQStream(streamName, username, password, host);
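        // The RabbitMQStream wrapper (see RabbitMQStream.cs) opens the StreamSystem
        // connection; the assignments below just keep the parameters on the TestBase fields.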
11 | this.streamName = streamName; 12 | this.username = username; 13 | this.password = password; 14 | this.host = host; 15 | } 16 | 17 | public async Task Start(ulong messagesToSend) 18 | { 19 | await _streamSystem.DeleteStream(); 20 | await _streamSystem.CreateStream(); 21 | 22 | _ = Task.Run((() => 23 | { 24 | var n = 0; 25 | while (n < 10000) 26 | { 27 | Console.WriteLine( 28 | $"Messages sent: {MessagesSent}, confirmed: {MessagesConfirmed}, error: {MessagesError} - " + 29 | $"Total: {MessagesConfirmed + MessagesError} Consumed: {MessagesConsumed}"); 30 | n++; 31 | Thread.Sleep(1000); 32 | } 33 | 34 | { 35 | } 36 | })); 37 | 38 | var d = await _streamSystem.CreateProducer("producer-test", 39 | new Func(async confirmation => 40 | { 41 | if (confirmation.Status == ConfirmationStatus.Confirmed) 42 | { 43 | MessagesConfirmed += confirmation.Messages.Count; 44 | } 45 | else 46 | { 47 | MessagesError += confirmation.Messages.Count; 48 | } 49 | 50 | await Task.CompletedTask; 51 | })); 52 | 53 | 54 | for (int i = 0; i < 10; i++) 55 | { 56 | var consumer = _streamSystem.CreateConsumer("consumer-caos-force-test", 57 | new Func( 58 | async (s, rawConsumer, messageContext, message) => 59 | { 60 | MessagesConsumed += 1; 61 | await Task.CompletedTask; 62 | } 63 | )); 64 | await Task.WhenAll(consumer); 65 | } 66 | 67 | 68 | for (ulong i = 0; i < messagesToSend; i++) 69 | { 70 | await d.Send(new Message(new byte[100])); 71 | MessagesSent++; 72 | await Task.Delay(100); 73 | } 74 | } 75 | 76 | public int MessagesConsumed { get; set; } 77 | 78 | 79 | public int MessagesSent { get; set; } 80 | 81 | public int MessagesError { get; set; } 82 | 83 | public int MessagesConfirmed { get; set; } 84 | } -------------------------------------------------------------------------------- /client_caos/Caos/RabbitMQStream.cs: -------------------------------------------------------------------------------- 1 | using System.Net; 2 | using Microsoft.Extensions.Logging; 3 | using RabbitMQ.Stream.Client; 4 | using RabbitMQ.Stream.Client.Reliable; 5 | 6 | namespace Caos; 7 | 8 | public class RabbitMQStream 9 | { 10 | public string StreamName { get; set; } 11 | private StreamSystem _streamSystem; 12 | private string superStream = "invoices"; 13 | 14 | public RabbitMQStream(string streamName, string username, string password, string host) 15 | { 16 | var addressResolver = new AddressResolver(new DnsEndPoint(host, 5552)); 17 | 18 | _streamSystem = StreamSystem.Create(new StreamSystemConfig 19 | { 20 | UserName = username, 21 | Password = password, 22 | AddressResolver = addressResolver, 23 | Endpoints = new List() 24 | { 25 | addressResolver.EndPoint 26 | } 27 | }).Result; 28 | StreamName = streamName; 29 | } 30 | 31 | public async Task CreateStream() 32 | { 33 | Console.WriteLine($"Creating stream {StreamName}"); 34 | await _streamSystem.CreateStream(new StreamSpec(StreamName) 35 | { 36 | MaxLengthBytes = 18_250_418_240, 37 | }); 38 | } 39 | 40 | public async Task DeleteStream() 41 | { 42 | try 43 | { 44 | await _streamSystem.DeleteStream(StreamName); 45 | } 46 | catch (Exception) 47 | { 48 | // don't care 49 | } 50 | } 51 | 52 | public async Task CreateProducer(string producerName, 53 | Func confirmationHandler) 54 | { 55 | var loggerFactory = LoggerFactory.Create(builder => 56 | { 57 | builder.AddSimpleConsole(); 58 | builder.AddFilter("RabbitMQ.Stream", LogLevel.Debug); 59 | }); 60 | 61 | var producerLogger = loggerFactory.CreateLogger(); 62 | Console.WriteLine($"Creating Producer {StreamName}"); 63 | return await 
Producer.Create(new ProducerConfig(_streamSystem, StreamName) 64 | { 65 | ClientProvidedName = producerName, 66 | ConfirmationHandler = confirmationHandler 67 | }, producerLogger); 68 | } 69 | 70 | 71 | public async Task CreateSuperProducer(string producerName, 72 | Func confirmationHandler) 73 | { 74 | var loggerFactory = LoggerFactory.Create(builder => 75 | { 76 | builder.AddSimpleConsole(); 77 | builder.AddFilter("RabbitMQ.Stream", LogLevel.Debug); 78 | }); 79 | 80 | var producerLogger = loggerFactory.CreateLogger(); 81 | 82 | return await Producer.Create(new ProducerConfig(_streamSystem, superStream) 83 | { 84 | SuperStreamConfig = new SuperStreamConfig() 85 | { 86 | Routing = msg => msg.Properties.MessageId.ToString() 87 | }, 88 | ClientProvidedName = producerName, 89 | ConfirmationHandler = confirmationHandler 90 | }, producerLogger); 91 | } 92 | 93 | public async Task CreateDeduplicationProducer(string producerName, 94 | Func confirmationHandler) 95 | { 96 | var loggerFactory = LoggerFactory.Create(builder => 97 | { 98 | builder.AddSimpleConsole(); 99 | builder.AddFilter("RabbitMQ.Stream", LogLevel.Debug); 100 | }); 101 | 102 | var producerLogger = loggerFactory.CreateLogger(); 103 | 104 | return await DeduplicatingProducer.Create( 105 | new DeduplicatingProducerConfig(_streamSystem, StreamName, "reference") 106 | { 107 | ClientProvidedName = producerName, 108 | ConfirmationHandler = confirmationHandler 109 | }, producerLogger); 110 | } 111 | 112 | public async Task CreateConsumer(string consumerName, 113 | Func messageHandler) 114 | { 115 | var loggerFactory = LoggerFactory.Create(builder => 116 | { 117 | builder.AddSimpleConsole(); 118 | builder.AddFilter("RabbitMQ.Stream", LogLevel.Debug); 119 | }); 120 | 121 | var consumerLogger = loggerFactory.CreateLogger(); 122 | 123 | return await Consumer.Create(new ConsumerConfig(_streamSystem, StreamName) 124 | { 125 | OffsetSpec = new OffsetTypeFirst(), 126 | ClientProvidedName = consumerName, 127 | MessageHandler = messageHandler 128 | }, consumerLogger); 129 | } 130 | 131 | 132 | public async Task CreateSuperConsumer(string consumerName, 133 | Func messageHandler) 134 | { 135 | var loggerFactory = LoggerFactory.Create(builder => 136 | { 137 | builder.AddSimpleConsole(); 138 | builder.AddFilter("RabbitMQ.Stream", LogLevel.Debug); 139 | }); 140 | 141 | var consumerLogger = loggerFactory.CreateLogger(); 142 | 143 | return await Consumer.Create(new ConsumerConfig(_streamSystem, superStream) 144 | { 145 | 146 | Reference = "reference", 147 | IsSuperStream = true, 148 | IsSingleActiveConsumer = true, 149 | 150 | // 151 | 152 | 153 | ConsumerUpdateListener = async (reference, stream, isActive) => 154 | { 155 | consumerLogger.LogInformation("Consumer {S1}, for stream {Stream} is active {S2} ", stream, reference, 156 | isActive); 157 | await Task.CompletedTask.ConfigureAwait(false); 158 | return new OffsetTypeFirst(); 159 | }, 160 | OffsetSpec = new OffsetTypeFirst(), 161 | ClientProvidedName = consumerName, 162 | MessageHandler = messageHandler 163 | }, consumerLogger); 164 | } 165 | } -------------------------------------------------------------------------------- /client_caos/Caos/SuperStreamRaw.cs: -------------------------------------------------------------------------------- 1 | using System.Collections.Concurrent; 2 | using RabbitMQ.Stream.Client; 3 | using RabbitMQ.Stream.Client.AMQP; 4 | using RabbitMQ.Stream.Client.Reliable; 5 | 6 | namespace Caos; 7 | 8 | public class SuperStreamRaw : TestBase 9 | { 10 | public SuperStreamRaw(string 
streamName, string username, string password, string host) 11 | { 12 | _streamSystem = new RabbitMQStream(streamName, username, password, host); 13 | this.streamName = streamName; 14 | this.username = username; 15 | this.password = password; 16 | this.host = host; 17 | } 18 | 19 | public async Task Start() 20 | { 21 | var producer = await _streamSystem.CreateSuperProducer("producer-super-stream", 22 | new Func( 23 | async confirmation => { await Task.CompletedTask; } 24 | )); 25 | 26 | _ = Task.Run(async () => 27 | { 28 | for (int i = 0; i < 100_000_0000; i++) 29 | { 30 | if (!producer.IsOpen()) break; 31 | await producer.Send(new Message(new byte[100]) 32 | { 33 | Properties = new Properties 34 | { 35 | MessageId = i.ToString() 36 | } 37 | }); 38 | await Task.Delay(1); 39 | } 40 | 41 | 42 | }); 43 | await Task.Delay(2000); 44 | for (var z = 0; z < 15509; z++) 45 | { 46 | Console.WriteLine($"Restart all-1 {z}"); 47 | 48 | var consumers = new List(); 49 | var consumedDictionary = new ConcurrentDictionary(); 50 | // for (var i = 0; i < 1; i++) 51 | { 52 | Console.WriteLine($"starting consumer"); 53 | // await Task.Delay(new Random().Next(100, 3000)); 54 | var c = await _streamSystem.CreateSuperConsumer("super-consumer", 55 | async (stream, consumer, arg3, arg4) => 56 | { 57 | // await Task.Delay(new Random().Next(1000, 5000)); 58 | // Console.WriteLine($"****************--++Before Message received: {stream} {DateTime.Now}"); 59 | var random = new Random(); 60 | await Task.Delay(random.Next(200, 1000)); 61 | if (!consumedDictionary.ContainsKey(stream)) 62 | { 63 | consumedDictionary.TryAdd(stream, 0); 64 | } 65 | 66 | consumedDictionary.TryUpdate(stream, consumedDictionary[stream] + 1, 67 | consumedDictionary[stream]); 68 | // Console.WriteLine( 69 | // $"****************--++After Message received: {stream} elapsed: {DateTime.Now - start} offset {arg3.Offset}"); 70 | consumedDictionary.TryGetValue(stream, out var val); 71 | if (val % 10 == 0) 72 | { 73 | Console.WriteLine( 74 | $"__Message received: {stream} offset {arg3.Offset} {DateTime.Now}"); 75 | } 76 | 77 | await Task.CompletedTask; 78 | } 79 | ); 80 | consumers.Add(c); 81 | } 82 | 83 | await Task.Delay(90000 * 1); 84 | foreach (var consumer in consumers) 85 | { 86 | Console.WriteLine($"closing consumers..."); 87 | await Task.Delay(1000); 88 | await consumer.Close(); 89 | } 90 | 91 | Console.WriteLine($"closed consumers"); 92 | } 93 | } 94 | } -------------------------------------------------------------------------------- /client_caos/Caos/TestBase.cs: -------------------------------------------------------------------------------- 1 | namespace Caos; 2 | 3 | public class TestBase 4 | { 5 | protected string username = "guest"; 6 | protected string password = "guest"; 7 | protected string host = "localhost"; 8 | protected string streamName = "test-stream"; 9 | protected RabbitMQStream _streamSystem; 10 | } -------------------------------------------------------------------------------- /client_caos/Caos/deploy: -------------------------------------------------------------------------------- 1 | dotnet build --configuration Release --no-restore 2 | docker build -t caos . 
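# push the rebuilt image and recreate the deployment
# (assumes push access to the gsantomaggio/caos Docker Hub repository)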
3 | docker tag caos gsantomaggio/caos
4 | docker push gsantomaggio/caos
5 | kubectl delete -f deploy.yaml || true
6 | kubectl apply -f deploy.yaml
--------------------------------------------------------------------------------
/client_caos/Caos/deploy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 |   name: deployments-caos
5 |   namespace: stream-clients-test
6 | spec:
7 |   replicas: 3
8 |   selector:
9 |     matchLabels:
10 |       app: deployments-caos
11 |   template:
12 |     metadata:
13 |       labels:
14 |         app: deployments-caos
15 |     spec:
16 |       containers:
17 |         - name: caos
18 |           image: gsantomaggio/caos
--------------------------------------------------------------------------------
/command_line_tools/README.md:
--------------------------------------------------------------------------------
1 | RabbitMQ command line tools
2 | ====
3 |
4 | bash scripts based on `rabbitmqctl` to filter and sort information
5 |
6 |
--------------------------------------------------------------------------------
/command_line_tools/add_test:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | rabbitmqctl add_user test test && rabbitmqctl set_user_tags test administrator && rabbitmqctl set_permissions -p / test ".*" ".*" ".*"
3 |
--------------------------------------------------------------------------------
/command_line_tools/command_line_tools:
--------------------------------------------------------------------------------
1 | ######################### QUEUES ###########################################
2 |
3 | #### Identify queues with messages > 0
4 | rabbitmqctl list_queues --no-table-headers -q | awk '$2 > 0 { printf "%s\t%s\n", $0,"[X]" ; } $2 <= 0 { print $0 ;}'
5 |
6 | #### filter queues with messages > 0
7 | rabbitmqctl list_queues --no-table-headers -q | awk '$2 > 0 { printf "%s\t%s\n", $0,"" ; }'
8 |
9 | ### filter and sort queues with messages > 0
10 | rabbitmqctl list_queues --no-table-headers -q | sort -k2 -n |awk '$2 > 0 { printf "%s\t%s\n", $0,"" ; }'
11 |
12 | ### filter and sort queues with messages > 0, reverse order
13 | rabbitmqctl list_queues --no-table-headers -q | sort -r -k2 -n |awk '$2 > 0 { printf "%s\t%s\n", $0,"" ; }'
14 |
15 | ### filter queue names starting with a prefix
16 | rabbitmqctl list_queues --no-table-headers -q | sort -r -k1 -n |awk '/^PUTSTARTINGNAMEHERE/'
17 |
18 | ### filter queue names containing a substring
19 | rabbitmqctl list_queues --no-table-headers -q | sort -r -k1 -n |awk '/PUTSUBSTRINGHERE/'
20 |
21 | ### filter queues with consumers
22 | rabbitmqctl list_queues name messages consumers --no-table-headers -q | awk '$3 > 0 { printf "%s\t%s\n", $0,"" ; }'
23 |
24 |
25 | ################# CHANNELS #################
26 |
27 | ### filter and sort channels with consumers > 0
28 | rabbitmqctl list_channels name connection number consumer_count --no-table-headers -q | sort -r -k1 -n |awk '$7 > 0 { printf "%s\t%s\n", $0,"" ; }'
29 |
30 | #### filter channels with ZERO consumers
31 | rabbitmqctl list_channels name connection number consumer_count --no-table-headers -q |awk '$7 == 0 { printf "%s\t%s\n", $0,"" ; }'
32 |
33 |
34 | #################### DELETE/PURGE WARNING!!!! #############################
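### the loops below are destructive; preview the targets first, for example:
### for q in $(rabbitmqctl list_queues name -s) ; do echo "would delete $q"; done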
35 | ### delete all the queues on the default vhost
36 | for q in $(rabbitmqctl list_queues name -s) ; do rabbitmqctl delete_queue $q; done
37 |
38 | ### delete all the queues on a given vhost
39 | for q in $(rabbitmqctl list_queues name -s --vhost <>) ; do rabbitmqctl delete_queue --vhost <> $q; done
40 |
41 | ### purge all the queues on a given vhost
42 | for q in $(rabbitmqctl list_queues name -s --vhost <>) ; do rabbitmqctl purge_queue --vhost <> $q; done
43 |
44 | ### purge all the queues in all vhosts !!!!!
45 | for vh in $(rabbitmqctl list_vhosts -s); do for q in $(rabbitmqctl list_queues name -s --vhost $vh) ; do rabbitmqctl purge_queue --vhost $vh $q; done done
46 |
47 |
48 | ### delete all the queues in all vhosts !!!!!
49 | for vh in $(rabbitmqctl list_vhosts -s); do for q in $(rabbitmqctl list_queues name -s --vhost $vh) ; do rabbitmqctl delete_queue --vhost $vh $q; done done
50 |
--------------------------------------------------------------------------------
/examples/client_sni/.gitignore:
--------------------------------------------------------------------------------
1 | .vscode/
2 | rabbitmq/target/
3 |
--------------------------------------------------------------------------------
/examples/client_sni/rabbitmq/.gitignore:
--------------------------------------------------------------------------------
1 | .classpath
2 | .project
3 | .settings/
4 |
--------------------------------------------------------------------------------
/examples/client_sni/rabbitmq/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0"
3 |          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
4 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
5 |   <modelVersion>4.0.0</modelVersion>
6 |
7 |   <groupId>gas</groupId>
8 |   <artifactId>rabbitmq</artifactId>
9 |   <version>1.0-SNAPSHOT</version>
10 |
11 |   <name>rabbitmq</name>
12 |   <url>http://www.example.com</url>
13 |
14 |   <properties>
15 |     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
16 |     <maven.compiler.source>1.7</maven.compiler.source>
17 |     <maven.compiler.target>1.7</maven.compiler.target>
18 |   </properties>
19 |
20 |   <dependencies>
21 |     <dependency>
22 |       <groupId>com.rabbitmq</groupId>
23 |       <artifactId>amqp-client</artifactId>
24 |       <version>5.10.0</version>
25 |     </dependency>
26 |
27 |     <dependency>
28 |       <groupId>junit</groupId>
29 |       <artifactId>junit</artifactId>
30 |       <version>4.13.1</version>
31 |       <scope>test</scope>
32 |     </dependency>
33 |   </dependencies>
34 |
35 |   <build>
36 |     <pluginManagement>
37 |       <plugins>
38 |         <plugin>
39 |           <artifactId>maven-clean-plugin</artifactId>
40 |           <version>3.1.0</version>
41 |         </plugin>
42 |         <plugin>
43 |           <artifactId>maven-resources-plugin</artifactId>
44 |           <version>3.0.2</version>
45 |         </plugin>
46 |         <plugin>
47 |           <artifactId>maven-compiler-plugin</artifactId>
48 |           <version>3.8.0</version>
49 |         </plugin>
50 |         <plugin>
51 |           <artifactId>maven-surefire-plugin</artifactId>
52 |           <version>2.22.1</version>
53 |         </plugin>
54 |         <plugin>
55 |           <artifactId>maven-jar-plugin</artifactId>
56 |           <version>3.0.2</version>
57 |         </plugin>
58 |         <plugin>
59 |           <artifactId>maven-install-plugin</artifactId>
60 |           <version>2.5.2</version>
61 |         </plugin>
62 |         <plugin>
63 |           <artifactId>maven-deploy-plugin</artifactId>
64 |           <version>2.8.2</version>
65 |         </plugin>
66 |         <plugin>
67 |           <artifactId>maven-site-plugin</artifactId>
68 |           <version>3.7.1</version>
69 |         </plugin>
70 |         <plugin>
71 |           <artifactId>maven-project-info-reports-plugin</artifactId>
72 |           <version>3.0.0</version>
73 |         </plugin>
74 |       </plugins>
75 |     </pluginManagement>
76 |   </build>
77 | </project>
--------------------------------------------------------------------------------
/examples/client_sni/rabbitmq/rabbitmq.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Gsantomaggio/rabbitmq-utils/07423e9b253af5e602e3b06e65d1fc22157586f7/examples/client_sni/rabbitmq/rabbitmq.jar
--------------------------------------------------------------------------------
/examples/client_sni/rabbitmq/src/main/java/gas/App.java:
--------------------------------------------------------------------------------
1 | package gas;
2 |
3 | /**
4 |  * Hello world!
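 * SNI test client: opens a TLS connection to args[0] on port 5671 and forces the
 * hostname given in args[1] into the SSLEngine's SNI parameters (see below).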
5 | * 6 | */ 7 | import java.io.*; 8 | import java.security.*; 9 | 10 | import com.rabbitmq.client.*; 11 | 12 | import java.util.ArrayList; 13 | import java.util.concurrent.TimeUnit; 14 | import java.util.concurrent.atomic.AtomicBoolean; 15 | import com.rabbitmq.client.impl.nio.NioParams; 16 | import javax.net.ssl.SSLEngine; 17 | import javax.net.ssl.SNIHostName; 18 | import javax.net.ssl.SNIServerName; 19 | import javax.net.ssl.SSLParameters; 20 | import java.util.List; 21 | 22 | public class App { 23 | public static void main(String[] args) { 24 | System.out.println("Testing SNI value: " + args[1]); 25 | 26 | try { 27 | 28 | ConnectionFactory factory = new ConnectionFactory(); 29 | factory.setHost(args[0]); 30 | factory.setUsername("test"); 31 | factory.setPassword("test"); 32 | factory.setPort(5671); 33 | 34 | factory.useNio(); 35 | factory.useSslProtocol(); 36 | NioParams nioParams = new NioParams(); 37 | final SSLParameters sslParameters = new SSLParameters(); 38 | SNIHostName sniHostName = new SNIHostName(args[1]); 39 | final List sniHostNameList = new ArrayList<>(1); 40 | sniHostNameList.add(sniHostName); 41 | sslParameters.setServerNames(sniHostNameList); 42 | 43 | nioParams.setSslEngineConfigurator(new SslEngineConfigurator() { 44 | @Override 45 | public void configure(SSLEngine sslEngine) throws IOException { 46 | sslEngine.setSSLParameters(sslParameters); 47 | System.out.println(sslEngine.getSSLParameters().getServerNames()); 48 | 49 | } 50 | }); 51 | 52 | factory.setNioParams(nioParams); 53 | 54 | // Tells the library to setup the default Key and Trust managers for you 55 | // which do not do any form of remote server trust verification 56 | 57 | Connection conn = factory.newConnection(); 58 | Channel channel = conn.createChannel(); 59 | 60 | // non-durable, exclusive, auto-delete queue 61 | channel.queueDeclare("rabbitmq-java-test", false, true, true, null); 62 | channel.basicPublish("", "rabbitmq-java-test", null, "Hello, World".getBytes()); 63 | GetResponse chResponse = channel.basicGet("rabbitmq-java-test", false); 64 | if (chResponse == null) { 65 | System.out.println("No message retrieved"); 66 | } else { 67 | byte[] body = chResponse.getBody(); 68 | System.out.println("Received: " + new String(body)); 69 | } 70 | 71 | channel.close(); 72 | conn.close(); 73 | 74 | } catch (Exception e) { 75 | e.printStackTrace(); 76 | } 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /examples/client_sni/rabbitmq/src/test/java/gas/AppTest.java: -------------------------------------------------------------------------------- 1 | package gas; 2 | 3 | import static org.junit.Assert.assertTrue; 4 | 5 | import org.junit.Test; 6 | 7 | /** 8 | * Unit test for simple App. 9 | */ 10 | public class AppTest 11 | { 12 | /** 13 | * Rigorous Test :-) 14 | */ 15 | @Test 16 | public void shouldAnswerWithTrue() 17 | { 18 | assertTrue( true ); 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /examples/java_cert/.gitignore: -------------------------------------------------------------------------------- 1 | docker-compose.yaml 2 | rabbitstore 3 | tls-gen/ 4 | -------------------------------------------------------------------------------- /examples/java_cert/README.md: -------------------------------------------------------------------------------- 1 | Full example RabbitMQ TLS java 2 | == 3 | This example shows how to use RabbitMQ TLS with external authentication. 
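
With SASL `EXTERNAL` the broker never checks a password: the username is taken from the
distinguished name of the client certificate presented during the TLS handshake. The two
settings that enable this are in `conf/rabbitmq.conf` of this example:

```
ssl_options.verify = verify_peer
auth_mechanisms.3 = EXTERNAL
```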
4 |
5 | Run
6 | ==
7 | ```shell script
8 | ./build
9 | ```
10 | it generates the certs, imports the key, and sets up the docker-compose file with the right user
11 |
12 | ```
13 | Import key, set the password, to make the example easy:
14 | - use the password: rabbitmq
15 | - set Trust this certificate? [no]: yes
16 | Enter keystore password:
17 | ```
18 |
19 | after that, execute:
20 | ```shell script
21 | docker-compose up
22 | ```
23 |
24 | Then run the Java client:
25 |
26 | ```shell script
27 |
28 | mvn package exec:java
29 |
30 | ```
31 |
32 | **Expected Output**
33 |
34 | ```
35 | Connected!!!
36 | Received: Hello, World
37 | ```
38 |
39 |
40 |
41 |
42 | see also: https://www.rabbitmq.com/ssl.html#java-client-connecting as a reference
--------------------------------------------------------------------------------
/examples/java_cert/build:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | rm rabbitstore
3 | rm -rf tls-gen
4 | git clone https://github.com/michaelklishin/tls-gen tls-gen
5 | cd tls-gen/basic && make && cd .. && cd ..
6 |
7 | echo "Import key, set the password, to make the example easy:"
8 | echo "- use the password: rabbitmq"
9 | echo "- set Trust this certificate? [no]: yes"
10 | keytool -import -alias server2 -file tls-gen/basic/result/server_certificate.pem -keystore rabbitstore
11 |
12 | val=$(openssl x509 -in tls-gen/basic/result/client_certificate.pem -nameopt RFC2253 -subject -noout | awk '{print $2}')
13 | echo $val
14 | sed "s/REPLACE_WITH_USER_NAME/$val/g" docker-compose.template > docker-compose.yaml
15 | echo "docker-compose.yaml is ready"
16 | echo "docker-compose up to run RabbitMQ"
--------------------------------------------------------------------------------
/examples/java_cert/conf/enabled_plugins:
--------------------------------------------------------------------------------
1 | [rabbitmq_auth_mechanism_ssl,rabbitmq_stream_management].
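% rabbitmq_auth_mechanism_ssl provides the EXTERNAL SASL mechanism used by this example;
% rabbitmq_stream_management pulls in the stream plugin behind the TLS listener on 5551.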
2 |
--------------------------------------------------------------------------------
/examples/java_cert/conf/rabbitmq.conf:
--------------------------------------------------------------------------------
1 | loopback_users.guest = false
2 | listeners.ssl.default = 5671
3 | stream.listeners.ssl.default = 5551
4 | ssl_options.cacertfile = /cert/ca_certificate.pem
5 | ssl_options.certfile = /cert/server_gsantomagg6LVDM.vmware.com_certificate.pem
6 | ssl_options.depth = 1
7 | ssl_options.fail_if_no_peer_cert = false
8 | ssl_options.keyfile = /cert/server_gsantomagg6LVDM.vmware.com_key.pem
9 | ssl_options.verify = verify_peer
10 | auth_mechanisms.1 = PLAIN
11 | auth_mechanisms.2 = AMQPLAIN
12 | auth_mechanisms.3 = EXTERNAL
13 | listeners.tcp.default = 5672
14 | management.tcp.port = 15672
15 |
--------------------------------------------------------------------------------
/examples/java_cert/docker-compose.template:
--------------------------------------------------------------------------------
1 | version: '3'
2 | services:
3 |   rabbitmq_service:
4 |     image: rabbitmq
5 |     environment:
6 |       RABBITMQ_DEFAULT_USER: REPLACE_WITH_USER_NAME
7 |       RABBITMQ_DEFAULT_PASS: notimportat
8 |     expose:
9 |       - "15672"
10 |       - "5672"
11 |     ports:
12 |       - "15672:15672"
13 |       - "5671:5671"
14 |     volumes:
15 |       - "./tls-gen/basic/result/:/cert"
16 |       - "./conf/:/etc/rabbitmq/"
17 |
18 |
--------------------------------------------------------------------------------
/examples/java_cert/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0"
3 |          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
4 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
5 |     <modelVersion>4.0.0</modelVersion>
6 |
7 |     <groupId>org.example</groupId>
8 |     <artifactId>java_cert</artifactId>
9 |     <version>1.0-SNAPSHOT</version>
10 |
11 |     <dependencies>
12 |         <dependency>
13 |             <groupId>com.rabbitmq</groupId>
14 |             <artifactId>amqp-client</artifactId>
15 |             <version>5.11.0</version>
16 |         </dependency>
17 |     </dependencies>
18 |
19 |     <build>
20 |         <plugins>
21 |             <plugin>
22 |                 <groupId>org.codehaus.mojo</groupId>
23 |                 <artifactId>exec-maven-plugin</artifactId>
24 |                 <version>3.0.0</version>
25 |                 <configuration>
26 |                     <mainClass>rabbitmq.sasl.example.TLSConnection</mainClass>
27 |                 </configuration>
28 |             </plugin>
29 |         </plugins>
30 |     </build>
31 |
32 |     <properties>
33 |         <maven.compiler.source>11</maven.compiler.source>
34 |         <maven.compiler.target>11</maven.compiler.target>
35 |     </properties>
36 | </project>
--------------------------------------------------------------------------------
/examples/java_cert/src/main/java/rabbitmq/sasl/example/TLSConnection.java:
--------------------------------------------------------------------------------
1 | package rabbitmq.sasl.example;
2 |
3 | import com.rabbitmq.client.Channel;
4 | import com.rabbitmq.client.Connection;
5 | import com.rabbitmq.client.ConnectionFactory;
6 |
7 | import java.io.*;
8 | import java.security.*;
9 | import javax.net.ssl.*;
10 |
11 | import com.rabbitmq.client.*;
12 |
13 | public class TLSConnection {
14 |
15 |     public static void main(String[] argv) throws Exception {
16 |
17 |         char[] keyPassphrase = "".toCharArray();
18 |         KeyStore ks = KeyStore.getInstance("PKCS12");
19 |         ks.load(new FileInputStream("tls-gen/basic/result/client_key.p12"), keyPassphrase);
20 |
21 |         KeyManagerFactory kmf = KeyManagerFactory.getInstance("SunX509");
22 |         kmf.init(ks, keyPassphrase);
23 |
24 |         char[] trustPassphrase = "rabbitmq".toCharArray();
25 |         KeyStore tks = KeyStore.getInstance("JKS");
26 |         tks.load(new FileInputStream("rabbitstore"), trustPassphrase);
27 |
28 |         TrustManagerFactory tmf = TrustManagerFactory.getInstance("SunX509");
29 |         tmf.init(tks);
30 |
31 |         SSLContext sslContext = SSLContext.getInstance("TLSv1.2");
32 |         sslContext.init(kmf.getKeyManagers(), tmf.getTrustManagers(), null);
33 |
34 |         ConnectionFactory factory = new ConnectionFactory();
35 |
36 |         factory.setHost("localhost");
37 |         factory.setPort(5671);
38 |         factory.setSaslConfig(DefaultSaslConfig.EXTERNAL);
39 |         factory.useSslProtocol(sslContext);
40 |
41 |         Connection conn = factory.newConnection();
42 |
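        // Reaching this point means both the TLS handshake and the SASL EXTERNAL exchange
        // succeeded; the username is the certificate DN that ./build injected into
        // docker-compose.yaml as RABBITMQ_DEFAULT_USER.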
System.out.println("Connected!!!"); 43 | Channel channel = conn.createChannel(); 44 | 45 | channel.queueDeclare("rabbitmq-java-test", false, true, true, null); 46 | channel.basicPublish("", "rabbitmq-java-test", null, "Hello, World".getBytes()); 47 | 48 | GetResponse chResponse = channel.basicGet("rabbitmq-java-test", false); 49 | if (chResponse == null) { 50 | System.out.println("No message retrieved"); 51 | } else { 52 | byte[] body = chResponse.getBody(); 53 | System.out.println("Received: " + new String(body)); 54 | } 55 | 56 | channel.close(); 57 | conn.close(); 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /examples/mqtt_over_ws/python_ws.py: -------------------------------------------------------------------------------- 1 | ## pip install paho-mqtt 2 | ## pip install pika 3 | import _thread 4 | 5 | import paho.mqtt.client as mqtt 6 | import pika 7 | import time 8 | import sys 9 | import paho.mqtt.publish as publish 10 | 11 | 12 | # MQTT over Web Socket Section 13 | # This is the Front End layer, the layer can be used for external clients" 14 | 15 | def on_connect(client, userdata, flags, rc): 16 | # client event subscribe, (not related to RabbitMQ) 17 | client.subscribe("/event/+/mysubkey/xxx") 18 | 19 | 20 | def on_message(client, userdata, msg): 21 | # receive the message, can be used in normal applications 22 | # mobile or web applications 23 | print("Message from MQTT over WS" + msg.topic + " " + str(msg.payload)) 24 | print("-------") 25 | 26 | 27 | def subscribe_mqtt(): 28 | client = mqtt.Client(transport="websockets") 29 | client.ws_set_options(path="/ws") 30 | client.connect("localhost", 15675, 60) 31 | 32 | client.on_connect = on_connect 33 | client.on_message = on_message 34 | client.loop_forever() 35 | 36 | 37 | _thread.start_new_thread(subscribe_mqtt, ()) 38 | 39 | 40 | class PyPikaTest: 41 | 42 | def on_rabbitmq_message(self, ch, method, properties, body): 43 | # here the message is received from RabbitMQ and forwarded to the ws client 44 | print("Message on RabbitMQ, going to redirect to mqtt %s" % (body)) 45 | 46 | # change the source rabbitmq key to mqtt key 47 | # you don't have to change anything in term of the keys 48 | # just adapt the key from rmq to mqtt 49 | mqtt_topic = method.routing_key.replace(".", "/") 50 | # send the message to the mqtt topic 51 | publish.single("/" + mqtt_topic, body, hostname="localhost") 52 | 53 | def get_connection(self, rm): 54 | credentials = pika.PlainCredentials('test', 'test') 55 | connection = pika.BlockingConnection(pika.ConnectionParameters(host=rm, port=5672, 56 | virtual_host="/", 57 | credentials=credentials)) 58 | return connection 59 | 60 | def start_consumers(self, rm, qname): 61 | channel = self.get_connection(rm).channel() 62 | channel.basic_consume( 63 | queue=qname, 64 | on_message_callback=self.on_rabbitmq_message, 65 | auto_ack=True) 66 | 67 | channel.start_consuming() 68 | 69 | def publish(self, rm): 70 | channel = self.get_connection(rm).channel() 71 | 72 | print("start: %s" % (time.ctime(time.time()))) 73 | for i in range(1, 900000): 74 | time.sleep(1) 75 | channel.basic_publish( 76 | exchange='notify', 77 | routing_key="event.mykey.mysubkey.xxx", 78 | body='my_event_detail: ' + str(i) 79 | ) 80 | print("end: %s" % (time.ctime(time.time()))) 81 | 82 | def start(self, rm): 83 | channel = self.get_connection(rm).channel() 84 | channel.exchange_declare(exchange="notify", exchange_type='topic') 85 | channel.queue_declare(queue='notify_queue', durable=True) 86 | 
channel.queue_bind(exchange='notify', 87 | queue='notify_queue', routing_key="event.#") 88 | _thread.start_new_thread(self.publish, (rm,)) 89 | _thread.start_new_thread(self.start_consumers, (rm, 'notify_queue',)) 90 | 91 | time.sleep(3) 92 | 93 | 94 | print('starting .. %s' % sys.argv[1]) 95 | x = PyPikaTest() 96 | x.start(sys.argv[1]) 97 | 98 | input("Press Enter to continue...\n") 99 | -------------------------------------------------------------------------------- /examples/python/pika_connections.py: -------------------------------------------------------------------------------- 1 | # !/bin/python3 2 | import pika 3 | import time 4 | import sys 5 | import _thread 6 | 7 | 8 | class PyPikaTest: 9 | 10 | def pub(self, host, id): 11 | credentials = pika.PlainCredentials('test', 'test') 12 | connection = pika.BlockingConnection(pika.ConnectionParameters(host=host, port=5672, 13 | virtual_host="/", 14 | credentials=credentials)) 15 | channel = connection.channel() 16 | print("Connection open: %s " % (id)) 17 | import uuid 18 | for i in range(0, 1): 19 | time.sleep(1) 20 | qname = str(uuid.uuid4()) 21 | print("Creating: %s " % (qname)) 22 | 23 | channel.queue_declare(queue=qname, auto_delete=False, durable=True) 24 | prop = pika.BasicProperties( 25 | content_type='application/json', 26 | content_encoding='utf-8', 27 | headers={'key': 'value'}, 28 | delivery_mode=1, 29 | ) 30 | 31 | for i in range(0, 10000): 32 | channel.basic_publish( 33 | exchange='', 34 | routing_key=qname, 35 | properties=prop, 36 | body='{message: hello}' 37 | ) 38 | 39 | def thread_pub(self, rm): 40 | for i in range(0, 1): 41 | _thread.start_new_thread(self.pub, (rm, i,)) 42 | 43 | 44 | print('starting .. %s' % sys.argv[1]) 45 | time.sleep(1) 46 | x = PyPikaTest() 47 | x.thread_pub(sys.argv[1]) 48 | time.sleep(1) 49 | input("Press Enter to continue...") 50 | -------------------------------------------------------------------------------- /examples/python/pika_handle_errors.py: -------------------------------------------------------------------------------- 1 | import _thread 2 | 3 | import pika 4 | import time 5 | import sys 6 | 7 | 8 | def get_connection(rm): 9 | credentials = pika.PlainCredentials('test', 'test') 10 | connection = pika.BlockingConnection(pika.ConnectionParameters(host=rm, port=5672, 11 | virtual_host="/", 12 | credentials=credentials)) 13 | return connection 14 | 15 | 16 | class PyPikaTest: 17 | 18 | def callback(self, chan, method_frame, properties, body): 19 | print(" [x] %s" % (body)) 20 | time.sleep(2) 21 | # chan.basic_ack(delivery_tag=method_frame.delivery_tag) 22 | chan.basic_nack(delivery_tag=method_frame.delivery_tag, requeue=True) # loop here 23 | # chan.basic_nack(delivery_tag=method_frame.delivery_tag, requeue=False) 24 | 25 | def start_consumers(self, rm, qname): 26 | channel = get_connection(rm).channel() 27 | channel.basic_consume( 28 | queue=qname, 29 | on_message_callback=self.callback, 30 | auto_ack=False) 31 | 32 | channel.start_consuming() 33 | 34 | def publish(self, rm, qname): 35 | channel = get_connection(rm).channel() 36 | channel.queue_declare(queue=qname, auto_delete=False) 37 | 38 | _properties = pika.BasicProperties( 39 | content_type='application/json', 40 | content_encoding='utf-8' 41 | ) 42 | print("start: %s" % (time.ctime(time.time()))) 43 | for i in range(1, 2): 44 | time.sleep(1) 45 | channel.basic_publish( 46 | exchange='', 47 | routing_key=qname, 48 | properties=_properties, 49 | body='message: ' + str(i) 50 | ) 51 | print("basic_publish : %s" % 
(time.ctime(time.time()))) 52 | 53 | print("end: %s" % (time.ctime(time.time()))) 54 | 55 | def thread_publish(self, rm): 56 | qname = "training_handle_errors_1" 57 | _thread.start_new_thread(self.publish, (rm, qname,)) 58 | time.sleep(3) 59 | _thread.start_new_thread(self.start_consumers, (rm, qname,)) 60 | 61 | 62 | print('starting .. %s' % sys.argv[1]) 63 | x = PyPikaTest() 64 | x.thread_publish(sys.argv[1]) 65 | 66 | input("Press Enter to continue...") 67 | -------------------------------------------------------------------------------- /examples/python/pika_handle_errors_dlx.py: -------------------------------------------------------------------------------- 1 | import _thread 2 | 3 | import pika 4 | import time 5 | import sys 6 | 7 | 8 | def get_connection(rm): 9 | credentials = pika.PlainCredentials('test', 'test') 10 | connection = pika.BlockingConnection(pika.ConnectionParameters(host=rm, port=5672, 11 | virtual_host="/", 12 | credentials=credentials)) 13 | return connection 14 | 15 | 16 | class PyPikaTest: 17 | 18 | def callback(self, chan, method_frame, properties, body): 19 | print(" [x] %s" % (body)) 20 | time.sleep(2) 21 | chan.basic_nack(delivery_tag=method_frame.delivery_tag, requeue=False) 22 | 23 | def start_consumers(self, rm, qname): 24 | channel = get_connection(rm).channel() 25 | channel.basic_consume( 26 | queue=qname, 27 | on_message_callback=self.callback, 28 | auto_ack=False) 29 | 30 | channel.start_consuming() 31 | 32 | def publish(self, rm, qname): 33 | channel = get_connection(rm).channel() 34 | channel.exchange_declare(exchange="training_dead_exchange", exchange_type='topic') 35 | channel.queue_declare(queue=qname, auto_delete=False, 36 | arguments={'x-dead-letter-exchange': 'training_dead_exchange'}) 37 | 38 | channel.queue_declare(queue="training_dead_queue", auto_delete=False) 39 | channel.queue_bind(queue="training_dead_queue",exchange="training_dead_exchange", routing_key="#") 40 | 41 | _properties = pika.BasicProperties( 42 | content_type='application/json', 43 | content_encoding='utf-8' 44 | ) 45 | print("start: %s" % (time.ctime(time.time()))) 46 | for i in range(0, 10): 47 | time.sleep(1) 48 | channel.basic_publish( 49 | exchange='', 50 | routing_key=qname, 51 | properties=_properties, 52 | body='message: ' + str(i) 53 | ) 54 | print("basic_publish : %s" % (time.ctime(time.time()))) 55 | 56 | print("end: %s" % (time.ctime(time.time()))) 57 | 58 | def thread_publish(self, rm): 59 | qname = "training_handle_errors_dlx_1" 60 | _thread.start_new_thread(self.publish, (rm, qname,)) 61 | time.sleep(3) 62 | _thread.start_new_thread(self.start_consumers, (rm, qname,)) 63 | 64 | 65 | print('starting .. 
%s' % sys.argv[1]) 66 | x = PyPikaTest() 67 | x.thread_publish(sys.argv[1]) 68 | 69 | input("Press Enter to continue...") 70 | -------------------------------------------------------------------------------- /examples/python/pika_memory_limit.py: -------------------------------------------------------------------------------- 1 | import _thread 2 | 3 | import pika 4 | import time 5 | import sys 6 | 7 | 8 | class PyPikaTest: 9 | 10 | def callback(self, ch, method, properties, body): 11 | print(" [x] %s" % (body)) 12 | 13 | def get_connection(self, rm): 14 | credentials = pika.PlainCredentials('test', 'test') 15 | connection = pika.BlockingConnection(pika.ConnectionParameters(host=rm, port=5672, 16 | virtual_host="/", 17 | credentials=credentials)) 18 | return connection 19 | 20 | def publish(self, rm, qname): 21 | channel = self.get_connection(rm).channel() 22 | channel.queue_declare(queue=qname, auto_delete=False, 23 | arguments={'x-queue-mode': 'lazy'} 24 | ) 25 | 26 | print("start: %s" % (time.ctime(time.time()))) 27 | for i in range(1, 900000): 28 | if (i % 100) == 0: 29 | time.sleep(1) 30 | print("Published: .." + str(i)) 31 | channel.basic_publish( 32 | exchange='', 33 | routing_key=qname, 34 | properties=pika.BasicProperties( 35 | delivery_mode=2, # make message persistent 36 | ), 37 | body='message: ' + str(i) 38 | ) 39 | print("end: %s" % (time.ctime(time.time()))) 40 | 41 | def thread_publish(self, rm): 42 | qname = "training_lazy_queue_1" 43 | _thread.start_new_thread(self.publish, (rm, qname,)) 44 | 45 | 46 | print('starting .. %s' % sys.argv[1]) 47 | x = PyPikaTest() 48 | x.thread_publish(sys.argv[1]) 49 | 50 | input("Press Enter to continue...") 51 | -------------------------------------------------------------------------------- /examples/python/pika_produce_consume.py: -------------------------------------------------------------------------------- 1 | import _thread 2 | 3 | import pika 4 | import time 5 | import sys 6 | 7 | 8 | class PyPikaTest: 9 | 10 | def callback(self, ch, method, properties, body): 11 | print(" [x] %s" % (body)) 12 | 13 | def get_connection(self, rm): 14 | credentials = pika.PlainCredentials('test', 'test') 15 | connection = pika.BlockingConnection(pika.ConnectionParameters(host=rm, port=5672, 16 | virtual_host="/", 17 | credentials=credentials)) 18 | return connection 19 | 20 | def start_consumers(self, rm, qname): 21 | channel = self.get_connection(rm).channel() 22 | channel.basic_consume( 23 | queue=qname, 24 | on_message_callback=self.callback, 25 | auto_ack=True) 26 | 27 | channel.start_consuming() 28 | 29 | def publish(self, rm, qname): 30 | channel = self.get_connection(rm).channel() 31 | channel.queue_declare(queue=qname, auto_delete=False) 32 | 33 | _properties = pika.BasicProperties( 34 | content_type='application/json', 35 | content_encoding='utf-8' 36 | ) 37 | print("start: %s" % (time.ctime(time.time()))) 38 | for i in range(1, 900000): 39 | time.sleep(5) 40 | channel.basic_publish( 41 | exchange='', 42 | routing_key=qname, 43 | properties=_properties, 44 | body='message: ' + str(i) 45 | ) 46 | print("end: %s" % (time.ctime(time.time()))) 47 | 48 | def thread_publish(self, rm): 49 | for i in range(1, 8): 50 | qname = "test_queue_" + str(i) 51 | _thread.start_new_thread(self.publish, (rm, qname,)) 52 | time.sleep(3) 53 | _thread.start_new_thread(self.start_consumers, (rm, qname,)) 54 | 55 | 56 | print('starting .. 
%s' % sys.argv[1]) 57 | x = PyPikaTest() 58 | x.thread_publish(sys.argv[1]) 59 | 60 | input("Press Enter to continue...") 61 | -------------------------------------------------------------------------------- /examples/python/pika_produce_consume_access_control.py: -------------------------------------------------------------------------------- 1 | import _thread 2 | 3 | import pika 4 | import time 5 | import sys 6 | 7 | 8 | class PyPikaTest: 9 | 10 | def callback(self, ch, method, properties, body): 11 | print(" [x] %s" % (body)) 12 | 13 | def get_connection(self, rm): 14 | credentials = pika.PlainCredentials('user1', 'user1') 15 | connection = pika.BlockingConnection(pika.ConnectionParameters(host=rm, port=5672, 16 | virtual_host="access_control", 17 | credentials=credentials)) 18 | return connection 19 | 20 | def start_consumers(self, rm, qname): 21 | channel = self.get_connection(rm).channel() 22 | channel.basic_consume( 23 | queue=qname, 24 | on_message_callback=self.callback, 25 | auto_ack=False) 26 | 27 | channel.start_consuming() 28 | 29 | def publish(self, rm, qname): 30 | channel = self.get_connection(rm).channel() 31 | channel.queue_declare(queue=qname, auto_delete=False) 32 | 33 | _properties = pika.BasicProperties( 34 | content_type='application/json', 35 | content_encoding='utf-8' 36 | ) 37 | print("start: %s" % (time.ctime(time.time()))) 38 | for i in range(1, 9000): 39 | time.sleep(5) 40 | channel.basic_publish( 41 | exchange='', 42 | routing_key=qname, 43 | properties=_properties, 44 | body='message: ' + str(i) 45 | ) 46 | print("end: %s" % (time.ctime(time.time()))) 47 | 48 | def thread_publish(self, rm): 49 | for i in range(1, 8): 50 | qname = "training_queue_" + str(i) 51 | _thread.start_new_thread(self.publish, (rm, qname,)) 52 | time.sleep(3) 53 | _thread.start_new_thread(self.start_consumers, (rm, qname,)) 54 | 55 | 56 | print('starting .. 
%s' % sys.argv[1]) 57 | x = PyPikaTest() 58 | x.thread_publish(sys.argv[1]) 59 | 60 | input("Press Enter to continue...") 61 | -------------------------------------------------------------------------------- /examples/python/pika_publish_subscribe.py: -------------------------------------------------------------------------------- 1 | import _thread 2 | 3 | import pika 4 | import time 5 | import sys 6 | 7 | 8 | class PyPikaTest: 9 | 10 | def callback(self, ch, method, properties, body): 11 | print(" [x] %s" % (body)) 12 | 13 | def get_connection(self, rm): 14 | credentials = pika.PlainCredentials('test', 'test') 15 | connection = pika.BlockingConnection(pika.ConnectionParameters(host=rm, port=5672, 16 | virtual_host="/", 17 | credentials=credentials)) 18 | return connection 19 | 20 | def start_consumers(self, rm, qname): 21 | channel = self.get_connection(rm).channel() 22 | channel.basic_consume( 23 | queue=qname, 24 | on_message_callback=self.callback, 25 | auto_ack=True) 26 | 27 | channel.start_consuming() 28 | 29 | def publish(self, rm, qname): 30 | channel = self.get_connection(rm).channel() 31 | channel.queue_declare(queue=qname, auto_delete=False) 32 | 33 | _properties = pika.BasicProperties( 34 | content_type='application/json', 35 | content_encoding='utf-8' 36 | ) 37 | print("start: %s" % (time.ctime(time.time()))) 38 | for i in range(1, 900000): 39 | time.sleep(5) 40 | channel.basic_publish( 41 | exchange='', 42 | routing_key=qname, 43 | body='message: ' + str(i) 44 | ) 45 | print("end: %s" % (time.ctime(time.time()))) 46 | 47 | def thread_publish(self, rm): 48 | for i in range(1, 8): 49 | qname = "training_queue_" + str(i) 50 | _thread.start_new_thread(self.publish, (rm, qname,)) 51 | time.sleep(3) 52 | _thread.start_new_thread(self.start_consumers, (rm, qname,)) 53 | 54 | 55 | print('starting .. 
%s' % sys.argv[1]) 56 | x = PyPikaTest() 57 | x.thread_publish(sys.argv[1]) 58 | 59 | input("Press Enter to continue...") 60 | -------------------------------------------------------------------------------- /examples/python/pump.py: -------------------------------------------------------------------------------- 1 | import _thread 2 | 3 | import pika 4 | import time 5 | import sys 6 | 7 | 8 | class PyPikaTest: 9 | 10 | def callback(self, ch, method, properties, body): 11 | print(" [x] %s" % (body)) 12 | 13 | def get_connection(self, rm): 14 | credentials = pika.PlainCredentials('test', 'test') 15 | connection = pika.BlockingConnection(pika.ConnectionParameters(host=rm, port=5672, 16 | virtual_host="/", 17 | credentials=credentials)) 18 | return connection 19 | 20 | def start_consumers(self, rm, qname): 21 | channel = self.get_connection(rm).channel() 22 | channel.basic_consume( 23 | queue=qname, 24 | on_message_callback=self.callback, 25 | auto_ack=True) 26 | 27 | channel.start_consuming() 28 | 29 | def publish(self, rm, qname): 30 | channel = self.get_connection(rm).channel() 31 | _properties = pika.BasicProperties( 32 | content_type='application/json', 33 | content_encoding='utf-8', 34 | delivery_mode=pika.spec.PERSISTENT_DELIVERY_MODE 35 | ) 36 | print("start: %s" % (time.ctime(time.time()))) 37 | for i in range(1, 1000): 38 | channel.basic_publish( 39 | exchange='', 40 | routing_key=qname, 41 | properties=_properties, 42 | body='message: ' + str(i) 43 | ) 44 | print("end: %s" % (time.ctime(time.time()))) 45 | 46 | def thread_publish(self, rm): 47 | for i in range(1, 2): 48 | qname = "quorum_" + str(i) 49 | channel = self.get_connection(rm).channel() 50 | channel.queue_declare(queue=qname, durable=True, arguments={"x-queue-type": "quorum"}) 51 | _thread.start_new_thread(self.publish, (rm, qname,)) 52 | for i in range(1, 2): 53 | qname = "stream_" + str(i) 54 | channel = self.get_connection(rm).channel() 55 | channel.queue_declare(queue=qname, durable=True, arguments={"x-queue-type": "stream"}) 56 | _thread.start_new_thread(self.publish, (rm, qname,)) 57 | for i in range(1, 2): 58 | qname = "classic_" + str(i) 59 | channel = self.get_connection(rm).channel() 60 | channel.queue_declare(queue=qname, durable=True, arguments={"x-queue-type": "classic"}) 61 | _thread.start_new_thread(self.publish, (rm, qname,)) 62 | 63 | 64 | print('starting .. 
%s' % sys.argv[1]) 65 | x = PyPikaTest() 66 | x.thread_publish(sys.argv[1]) 67 | 68 | input("Press Enter to continue...") 69 | -------------------------------------------------------------------------------- /examples/python/stream.cs: -------------------------------------------------------------------------------- 1 | public async Task Start() 2 | { 3 | Console.WriteLine("Reliable .NET Producer"); 4 | // var addressResolver = new AddressResolver(IPEndPoint.Parse("192.168.56.11:5552")); 5 | var config = new StreamSystemConfig() 6 | { 7 | // AddressResolver = addressResolver, 8 | // UserName = "test", 9 | // Password = "test", 10 | // Endpoints = new List<EndPoint>() {addressResolver.EndPoint} 11 | }; 12 | const string stream = "my-reliable-stream"; 13 | var system = await StreamSystem.Create(config); 14 | await system.CreateStream(new StreamSpec(stream) 15 | { 16 | MaxLengthBytes = 5_242_880 * 3, 17 | MaxSegmentSizeBytes = 5_242_880 18 | }); 19 | const int totalMessages = 1_000_000; 20 | var run = Task.Run(async () => 21 | { 22 | var reliableProducer = await ReliableProducer.CreateReliableProducer(new ReliableProducerConfig() 23 | { 24 | StreamSystem = system, 25 | Stream = stream, 26 | Reference = "my-reliable-producer", 27 | ConfirmationHandler = confirmation => 28 | { 29 | if (confirmation.PublishingId % 10_000 == 0) 30 | { 31 | Console.WriteLine(confirmation.Status == ConfirmationStatus.Confirmed 32 | ? $"Confirmed: Publishing id {confirmation.PublishingId}" 33 | : $"Error: Publishing id {confirmation.PublishingId}, error: {confirmation.Status} "); 34 | } 35 | 36 | return Task.CompletedTask; 37 | } 38 | }); 39 | var start = DateTime.Now; 40 | for (var i = 0; i < totalMessages; i++) 41 | { 42 | await reliableProducer.Send(new Message(Encoding.UTF8.GetBytes($"hello {i}"))); 43 | } 44 | 45 | Console.WriteLine($"End...Done {DateTime.Now - start}"); 46 | // just to receive all the notifications back 47 | Thread.Sleep(2000); 48 | await reliableProducer.Close(); 49 | }); 50 | await run; // wait for the producer task before returning 51 | } -------------------------------------------------------------------------------- /examples/rabbitmq_docker_ssl/README.md: -------------------------------------------------------------------------------- 1 | Full RabbitMQ TLS Java example 2 | == 3 | This example shows how to use RabbitMQ with TLS. 4 | 5 | Run 6 | == 7 | ``` 8 | ./build 9 | ``` 10 | It generates the certificates. After that, execute: 11 | ``` 12 | docker-compose up 13 | ``` 14 | 15 | then run the Java client; you should see: 16 | ``` 17 | Connected!!! 18 | Received: Hello, World 19 | ``` -------------------------------------------------------------------------------- /examples/rabbitmq_docker_ssl/conf/enabled_plugins: -------------------------------------------------------------------------------- 1 | [rabbitmq_management,rabbitmq_auth_mechanism_ssl].
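For readers who want to exercise the same TLS listener from Python, here is a minimal sketch, assuming the tls-gen certificates generated by `./build` (the same `./tls-gen/basic/result/` directory mounted in docker-compose.yaml), pika 1.x, and the broker from this example on localhost:5671. The server certificate is issued for the container hostname `rabbitmq-tls` rather than `localhost`, so hostname checking is disabled; the queue name `rabbitmq-python-test` is made up for the example:

```python
import ssl

import pika

# trust the CA produced by tls-gen; the path matches the docker-compose volume
context = ssl.create_default_context(cafile="tls-gen/basic/result/ca_certificate.pem")
context.check_hostname = False  # the cert is for "rabbitmq-tls", we connect to localhost
context.verify_mode = ssl.CERT_REQUIRED

params = pika.ConnectionParameters(
    host="localhost",
    port=5671,
    credentials=pika.PlainCredentials("test", "test"),
    ssl_options=pika.SSLOptions(context),
)
with pika.BlockingConnection(params) as connection:
    channel = connection.channel()
    channel.queue_declare(queue="rabbitmq-python-test", auto_delete=True)
    channel.basic_publish(exchange="", routing_key="rabbitmq-python-test",
                          body="Hello, World")
    print("Connected!!!")
```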
-------------------------------------------------------------------------------- /examples/rabbitmq_docker_ssl/conf/rabbitmq.conf: -------------------------------------------------------------------------------- 1 | loopback_users.guest = false 2 | listeners.ssl.default = 5671 3 | ssl_options.cacertfile = /cert/ca_certificate.pem 4 | ssl_options.certfile = /cert/server_certificate.pem 5 | ssl_options.depth = 1 6 | ssl_options.fail_if_no_peer_cert = false 7 | ssl_options.keyfile = /cert/server_key.pem 8 | ssl_options.verify = verify_peer 9 | management.ssl.port = 15671 10 | management.ssl.cacertfile = /cert/ca_certificate.pem 11 | management.ssl.certfile = /cert/server_certificate.pem 12 | management.ssl.fail_if_no_peer_cert = false 13 | management.ssl.keyfile = /cert/server_key.pem 14 | management.ssl.verify = verify_none 15 | listeners.tcp.default = 5672 16 | management.tcp.port = 15672 17 | default_pass = test 18 | default_user = test 19 | -------------------------------------------------------------------------------- /examples/rabbitmq_docker_ssl/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | rabbitmq_service: 4 | 5 | hostname: 'rabbitmq-tls' 6 | image: rabbitmq 7 | environment: 8 | RABBITMQ_DEFAULT_USER: test 9 | RABBITMQ_DEFAULT_PASS: test 10 | expose: 11 | - "15672" 12 | - "5672" 13 | ports: 14 | - "15672:15672" 15 | - "5671:5671" 16 | volumes: 17 | - "./tls-gen/basic/result/:/cert" 18 | - "./conf/:/etc/rabbitmq/" 19 | - "./data/:/var/lib/rabbitmq/" 20 | -------------------------------------------------------------------------------- /examples/rabbitmq_docker_ssl/pom.xml: -------------------------------------------------------------------------------- 1 | <?xml version="1.0" encoding="UTF-8"?> 2 | <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> 3 | <modelVersion>4.0.0</modelVersion> 4 | 5 | <groupId>org.example</groupId> 6 | <artifactId>java_cert</artifactId> 7 | <version>1.0-SNAPSHOT</version> 8 | 9 | <dependencies> 10 | <dependency> 11 | <groupId>com.rabbitmq</groupId> 12 | <artifactId>amqp-client</artifactId> 13 | <version>5.11.0</version> 14 | </dependency> 15 | </dependencies> 16 | 17 | <build> 18 | <plugins> 19 | <plugin> 20 | <groupId>org.apache.maven.plugins</groupId> 21 | <artifactId>maven-assembly-plugin</artifactId> 22 | <executions> 23 | <execution> 24 | <phase>package</phase> 25 | <goals> 26 | <goal>single</goal> 27 | </goals> 28 | </execution> 29 | </executions> 30 | <configuration> 31 | <archive> 32 | <manifest> 33 | <mainClass>TLSConnection</mainClass> 34 | </manifest> 35 | </archive> 36 | <descriptorRefs> 37 | <descriptorRef>jar-with-dependencies</descriptorRef> 38 | </descriptorRefs> 39 | </configuration> 40 | </plugin> 41 | </plugins> 42 | </build> 43 | 44 | <properties> 45 | <maven.compiler.source>15</maven.compiler.source> 46 | <maven.compiler.target>15</maven.compiler.target> 47 | </properties> 48 | </project> -------------------------------------------------------------------------------- /examples/rabbitmq_docker_ssl/src/main/java/TLSConnection.java: -------------------------------------------------------------------------------- 1 | import com.rabbitmq.client.Channel; 2 | import com.rabbitmq.client.Connection; 3 | import com.rabbitmq.client.ConnectionFactory; 4 | 5 | import java.io.*; 6 | import java.security.*; 7 | import javax.net.ssl.*; 8 | 9 | import com.rabbitmq.client.*; 10 | 11 | public class TLSConnection { 12 | 13 | public static void main(String[] argv) throws Exception { 14 | ConnectionFactory factory = new ConnectionFactory(); 15 | 16 | factory.setHost("localhost"); 17 | factory.setUsername("test"); 18 | factory.setPassword("test"); 19 | 20 | factory.setPort(5671); 21 | factory.useSslProtocol(); 22 | 23 | Connection conn = factory.newConnection(); 24 | System.out.println("Connected!!!"); 25 | Channel channel = conn.createChannel(); 26 | 27 | channel.queueDeclare("rabbitmq-java-test", false, true, true, null); 28 | channel.basicPublish("", "rabbitmq-java-test", null, "Hello, World".getBytes()); 29 | 30 | GetResponse chResponse = channel.basicGet("rabbitmq-java-test", false); 31 | if (chResponse == null) { 32 | System.out.println("No message retrieved"); 33 | } else { 34 | byte[] body = chResponse.getBody(); 35 | 
System.out.println("Received: " + new String(body)); 36 | } 37 | 38 | channel.close(); 39 | conn.close(); 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /http_utils/README.md: -------------------------------------------------------------------------------- 1 | RabbitMQ HTTP Utils 2 | ==== 3 | 4 | A set of http utils for RabbitMQ 5 | -------------------------------------------------------------------------------- /http_utils/close_allconnections.py: -------------------------------------------------------------------------------- 1 | __author__ = 'gabriele' 2 | 3 | import base64 4 | import sys 5 | import time 6 | import datetime 7 | import json 8 | 9 | import urllib.request 10 | 11 | 12 | ### This script closes all the TCP connections, so be careful!!! 13 | 14 | def print_time(step): 15 | ts = time.time(); 16 | st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S'); 17 | print(st + " - " + step) 18 | 19 | 20 | def get_auth(username, password): 21 | credentials = ('%s:%s' % (username, password)) 22 | encoded_credentials = base64.b64encode(credentials.encode('ascii')) 23 | return 'Authorization', 'Basic %s' % encoded_credentials.decode("ascii") 24 | 25 | 26 | def call_api(rabbitmq_host, vhost, user, password): 27 | p = urllib.request.HTTPPasswordMgrWithDefaultRealm() 28 | p.add_password(None, "http://" + rabbitmq_host + ":15672/api/connections", user, password) 29 | 30 | auth_handler = urllib.request.HTTPBasicAuthHandler(p) 31 | opener = urllib.request.build_opener(auth_handler) 32 | 33 | urllib.request.install_opener(opener) 34 | 35 | req = urllib.request.Request("http://" + rabbitmq_host + ":15672/api/connections", 36 | method='GET') 37 | 38 | res = urllib.request.urlopen(req, timeout=5) 39 | 40 | print_time(" *** response done, loading json") 41 | connections = json.load(res) 42 | print_time(" *** connections {}".format(connections)) 43 | 44 | for q in connections: 45 | print_time(" *** removing " + q['name']) 46 | 47 | url_connection = "http://" + rabbitmq_host + ":15672/api/connections/" + urllib.parse.quote(q[ 48 | 'name']) 49 | request_del = urllib.request.Request( 50 | url_connection, method='DELETE') 51 | urllib.request.urlopen(request_del, timeout=5) 52 | print_time(" *** removed " + q['name']) 53 | 54 | 55 | if __name__ == '__main__': 56 | print_time('Number of arguments: {} {}'.format(len(sys.argv), 'arguments.')) 57 | print_time('Argument List: {}'.format(str(sys.argv))) 58 | rabbitmq_host = sys.argv[1]; 59 | call_api(rabbitmq_host, "%2f", sys.argv[2], sys.argv[3]) 60 | -------------------------------------------------------------------------------- /http_utils/remove_all_queues.py: -------------------------------------------------------------------------------- 1 | __author__ = 'gabriele' 2 | 3 | import base64 4 | import time 5 | import datetime 6 | import json 7 | 8 | import urllib.request 9 | 10 | 11 | ### This script removes all the queues, so be careful!!! 
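### It lists every queue from the management API (GET /api/queues) and then
### issues a DELETE per queue name; the target vhost is passed URL-encoded (%2f = "/").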
12 | 13 | def print_time(step): 14 | ts = time.time(); 15 | st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S'); 16 | print(st + " - " + step) 17 | 18 | 19 | def get_auth(username, password): 20 | credentials = ('%s:%s' % (username, password)) 21 | encoded_credentials = base64.b64encode(credentials.encode('ascii')) 22 | return 'Authorization', 'Basic %s' % encoded_credentials.decode("ascii") 23 | 24 | 25 | def call_api(rabbitmq_host, vhost, user, password): 26 | p = urllib.request.HTTPPasswordMgrWithDefaultRealm() 27 | p.add_password(None, "http://" + rabbitmq_host + ":15672/api/queues", user, password) 28 | 29 | auth_handler = urllib.request.HTTPBasicAuthHandler(p) 30 | opener = urllib.request.build_opener(auth_handler) 31 | 32 | urllib.request.install_opener(opener) 33 | 34 | req = urllib.request.Request("http://" + rabbitmq_host + ":15672/api/queues", 35 | method='GET') 36 | 37 | res = urllib.request.urlopen(req, timeout=5) 38 | 39 | print_time(" *** response done, loading json") 40 | queues = json.load(res) 41 | for q in queues: 42 | print_time(" *** removing " + q['name']) 43 | 44 | request_del = urllib.request.Request( 45 | "http://" + rabbitmq_host + ":15672/api/queues/" + vhost + "/" + q[ 46 | 'name'], method='DELETE') 47 | urllib.request.urlopen(request_del, timeout=5) 48 | print_time(" *** removed " + q['name']) 49 | 50 | 51 | if __name__ == '__main__': 52 | rabbitmq_host = "localhost"; 53 | call_api(rabbitmq_host, "%2f", "guest", "guest") 54 | -------------------------------------------------------------------------------- /k8s/helm/rabbimq_ha/delete_exporter: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | helm delete --purge rmq-exp 3 | 4 | -------------------------------------------------------------------------------- /k8s/helm/rabbimq_ha/delete_prometheus_operator: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | helm delete --purge prom 3 | kubectl delete crd prometheusrules.monitoring.coreos.com 4 | kubectl delete crd servicemonitors.monitoring.coreos.com 5 | kubectl delete crd alertmanagers.monitoring.coreos.com 6 | kubectl delete crd prometheuses.monitoring.coreos.com 7 | kubectl delete crd alertmanagers.monitoring.coreos.com 8 | kubectl delete crd podmonitors.monitoring.coreos.com 9 | -------------------------------------------------------------------------------- /k8s/helm/rabbimq_ha/delete_rabbitmq_ha: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | helm delete --purge rmq-ha 3 | 4 | -------------------------------------------------------------------------------- /k8s/helm/rabbimq_ha/img/dashoard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Gsantomaggio/rabbitmq-utils/07423e9b253af5e602e3b06e65d1fc22157586f7/k8s/helm/rabbimq_ha/img/dashoard.png -------------------------------------------------------------------------------- /k8s/helm/rabbimq_ha/img/rmq_clsuter.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Gsantomaggio/rabbitmq-utils/07423e9b253af5e602e3b06e65d1fc22157586f7/k8s/helm/rabbimq_ha/img/rmq_clsuter.png -------------------------------------------------------------------------------- /k8s/helm/rabbimq_ha/install_exporter: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | helm install 
--name rmq-exp -f rabbitmq_exporter_values.yaml stable/prometheus-rabbitmq-exporter 3 | -------------------------------------------------------------------------------- /k8s/helm/rabbimq_ha/install_prometheus_operator: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | helm install --name prom -f values.yaml stable/prometheus-operator 3 | 4 | -------------------------------------------------------------------------------- /k8s/helm/rabbimq_ha/install_rabbitmq_ha: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | helm install --name rmq-ha -f rabbitmq-ha_values.yaml stable/rabbitmq-ha 3 | -------------------------------------------------------------------------------- /k8s/helm/rabbimq_ha/rabbitmq_exporter_values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for prometheus-rabbitmq-exporter. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 4 | replicaCount: 1 5 | image: 6 | repository: kbudde/rabbitmq-exporter 7 | tag: v0.29.0 8 | pullPolicy: IfNotPresent 9 | service: 10 | type: ClusterIP 11 | externalPort: 9419 12 | internalPort: 9419 13 | resources: {} 14 | # We usually recommend not to specify default resources and to leave this as a conscious 15 | # choice for the user. This also increases chances charts run on environments with little 16 | # resources, such as Minikube. If you do want to specify resources, uncomment the following 17 | # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 18 | # limits: 19 | # cpu: 100m 20 | # memory: 128Mi 21 | # requests: 22 | # cpu: 100m 23 | # memory: 128Mi 24 | 25 | nodeSelector: {} 26 | 27 | tolerations: [] 28 | 29 | affinity: {} 30 | 31 | loglevel: info 32 | rabbitmq: 33 | url: http://rmq-ha-rabbitmq-ha:15672 34 | user: guest 35 | password: test 36 | # If existingPasswordSecret is set then password is ignored 37 | existingPasswordSecret: ~ 38 | capabilities: bert,no_sort 39 | include_queues: ".*" 40 | include_vhost: ".*" 41 | skip_queues: "test.*" 42 | skip_verify: "false" 43 | skip_vhost: "^$" 44 | exporters: "exchange,node,overview,queue" 45 | output_format: "TTY" 46 | timeout: 30 47 | max_queues: 0 48 | 49 | annotations: {} 50 | # prometheus.io/scrape: "true" 51 | # prometheus.io/path: "/metrics" 52 | # prometheus.io/port: 9419 53 | 54 | prometheus: 55 | monitor: 56 | enabled: false 57 | additionalLabels: {} 58 | interval: 15s 59 | namespace: [] 60 | -------------------------------------------------------------------------------- /k8s/helm/rabbitmq3.8/1_install_rabbitmq_ha: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | export KUBECONFIG="$(kind get kubeconfig-path --name="kind")" 3 | helm install --name rmq-ha stable/rabbitmq-ha --set rabbitmqPassword="test" --version 1.35.0 -------------------------------------------------------------------------------- /k8s/helm/rabbitmq3.8/2_describe_endpoint: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | export KUBECONFIG="$(kind get kubeconfig-path --name="kind")" 3 | kubectl get ep rmq-ha-rabbitmq-ha-discovery -o json 4 | -------------------------------------------------------------------------------- /k8s/helm/rabbitmq3.8/3_install_rabbitmq_ha: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | export 
KUBECONFIG="$(kind get kubeconfig-path --name="kind")" 3 | helm install --name rmq-ha -f rabbitmq-ha_values.yaml stable/rabbitmq-ha 4 | -------------------------------------------------------------------------------- /k8s/helm/rabbitmq3.8/4_scale_rabbitmq: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | export KUBECONFIG="$(kind get kubeconfig-path --name="kind")" 3 | kubectl scale --replicas 3 statefulset/rmq-ha-rabbitmq-ha 4 | -------------------------------------------------------------------------------- /k8s/helm/rabbitmq3.8/5_install_prometheus: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | export KUBECONFIG="$(kind get kubeconfig-path --name="kind")" 3 | helm install --name prom -f prometheus_values.yaml stable/prometheus 4 | -------------------------------------------------------------------------------- /k8s/helm/rabbitmq3.8/6_install_grafana: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | export KUBECONFIG="$(kind get kubeconfig-path --name="kind")" 3 | helm install --name graf -f grafana_values.yaml stable/grafana 4 | -------------------------------------------------------------------------------- /k8s/helm/rabbitmq3.8/README.md: -------------------------------------------------------------------------------- 1 | ## Deploy and monitoring a RabbitMQ Cluster with Helm 2 | 3 | **Note: In this example, the data are not persistent to the disk, so don't use it in production; this is only for test.** 4 | 5 | 6 | To run this example, you need: 7 | * [Kind](https://github.com/kubernetes-sigs/kind) 8 | * [Helm client](https://helm.sh/) 9 | 10 | Then following these steps: 11 | * Setup Kind 12 | * Deploy RabbitMQ 13 | * Deploy Prometheus 14 | * Deploy Grafana 15 | 16 | ### Setup Kind 17 | To set up, you can use this [script](https://github.com/Gsantomaggio/rabbitmq-utils/blob/master/k8s/kind/setup) , it: 18 | * creates the Kind cluster 19 | * setups the helm server 20 | * installs the k8s dashboard 21 | 22 | ### Deploy RabbitMQ 23 | You can use the script: `3_install_rabbitmq.` 24 | 25 | ``` 26 | helm install --name rmq-ha -f rabbitmq-ha_values.yaml stable/rabbitmq-ha 27 | ``` 28 | 29 | 30 | If you want to check the RabbitMQ locally, you can use: `export_mgm.` 31 | 32 | ``` 33 | ./export_mgm 34 | Forwarding from 127.0.0.1:5672 -> 5672 35 | Forwarding from [::1]:5672 -> 5672 36 | Forwarding from 127.0.0.1:15672 -> 15672 37 | Forwarding from [::1]:15672 -> 15672 38 | Forwarding from 127.0.0.1:15692 -> 15692 39 | Forwarding from [::1]:15692 -> 15692 40 | ``` 41 | 42 | Then point to http://localhost:15672 ( user: guest and password: test ) 43 | 44 | The RabbitMQ cluster is ready and also the Prometheus metrics should be exposed, check the url: 45 | http://localhost:15692/metrics 46 | 47 | you should have: 48 | ``` 49 | ➜ ~ curl -s http://localhost:15692/metrics | more 50 | # TYPE erlang_mnesia_held_locks gauge 51 | # HELP erlang_mnesia_held_locks Number of held locks. 52 | erlang_mnesia_held_locks 0 53 | # TYPE erlang_mnesia_lock_queue gauge 54 | # HELP erlang_mnesia_lock_queue Number of transactions waiting for a lock. 55 | erlang_mnesia_lock_queue 0 56 | # TYPE erlang_mnesia_transaction_participants gauge 57 | ``` 58 | 59 | 60 | ### Deploy Prometheus 61 | 62 | Use the script: `5_install_prometheus` to install Prometheus with also the RabbitMQ metrics. 
63 | 64 | ``` 65 | helm install --name prom -f prometheus_values.yaml stable/prometheus 66 | ``` 67 | 68 | 69 | Use `export_prom`: 70 | ``` 71 | /export_prom 72 | Forwarding from 127.0.0.1:9090 -> 9090 73 | Forwarding from [::1]:9090 -> 9090 74 | ``` 75 | 76 | test it locally on: http://localhost:9000 77 | 78 | Check the targets on: http://localhost:9090/targets: 79 | 80 | ![RabbitMQ Targets](https://github.com/Gsantomaggio/rabbitmq-utils/blob/master/k8s/helm/rabbitmq3.8/img/prom_rabbitmq_targets.png) 81 | 82 | 83 | ### Deploy Grafana 84 | 85 | Use the script: 6_install_grafana to install Grafana with the Prometheus link and the RabbitMQ templates: 86 | 87 | 88 | ``` 89 | helm install --name graf -f grafana_values.yaml stable/grafana 90 | ``` 91 | 92 | 93 | Use `export_graf` 94 | ``` 95 | ./export_graf 96 | Forwarding from 127.0.0.1:3000 -> 3000 97 | Forwarding from [::1]:3000 -> 3000 98 | ``` 99 | 100 | Test it locally on: http://localhost:3000 (user: admin, password: admin1) 101 | 102 | The installation is ready: 103 | 104 | ![RabbitMQ Targets](https://github.com/Gsantomaggio/rabbitmq-utils/blob/master/k8s/helm/rabbitmq3.8/img/rabbitmq_grafana.png) 105 | 106 | 107 | 108 | 109 | 110 | 111 | -------------------------------------------------------------------------------- /k8s/helm/rabbitmq3.8/delete_grafana: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | export KUBECONFIG="$(kind get kubeconfig-path --name="kind")" 3 | helm delete --purge graf 4 | 5 | -------------------------------------------------------------------------------- /k8s/helm/rabbitmq3.8/delete_prometheus: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | export KUBECONFIG="$(kind get kubeconfig-path --name="kind")" 3 | helm delete --purge prom 4 | 5 | -------------------------------------------------------------------------------- /k8s/helm/rabbitmq3.8/delete_rabbitmq_ha: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | export KUBECONFIG="$(kind get kubeconfig-path --name="kind")" 3 | helm delete --purge rmq-ha 4 | 5 | -------------------------------------------------------------------------------- /k8s/helm/rabbitmq3.8/export_graf: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | export KUBECONFIG="$(kind get kubeconfig-path --name="kind")" 3 | export POD_NAME=$(kubectl get pods --namespace default -l "app=grafana,release=graf" -o jsonpath="{.items[0].metadata.name}") 4 | kubectl --namespace default port-forward $POD_NAME 3000 5 | 6 | -------------------------------------------------------------------------------- /k8s/helm/rabbitmq3.8/export_mgm: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | export KUBECONFIG="$(kind get kubeconfig-path --name="kind")" 3 | export POD_NAME=$(kubectl get pods --namespace default -l "app=rabbitmq-ha" -o jsonpath="{.items[0].metadata.name}") 4 | kubectl port-forward $POD_NAME --namespace default 5672:5672 15672:15672 15692:15692 5 | 6 | -------------------------------------------------------------------------------- /k8s/helm/rabbitmq3.8/export_prom: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | export KUBECONFIG="$(kind get kubeconfig-path --name="kind")" 3 | export POD_NAME=$(kubectl get pods --namespace default -l "app=prometheus,component=server" -o 
jsonpath="{.items[0].metadata.name}") 4 | kubectl --namespace default port-forward $POD_NAME 9090 5 | 6 | -------------------------------------------------------------------------------- /k8s/helm/rabbitmq3.8/img/prom_rabbitmq_targets.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Gsantomaggio/rabbitmq-utils/07423e9b253af5e602e3b06e65d1fc22157586f7/k8s/helm/rabbitmq3.8/img/prom_rabbitmq_targets.png -------------------------------------------------------------------------------- /k8s/helm/rabbitmq3.8/img/rabbitmq_grafana.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Gsantomaggio/rabbitmq-utils/07423e9b253af5e602e3b06e65d1fc22157586f7/k8s/helm/rabbitmq3.8/img/rabbitmq_grafana.png -------------------------------------------------------------------------------- /k8s/helm/rabbitmq3.8/load_kind_images: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | sudo docker pull rabbitmq:3.8.0-alpine 3 | kind load docker-image rabbitmq:3.8.0-alpine 4 | sudo docker pull prom/alertmanager:v0.18.0 5 | kind load docker-image prom/alertmanager:v0.18.0 6 | sudo docker pull grafana/grafana:6.3.4 7 | kind load docker-image grafana/grafana:6.3.4 8 | 9 | -------------------------------------------------------------------------------- /k8s/helm/rabbitmq3.8/rmq_definition.json: -------------------------------------------------------------------------------- 1 | {"rabbit_version":"3.8.0","users":[{"name":"management","password_hash":"I/4D5AI1slLEiY71SdhDLgZklnjS5JUl0qqG6mYyU0kGmf/o","hashing_algorithm":"rabbit_password_hashing_sha256","tags":"management"},{"name":"guest","password_hash":"LPY0ZNeYib9LG60JdKU77pKxbtm0jHgBz4uYnWsyfpsln6Bf","hashing_algorithm":"rabbit_password_hashing_sha256","tags":"administrator"},{"name":"test","password_hash":"w+K0ewiOBppRlD1O13DIM/V84btbMgIBYzXSDYXr3g7W19G5","hashing_algorithm":"rabbit_password_hashing_sha256","tags":"administrator"}],"vhosts":[{"name":"/"}],"permissions":[{"user":"test","vhost":"/","configure":".*","write":".*","read":".*"},{"user":"guest","vhost":"/","configure":".*","write":".*","read":".*"}],"topic_permissions":[],"parameters":[],"global_parameters":[{"name":"cluster_name","value":"rabbit@rmq-ha-rabbitmq-ha-0.rmq-ha-rabbitmq-ha-discovery.default.svc.cluster.local"}],"policies":[],"queues":[{"name":"training_queue_7","vhost":"/","durable":false,"auto_delete":false,"arguments":{}},{"name":"training_queue_5","vhost":"/","durable":false,"auto_delete":false,"arguments":{}},{"name":"training_queue_6","vhost":"/","durable":false,"auto_delete":false,"arguments":{}},{"name":"training_queue_1","vhost":"/","durable":false,"auto_delete":false,"arguments":{}},{"name":"training_queue_4","vhost":"/","durable":false,"auto_delete":false,"arguments":{}},{"name":"training_queue_3","vhost":"/","durable":false,"auto_delete":false,"arguments":{}},{"name":"training_queue_2","vhost":"/","durable":false,"auto_delete":false,"arguments":{}}],"exchanges":[],"bindings":[]} -------------------------------------------------------------------------------- /k8s/helm/rabbitmq3.8/run_test: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | source ../../../examples/python/.env3/bin/activate 3 | python3 ../../../examples/python//pika_produce_consume.py localhost 4 | -------------------------------------------------------------------------------- 
/k8s/helm/rabbitmq_asas/c: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | scp -i ~/keys/engcloud/gsantomaggio.pem rabbitmq-ha_values.yaml sles@10.86.0.133:/home/sles/rmq/k8s/elm/rabbitmq_asas/rabbitmq-ha_values.yaml 3 | echo "done" -------------------------------------------------------------------------------- /k8s/helm/rabbitmq_asas/delete_exporter: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | helm delete --purge rmq-exp 3 | 4 | -------------------------------------------------------------------------------- /k8s/helm/rabbitmq_asas/delete_grafana: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | helm delete --purge graf 3 | 4 | -------------------------------------------------------------------------------- /k8s/helm/rabbitmq_asas/delete_prometheus: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | helm delete --purge prom 3 | 4 | -------------------------------------------------------------------------------- /k8s/helm/rabbitmq_asas/delete_rabbitmq_ha: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | helm delete --purge rmq-ha 3 | 4 | -------------------------------------------------------------------------------- /k8s/helm/rabbitmq_asas/install_exporter: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | helm install --name rmq-exp -f rabbitmq-exporter_values.yaml stable/prometheus-rabbitmq-exporter --namespace stage \ 3 | --set rabbitmq.url=http://rmq-ha-rabbitmq-ha.stage:15672 4 | -------------------------------------------------------------------------------- /k8s/helm/rabbitmq_asas/install_grafana: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | helm install --name graf -f grafana_values.yaml stable/grafana --namespace stage 3 | -------------------------------------------------------------------------------- /k8s/helm/rabbitmq_asas/install_prometheus: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | helm install --name prom -f prometheus_values.yaml stable/prometheus --namespace stage 3 | -------------------------------------------------------------------------------- /k8s/helm/rabbitmq_asas/install_rabbitmq_ha: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | helm install --name rmq-ha -f rabbitmq-ha_values.yaml stable/rabbitmq-ha --namespace stage 3 | -------------------------------------------------------------------------------- /k8s/helm/rabbitmq_asas/rabbitmq-exporter_values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for prometheus-rabbitmq-exporter. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 4 | replicaCount: 1 5 | image: 6 | repository: kbudde/rabbitmq-exporter 7 | tag: v0.29.0 8 | pullPolicy: IfNotPresent 9 | service: 10 | type: ClusterIP 11 | externalPort: 9419 12 | internalPort: 9419 13 | resources: {} 14 | # We usually recommend not to specify default resources and to leave this as a conscious 15 | # choice for the user. This also increases chances charts run on environments with little 16 | # resources, such as Minikube. 
If you do want to specify resources, uncomment the following 17 | # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 18 | # limits: 19 | # cpu: 100m 20 | # memory: 128Mi 21 | # requests: 22 | # cpu: 100m 23 | # memory: 128Mi 24 | 25 | nodeSelector: {} 26 | 27 | tolerations: [] 28 | 29 | affinity: {} 30 | 31 | loglevel: info 32 | rabbitmq: 33 | user: guest 34 | password: test 35 | # If existingPasswordSecret is set then password is ignored 36 | existingPasswordSecret: ~ 37 | capabilities: bert,no_sort 38 | include_queues: ".*" 39 | include_vhost: ".*" 40 | skip_queues: "test.*" 41 | skip_verify: "false" 42 | skip_vhost: "^$" 43 | exporters: "exchange,node,overview,queue" 44 | output_format: "TTY" 45 | timeout: 30 46 | max_queues: 0 47 | 48 | annotations: {} 49 | # prometheus.io/scrape: "true" 50 | # prometheus.io/path: "/metrics" 51 | # prometheus.io/port: 9419 52 | 53 | prometheus: 54 | monitor: 55 | enabled: false 56 | additionalLabels: {} 57 | interval: 15s 58 | namespace: [] 59 | -------------------------------------------------------------------------------- /k8s/kind/dashboard/admin-role-binding.yml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: admin-user 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: cluster-admin 9 | subjects: 10 | - kind: ServiceAccount 11 | name: admin-user 12 | namespace: kube-system 13 | -------------------------------------------------------------------------------- /k8s/kind/dashboard/dashboard-adminuser.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: admin-user 5 | namespace: kube-system -------------------------------------------------------------------------------- /k8s/kind/get_token: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}') 3 | -------------------------------------------------------------------------------- /k8s/kind/setup: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | kind create cluster 3 | sleep 3 4 | export KUBECONFIG="$(kind get kubeconfig-path --name="kind")" 5 | kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta4/aio/deploy/recommended.yaml 6 | sleep 8 7 | kubectl apply -f dashboard/dashboard-adminuser.yaml 8 | kubectl apply -f dashboard/admin-role-binding.yml 9 | sleep 2 10 | helm init --wait 11 | sleep 5 12 | kubectl create serviceaccount --namespace kube-system tiller 13 | kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller 14 | kubectl patch deploy --namespace kube-system tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}' 15 | sleep 2 16 | #helm install stable/prometheus-operator 17 | -------------------------------------------------------------------------------- /microservices_demo/1_rabbitmq.py: -------------------------------------------------------------------------------- 1 | import pika 2 | import time 3 | import _thread 4 | import os 5 | import common 6 | 7 | credentials = pika.PlainCredentials('test', 'test') 8 | 9 | 10 | def print_method(service, key): 11 | common.prGreen("Service 
%s received: routing key: [%r], message: \n " % (service, key)) 12 | 13 | 14 | def callback(ch, method, properties, body): 15 | common.sleep_random() 16 | print_method("clothes", method.routing_key) 17 | common.print_json(body) 18 | ch.basic_ack(delivery_tag=method.delivery_tag) 19 | 20 | 21 | def callbackemail(ch, method, properties, body): 22 | common.sleep_random() 23 | print_method("email", method.routing_key) 24 | common.print_json(body) 25 | ch.basic_ack(delivery_tag=method.delivery_tag) 26 | 27 | 28 | def callbackstore(ch, method, properties, body): 29 | common.sleep_random() 30 | print_method("store", method.routing_key) 31 | common.print_json(body) 32 | ch.basic_ack(delivery_tag=method.delivery_tag) 33 | 34 | 35 | def start_consumers(service_name, queue_name, cb): 36 | time.sleep(1.5) 37 | common.prPurple("Service for %s started. Queue: %r" % (service_name, queue_name)) 38 | common.divide() 39 | connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost', credentials=credentials)) 40 | channel = connection.channel() 41 | channel.basic_consume(on_message_callback=cb, queue=queue_name, auto_ack=False) 42 | channel.start_consuming() 43 | 44 | 45 | def consumers(): 46 | _thread.start_new(start_consumers, ("Clothes", "clothes_queue", callback,)) 47 | # time.sleep(0.5) 48 | # _thread.start_new(start_consumers, ("Food", "food_queue", callback,)) 49 | # time.sleep(0.5) 50 | # _thread.start_new(start_consumers, ("Books", "books_queue", callback)) 51 | time.sleep(0.5) 52 | _thread.start_new(start_consumers, ("Send the emails", "mail_queue", callbackemail,)) 53 | 54 | 55 | def sendmessage(message): 56 | import json 57 | json_string = json.dumps(message) 58 | connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost', credentials=credentials)) 59 | channel = connection.channel() 60 | channel.basic_publish(exchange='orders', 61 | routing_key="clothes.coats", 62 | body=json_string) 63 | connection.close() 64 | 65 | 66 | def create_env(): 67 | connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost', credentials=credentials)) 68 | channel = connection.channel() 69 | channel.exchange_declare(exchange="orders", exchange_type="topic", durable=True) 70 | channel.queue_declare(queue="clothes_queue", durable=True) 71 | # channel.queue_declare(queue="food_queue", durable=True) 72 | channel.queue_declare(queue="mail_queue", durable=True) 73 | # channel.queue_declare(queue="books_queue", durable=True) 74 | channel.queue_declare(queue="store_queue", durable=True) 75 | 76 | channel.queue_bind(queue="clothes_queue", exchange="orders", routing_key="clothes.#") 77 | # channel.queue_bind(queue="food_queue", exchange="orders", routing_key="food.#") 78 | channel.queue_bind(queue="mail_queue", exchange="orders", routing_key="#") 79 | channel.queue_unbind(queue="store_queue", exchange="orders", routing_key="#") 80 | 81 | connection.close() 82 | pass 83 | 84 | 85 | def menu(): 86 | order = common.Order() 87 | os.system('clear') 88 | common.divide() 89 | common.prCyan("======== RABBITMQ Micro-services DEMO ========") 90 | common.divide() 91 | common.prLightPurple("MENU") 92 | common.prLightPurple("1 - send the new order status") 93 | common.prLightPurple("2 - send the delivered status") 94 | common.prLightPurple("3 - add the store service") 95 | common.divide() 96 | s = input() 97 | while s != "q": 98 | if s == "1": 99 | common.prPurple("Sending status \"new_order\"") 100 | common.divide() 101 | dict1 = {'id': order.get_and_inc(), "status": "new_order", 
"what": "Coats"} 102 | sendmessage(dict1) 103 | if s == "2": 104 | common.prPurple("Sending status \"delivered\"") 105 | common.divide() 106 | dict1 = {'id': order.get_id(), "status": "delivered", "what": "Coats", "name": "Jon Snow", 107 | "address": "Winterfell"} 108 | sendmessage(dict1) 109 | if s == "3": 110 | connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost', credentials=credentials)) 111 | channel = connection.channel() 112 | channel.queue_bind(queue="store_queue", exchange="orders", routing_key="#") 113 | connection.close() 114 | time.sleep(0.5) 115 | _thread.start_new(start_consumers, ("Store", "store_queue", callbackstore,)) 116 | s = input() 117 | 118 | 119 | ##FORMAT python 120 | create_env() 121 | consumers() 122 | menu() 123 | -------------------------------------------------------------------------------- /microservices_demo/2_kafka.py: -------------------------------------------------------------------------------- 1 | import pprint 2 | 3 | from kafka import KafkaProducer 4 | from kafka import KafkaConsumer 5 | import _thread 6 | import os 7 | import json 8 | import common 9 | 10 | producer = KafkaProducer(bootstrap_servers='localhost:9092') 11 | 12 | 13 | def sendmessage(message): 14 | import json 15 | json_string = json.dumps(message) 16 | producer.send('orders', key=bytes('clothes.coats', 'utf-8'), value=bytes(json_string, 'utf-8')) 17 | producer.flush() 18 | 19 | 20 | def new_consumer(group_id): 21 | consumer = KafkaConsumer('orders', group_id=group_id, auto_offset_reset='smallest') 22 | 23 | for msg in consumer: 24 | common.sleep_random() 25 | json_data = json.loads(msg.value) 26 | common.prGreen("\nService %s got a message: -- Msg Key: %s, partition: [%s] offset [%s]:" % ( 27 | group_id, msg.key, msg.partition, msg.offset)) 28 | pprint.pprint(json_data, indent=1, width=40) 29 | 30 | 31 | def start_mail_consumer(): 32 | common.sleep_random() 33 | print("Service for \"mail\" started") 34 | common.divide() 35 | new_consumer("mail") 36 | 37 | 38 | def start_clothes_consumer(): 39 | common.sleep_random() 40 | print("Service for \"clothes\" started") 41 | common.divide() 42 | new_consumer("clothes") 43 | 44 | 45 | def start_store_consumer(): 46 | common.sleep_random() 47 | print("Service for \" store \" started") 48 | common.divide() 49 | new_consumer("store") 50 | 51 | 52 | def consumers(): 53 | _thread.start_new(start_mail_consumer, ()) 54 | _thread.start_new(start_clothes_consumer, ()) 55 | 56 | 57 | # mention about exactly one 58 | def menu(): 59 | order = common.Order() 60 | os.system('clear') 61 | common.divide() 62 | common.prCyan("======== KAFKA Micro-services DEMO ========") 63 | common.divide() 64 | common.prLightPurple("MENU") 65 | common.prLightPurple("1 - send the new order status") 66 | common.prLightPurple("2 - send the delivered status") 67 | common.prLightPurple("3 - add store service") 68 | common.divide() 69 | s = input() 70 | while s != "q": 71 | if s == "1": 72 | common.prPurple("Sending status \"new_order\"") 73 | common.divide() 74 | dict1 = {'id': order.get_and_inc(), "status": "new_order", "what": "Coats"} 75 | sendmessage(dict1) 76 | if s == "2": 77 | common.prPurple("Sending status \"delivered\"") 78 | common.divide() 79 | dict1 = {'id': order.get_id(), "status": "delivered", "what": "Coats", "name": "Jon Snow", 80 | "address": "Winterfell"} 81 | sendmessage(dict1) 82 | if s == "3": 83 | _thread.start_new(start_store_consumer, ()) 84 | 85 | s = input() 86 | 87 | 88 | consumers() 89 | 90 | menu() 91 | 
-------------------------------------------------------------------------------- /microservices_demo/common.py: -------------------------------------------------------------------------------- 1 | import time 2 | import json 3 | import pprint 4 | 5 | 6 | def prRed(skk): print("\033[91m {}\033[00m".format(skk)) 7 | 8 | 9 | def prGreen(skk): print("\033[92m {}\033[00m".format(skk)) 10 | 11 | 12 | def prYellow(skk): print("\033[93m {}\033[00m".format(skk)) 13 | 14 | 15 | def prLightPurple(skk): print("\033[94m {}\033[00m".format(skk)) 16 | 17 | 18 | def prPurple(skk): print("\033[95m {}\033[00m".format(skk)) 19 | 20 | 21 | def prCyan(skk): print("\033[96m {}\033[00m".format(skk)) 22 | 23 | 24 | def prLightGray(skk): print("\033[97m {}\033[00m".format(skk)) 25 | 26 | 27 | def prBlack(skk): print("\033[98m {}\033[00m".format(skk)) 28 | 29 | 30 | class Order: 31 | def __init__(self): 32 | self.id = 0 33 | 34 | def get_and_inc(self): 35 | self.id += 1 36 | return self.get_id() 37 | 38 | def get_id(self): 39 | return self.id 40 | 41 | 42 | def sleep_random(): 43 | from random import randint 44 | v = randint(1000, 3000) 45 | time.sleep(v / 10000) 46 | 47 | 48 | def divide(): 49 | prYellow("---------------------------------------------------") 50 | 51 | 52 | def print_json(data): 53 | json_data = json.loads(data) 54 | pprint.pprint(json_data, indent=1, width=40) 55 | divide() 56 | -------------------------------------------------------------------------------- /openstack/mandatory_test/README.md: -------------------------------------------------------------------------------- 1 | Python test example for the `mandatory` flag in oslo.messaging 2 | === 3 | 4 | See: 5 | * https://blueprints.launchpad.net/oslo.messaging/+spec/transport-options 6 | * https://review.opendev.org/#/c/660373/ 7 | * https://review.opendev.org/#/c/666241/ 8 | 9 | 10 | 11 | Basic Idea 12 | === 13 | We added a new class, `TransportOptions`, which can be passed as an optional parameter to `RPCClient`: 14 | ```python 15 | options = oslo_messaging.TransportOptions(at_least_once=True) 16 | client = oslo_messaging.RPCClient(transport, target, transport_options=options) 17 | ``` 18 | 19 | The parameter `at_least_once` [is translated to the `mandatory`](https://github.com/openstack/oslo.messaging/blob/master/oslo_messaging/_drivers/impl_rabbit.py#L1223) flag by the RabbitMQ driver. 20 | 21 | 22 | 23 | Implementation 24 | === 25 | 26 | Inside the `_publish` function, the driver decodes the option value and then passes the `mandatory` flag to the publish call. 27 | 28 | The `on_return` function raises a new exception called `MessageUndeliverable`, so if the message is not routed to any queue, the call raises it. You can handle it like this: 29 | 30 | ```python 31 | try: 32 | r = client.call({}, 'foo', id_value=str(i), test_value="ciao") 33 | print("hello" + r + " - number: " + str(number)) 34 | except oslo_messaging.exceptions.MessageUndeliverable as e: 35 | print("MessageUndeliverable error, RabbitMQ Exception: %s, routing_key: %s message: %s exchange: %s:" % ( 36 | e.exception, e.routing_key, e.message.body, e.exchange)) 37 | ``` 38 | 39 | 40 | How to test it 41 | === 42 | * `git clone https://review.opendev.org/openstack/oslo.messaging mandatory` 43 | * `cd mandatory` 44 | * `python3 -m venv . && source bin/activate` 45 | * `pip install -r requirements.txt` 46 | * `python3 setup.py develop` 47 | * `wget https://raw.githubusercontent.com/Gsantomaggio/rabbitmq-utils/master/openstack/mandatory_test/mandatory_client_fail.py` 48 | * `sudo docker run -d -p 5672:5672 --hostname my-rabbit rabbitmq:3` 49 | * `python3 mandatory_client_fail.py enable_mandatory` 50 | 51 | You can repeat the test using `python3 mandatory_client_fail.py default`, which is the current behaviour; you will see the difference in response time. 52 | 53 | * with `enable_mandatory` the exception is raised immediately 54 | * with `default` you have to wait for the default timeout 55 | 56 | `enable_mandatory` result: 57 | ``` 58 | MessageUndeliverable error, RabbitMQ Exception: Basic.return: (312) NO_ROUTE, routing_key: my_not_existing_topic message: {"oslo.version": "2.0", "oslo.message": "{\"method\": \"foo\", \"args\": {\"id_value\": \"0\", \"test_value\": \"ciao\"}, \"namespace\": \"test\", \"version\": \"2.0\", \"_msg_id\": \"862e5d334e974bdb80ed18aedebb5b70\", \"_reply_q\": \"reply_cbd86ab1d4664597af3ab94975a9647f\", \"_timeout\": null, \"_unique_id\": \"6d9682551e69456ca2df52c5fe1f8b5d\"}"} exchange: my_exchange: 59 | ``` 60 | 61 | `default` result: 62 | ``` 63 | MessagingTimeout error: Timed out waiting for a reply to message ID 986e56ef352d4a7a8b07d345eab13e49 64 | ``` 65 | da daaa!!! 66 | -------------------------------------------------------------------------------- /openstack/mandatory_test/mandatory_client_fail.py: -------------------------------------------------------------------------------- 1 | import oslo_messaging 2 | from oslo_config import cfg 3 | import time 4 | import sys 5 | import os 6 | import _thread 7 | 8 | transport_default = "my_exchange" 9 | 10 | def prRed(skk): print("\033[91m {}\033[00m".format(skk)) 11 | 12 | 13 | def prGreen(skk): print("\033[92m {}\033[00m".format(skk)) 14 | 15 | 16 | def prYellow(skk): print("\033[93m {}\033[00m".format(skk)) 17 | 18 | 19 | def prLightPurple(skk): print("\033[94m {}\033[00m".format(skk)) 20 | 21 | 22 | def prPurple(skk): print("\033[95m {}\033[00m".format(skk)) 23 | 24 | 25 | def prCyan(skk): print("\033[96m {}\033[00m".format(skk), end='', flush=True) 26 | 27 | 28 | def prLightGray(skk): print("\033[97m {}\033[00m".format(skk)) 29 | 30 | 31 | def prBlack(skk): print("\033[98m {}\033[00m".format(skk)) 32 | 33 | 34 | def divide(): 35 | prYellow("---------------------------------------------------------------") 36 | 37 | 38 | 39 | 40 | def start_client(at_least_once): 41 | oslo_messaging.set_transport_defaults(transport_default) 42 | transport = oslo_messaging.get_transport(cfg.CONF) 43 | 44 | # in this way you can simulate the mandatory flag error 45 | # inside the function `call` 46 | # change: options = oslo_messaging.TransportOptions(at_least_once=True) 47 | # to: options = oslo_messaging.TransportOptions(at_least_once=False) 48 | # in this way you will see the difference in behaviour 49 | # replace the 'my_not_existing_topic' value with 'my_topic' to make it 50 | # work. 51 | 52 | target = oslo_messaging.Target(topic="my_not_existing_topic", version='2.0', 53 | namespace='test') 54 | # with at_least_once=False (the current default value) you will see the 55 | # timeout error. 56 | # with at_least_once=True (so the mandatory flag) you will see the exception raised, 57 | # which is faster to raise :))! 
58 | 59 | options = oslo_messaging.TransportOptions(at_least_once=at_least_once) 60 | client = oslo_messaging.RPCClient(transport, target, transport_options=options) 61 | 62 | for i in range(0, 1): 63 | time.sleep(0.1) 64 | 65 | try: 66 | r = client.call({}, 'foo', id_value=str(i), test_value="ciao", timeout=2) 67 | print("hello" + r + " - number: " + str(i)) 68 | except oslo_messaging.exceptions.MessageUndeliverable as e: 69 | ### Raised when at_least_once is True, and it is raised immediately 70 | prRed("MessageUndeliverable error, RabbitMQ Exception: %s, routing_key: %s message: %s exchange: %s: \n" % ( 71 | e.exception, e.routing_key, e.message.body, e.exchange)) 72 | 73 | except oslo_messaging.exceptions.MessagingTimeout as et: 74 | ### Raised when at_least_once is False; you have to wait for the timeout 75 | prRed("MessagingTimeout error: %s: \n" % (str(et))) 76 | 77 | 78 | 79 | def start_timer(): 80 | c = 1 81 | while True: 82 | time.sleep(1) 83 | prCyan(str(c) + " " ) 84 | c=c+1 85 | 86 | 87 | os.system('clear') 88 | 89 | divide() 90 | print(" OpenStack Transport Options Test") 91 | print(" Error queue missing simulation ") 92 | divide() 93 | prLightPurple(" ***** MENU *****") 94 | prLightPurple("1 - Don't use the Mandatory Flag (old behaviour)") 95 | prLightPurple("2 - Use the Mandatory Flag (new Feature)") 96 | divide() 97 | s = input() 98 | 99 | at_least_once = False 100 | if s == "1": 101 | at_least_once = False 102 | prYellow("Client Module sent a message to an Exchange without queues") 103 | prYellow("Mandatory flag is set to False, let's wait for the timeout") 104 | _thread.start_new(start_timer, ()) 105 | 106 | if s == "2": 107 | at_least_once = True 108 | prYellow("Client Module sent a message to an Exchange without queues") 109 | prYellow("Mandatory flag is set to True") 110 | 111 | divide() 112 | start_client(at_least_once) 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | -------------------------------------------------------------------------------- /openstack/mandatory_test/mandatory_test.py: -------------------------------------------------------------------------------- 1 | import oslo_messaging 2 | from oslo_config import cfg 3 | import _thread 4 | import time 5 | 6 | transport_default = "my_exchange" 7 | topic_default = "my_topic" 8 | 9 | 10 | class TestEndpoint(object): 11 | target = oslo_messaging.Target(namespace='test', version='2.0') 12 | 13 | def __init__(self, server): 14 | self.server = server 15 | 16 | def foo(self, _ctx, id_value, test_value): 17 | print("id_value: " + str(id_value) + " - test_value: " + test_value) 18 | return id_value 19 | 20 | 21 | def start_server(): 22 | oslo_messaging.set_transport_defaults(transport_default) 23 | transport = oslo_messaging.get_transport(cfg.CONF) 24 | # cfg.CONF(["--config-file", "oslo.messaging.conf"]) 25 | 26 | target = oslo_messaging.Target(topic=topic_default, server='myserver') 27 | 28 | endpoints = [TestEndpoint(None)] 29 | server = oslo_messaging.get_rpc_server(transport, target, endpoints, 30 | executor='threading') 31 | server.start() 32 | server.wait() 33 | 34 | 35 | def call(transport, target, number): 36 | 37 | # with at_least_once=False (the current default value) you will see the 38 | # timeout error. 39 | # with at_least_once=True (so the mandatory flag) you will see the exception raised, 40 | # which is faster to raise :))! 
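    # the server above listens on topic_default ("my_topic"), while start_client
    # below deliberately targets "my_not_existing_topic", so the publish cannot be
    # routed and the mandatory-flag error path is exercised.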
41 | 42 | options = oslo_messaging.TransportOptions(at_least_once=True) 43 | print("starting client") 44 | 45 | client = oslo_messaging.RPCClient(transport, target, transport_options=options) 46 | 47 | for i in range(0, 10): 48 | time.sleep(0.2) 49 | try: 50 | r = client.call({}, 'foo', id_value=str(i), test_value="ciao") 51 | print("hello" + r + " - number: " + str(number)) 52 | except oslo_messaging.exceptions.MessageUndeliverable as e: 53 | print("MessageUndeliverable error, RabbitMQ Exception: %s, routing_key: %s message: %s exchange: %s:" % ( 54 | e.exception, e.routing_key, e.message.body, e.exchange)) 55 | 56 | 57 | def start_client(): 58 | oslo_messaging.set_transport_defaults(transport_default) 59 | transport = oslo_messaging.get_transport(cfg.CONF) 60 | 61 | # in this way you can simulate the mandatory flag error 62 | # inside the function `call` 63 | # change: options = oslo_messaging.TransportOptions(at_least_once=True) 64 | # to: options = oslo_messaging.TransportOptions(at_least_once=False) 65 | # in this way you will see the difference in behaviour 66 | # replace the 'my_not_existing_topic' value with 'my_topic' to make it 67 | # work. 68 | 69 | target = oslo_messaging.Target(topic="my_not_existing_topic", version='2.0', 70 | namespace='test') 71 | _thread.start_new_thread(call, (transport, target, 1,)) 72 | 73 | 74 | # Create two threads as follows 75 | try: 76 | _thread.start_new_thread(start_server, ()) 77 | time.sleep(2) 78 | start_client() 79 | except: 80 | print("Error: unable to start thread") 81 | 82 | while 1: 83 | pass 84 | -------------------------------------------------------------------------------- /openstack/oslo-example/oslo.messaging.example.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Gsantomaggio/rabbitmq-utils/07423e9b253af5e602e3b06e65d1fc22157586f7/openstack/oslo-example/oslo.messaging.example.py -------------------------------------------------------------------------------- /openstack/pika-examples/pika-example.py: -------------------------------------------------------------------------------- 1 | import pika 2 | import time 3 | import threading 4 | import _thread 5 | import eventlet 6 | 7 | pool = eventlet.GreenPool() 8 | 9 | connection = pika.BlockingConnection() 10 | channel = connection.channel() 11 | 12 | channel.basic_publish( 13 | exchange='', 14 | routing_key='my_queue', 15 | properties=pika.BasicProperties( 16 | expiration='3000', 17 | ), 18 | body='my message' 19 | ) 20 | 21 | credentials = pika.PlainCredentials('test', 'test') 22 | connection10 = pika.BlockingConnection(pika.ConnectionParameters(host='20.0.0.10', credentials=credentials)) 23 | connection11 = pika.BlockingConnection(pika.ConnectionParameters(host='20.0.0.11', credentials=credentials)) 24 | connection12 = pika.BlockingConnection(pika.ConnectionParameters(host='20.0.0.12', credentials=credentials)) 25 | queue_name = "a_" 26 | q_numbers = 5 27 | 28 | 29 | def declare_queues(): 30 | channel10 = connection10.channel() 31 | channel11 = connection11.channel() 32 | channel12 = connection12.channel() 33 | 34 | for i in range(1, q_numbers): 35 | channel10.queue_declare(queue=queue_name + "_10_" + str(i), arguments={'x-queue-type': 'quorum'}, durable=True) 36 | channel11.queue_declare(queue=queue_name + "_11_" + str(i), arguments={'x-queue-type': 'quorum'}, durable=True) 37 | channel12.queue_declare(queue=queue_name + "_12_" + str(i), arguments={'x-queue-type': 'quorum'}, durable=True) 38 | 39 | 40 | def 
callback(ch, method, properties, body):
41 |     print(" [x] %r %s" % (body, threading.current_thread().name))
42 |     # ch.basic_ack(delivery_tag=method.delivery_tag)
43 | 
44 | 
45 | def start_consumers10(index):
46 |     try:
47 | 
48 |         connection = pika.BlockingConnection(pika.ConnectionParameters(host='20.0.0.10', credentials=credentials))
49 |         channel = connection.channel()
50 |         channel.basic_consume(callback,
51 |                               queue=queue_name + "_10_" + str(index),
52 |                               no_ack=False)
53 | 
54 |         channel.start_consuming()
55 |     except Exception:
56 |         print("Error")
57 |         time.sleep(2)
58 |         start_consumers10(index)
59 | 
60 | 
61 | def start_consumers11(index):
62 |     connection = pika.BlockingConnection(pika.ConnectionParameters(host='20.0.0.11', credentials=credentials))
63 |     channel = connection.channel()
64 |     channel.basic_consume(callback,
65 |                           queue=queue_name + "_11_" + str(index),
66 |                           no_ack=False)
67 | 
68 |     channel.start_consuming()
69 | 
70 | 
71 | def start_consumers12(index):
72 |     connection = pika.BlockingConnection(pika.ConnectionParameters(host='20.0.0.12', credentials=credentials))
73 |     channel = connection.channel()
74 |     channel.basic_consume(callback,
75 |                           queue=queue_name + "_12_" + str(index),
76 |                           no_ack=False)
77 | 
78 |     channel.start_consuming()
79 | 
80 | 
81 | # Here I'd like to have different threads, one per consumer.
82 | # 'start_consuming' is blocking, so I am looking at how to
83 | # use different threads or make it work somehow.
84 | # The current code does not work, as you can imagine; the question is:
85 | # is there a way to have different consumers (with the same connection)
86 | # that can work in "pseudo-parallel", even sharing the same thread?
87 | # The consumers spend 90% of their time idle, so a sort of green thread
88 | # could also work.
89 | def consumers():
90 |     for i in range(1, q_numbers):
91 |         _thread.start_new_thread(start_consumers10, (i,))
92 |         _thread.start_new_thread(start_consumers11, (i,))
93 |         _thread.start_new_thread(start_consumers12, (i,))
94 | 
95 | 
96 | def start_publisher10(index):
97 |     connection = pika.BlockingConnection(pika.ConnectionParameters(host='20.0.0.10', credentials=credentials))
98 |     channel = connection.channel()
99 |     message = "info: Hello World!"
100 |     while True:
101 |         channel.basic_publish(exchange='',
102 |                               routing_key=queue_name + "_10_" + str(index),
103 |                               body=message)
104 |         time.sleep(0.1)
105 | 
106 | 
107 | def start_publisher11(index):
108 |     connection = pika.BlockingConnection(pika.ConnectionParameters(host='20.0.0.11', credentials=credentials))
109 |     channel = connection.channel()
110 |     message = "info: Hello World!"
111 |     while True:
112 |         channel.basic_publish(exchange='',
113 |                               routing_key=queue_name + "_11_" + str(index),
114 |                               body=message)
115 |         time.sleep(0.1)
116 | 
117 | 
118 | def start_publisher12(index):
119 |     connection = pika.BlockingConnection(pika.ConnectionParameters(host='20.0.0.12', credentials=credentials))
120 |     channel = connection.channel()
121 |     message = "info: Hello World!"
122 |     while True:
123 |         channel.basic_publish(exchange='',
124 |                               routing_key=queue_name + "_12_" + str(index),
125 |                               body=message)
126 |         time.sleep(0.1)
127 | 
128 | 
129 | def publishers():
130 |     for i in range(1, q_numbers):
131 |         _thread.start_new_thread(start_publisher10, (i,))
132 |         _thread.start_new_thread(start_publisher11, (i,))
133 |         _thread.start_new_thread(start_publisher12, (i,))
134 | 
135 | 
136 | declare_queues()
137 | consumers()
138 | publishers()
139 | 
140 | time.sleep(3000)
141 | 
--------------------------------------------------------------------------------
/openstack/pika-examples/py_pika_publish.py:
--------------------------------------------------------------------------------
1 | import _thread
2 | 
3 | import pika
4 | import time
5 | import uuid
6 | import sys
7 | 
8 | 
9 | class PyPikaTest:
10 | 
11 |     def callback(self, ch, method, properties, body):
12 |         print(" [x] %s" % (body))
13 | 
14 |     def start_consumers12(self, rm, qname):
15 |         credentials = pika.PlainCredentials('test', 'test')
16 |         connection = pika.BlockingConnection(pika.ConnectionParameters(host=rm, port=5672, credentials=credentials))
17 |         channel = connection.channel()
18 |         channel.basic_consume(self.callback,
19 |                               queue=qname,
20 |                               no_ack=False)
21 | 
22 |         channel.start_consuming()
23 | 
24 |     def publish(self, rm, qname):
25 |         credentials = pika.PlainCredentials('test', 'test')
26 |         c = pika.BlockingConnection(pika.ConnectionParameters(host=rm, port=5672, credentials=credentials))
27 | 
28 |         channel = c.channel()
29 |         # str(uuid.uuid4())
30 |         channel.queue_declare(queue=qname, auto_delete=False)
31 |         _properties = pika.BasicProperties(
32 |             content_type='application/json',
33 |             content_encoding='utf-8'
34 |         )
35 |         print("start: %s" % (time.ctime(time.time())))
36 |         for i in range(1, 900000):
37 |             time.sleep(5)
38 |             channel.basic_publish(
39 |                 exchange='',
40 |                 routing_key=qname,
41 |                 properties=_properties,
42 |                 body='message: ' + str(i)
43 |             )
44 |         print("end: %s" % (time.ctime(time.time())))
45 | 
46 |     def thread_publish(self, rm):
47 |         for i in range(1, 8):
48 |             qname = str(uuid.uuid4())
49 |             _thread.start_new_thread(self.publish, (rm, qname,))
50 |             time.sleep(3)
51 |             _thread.start_new_thread(self.start_consumers12, (rm, qname,))
52 | 
53 | 
54 | print('starting .. 
%s' % sys.argv[1]) 55 | x = PyPikaTest() 56 | x.thread_publish(sys.argv[1]) 57 | 58 | input("Press Enter to continue...") 59 | -------------------------------------------------------------------------------- /openstack/py-amqp-examples/create_queues.py: -------------------------------------------------------------------------------- 1 | import _thread 2 | from queue import Queue 3 | 4 | import pika 5 | import time 6 | import uuid 7 | import sys 8 | 9 | 10 | class PyPikaTest: 11 | 12 | def __init__(self): 13 | self.qin = Queue() 14 | self.qout = Queue() 15 | 16 | def pump_queues_name(self): 17 | for i in range(2000): 18 | s = str(uuid.uuid4()) 19 | self.qin.put(s) 20 | self.qout.put(s) 21 | 22 | def create_queues(self, rm): 23 | credentials = pika.PlainCredentials('test', 'test') 24 | c = pika.BlockingConnection(pika.ConnectionParameters(port=rm, host="10.0.0.10", credentials=credentials)) 25 | 26 | channel = c.channel() 27 | while not self.qin.empty(): 28 | name = self.qin.get() 29 | channel.exchange_declare(exchange=name, exchange_type="topic", durable=True) 30 | channel.queue_declare(queue=name, auto_delete=False, durable=True) 31 | for i in range(1): 32 | channel.queue_bind(queue=name, exchange=name, routing_key=str(i)) 33 | print("creating: %s" % (name)) 34 | 35 | def destroy_queues(self, rm): 36 | credentials = pika.PlainCredentials('test', 'test') 37 | c = pika.BlockingConnection(pika.ConnectionParameters(port=rm, host="10.0.0.10", credentials=credentials)) 38 | channel = c.channel() 39 | 40 | while not self.qout.empty(): 41 | name = self.qout.get() 42 | channel.queue_delete(queue=name) 43 | channel.exchange_delete(exchange=name) 44 | print("removing: %s" % (name)) 45 | 46 | def thread_create_queues(self, rm): 47 | self.pump_queues_name() 48 | for i in range(1, 15): 49 | _thread.start_new_thread(self.create_queues, (rm,)) 50 | # time.sleep(30) 51 | #for i in range(1, 1): 52 | # _thread.start_new_thread(self.destroy_queues, (rm,)) 53 | 54 | 55 | print('starting .. 
%s' % sys.argv[1])
56 | x = PyPikaTest()
57 | x.thread_create_queues(sys.argv[1])
58 | 
59 | input("Press Enter to continue...")
60 | 
--------------------------------------------------------------------------------
/openstack/py-amqp-examples/py-amqp-example.py:
--------------------------------------------------------------------------------
1 | from amqp import connection
2 | import os
3 | import sys
4 | import time
5 | import _thread
6 | import threading
7 | 
8 | from kombu import Producer
9 | 
10 | 
11 | class PyAmqpTest:
12 | 
13 |     def __init__(self):
14 |         self.connection = connection.Connection().connect()
15 | 
16 |         for i in range(1, 100):
17 |             name = "%s:%d:%s" % (os.path.basename(sys.argv[0]),
18 |                                  os.getpid(),
19 |                                  "UUUID" + str(i))
20 |             cp = {
21 |                 'capabilities': {
22 |                     'authentication_failure_close': True,
23 |                     'connection.blocked': True,
24 |                     'consumer_cancel_notify': True
25 |                 },
26 |                 'connection_name': name}
27 | 
28 |             connection.Connection(client_properties=cp).connect()
29 | 
30 |         for i in range(1, 100):
31 |             name = "%s:%d:%s" % ("nova-conductor",
32 |                                  123456,
33 |                                  "my_guid" + str(i))
34 |             cp = {
35 |                 'capabilities': {
36 |                     'authentication_failure_close': True,
37 |                     'connection.blocked': True,
38 |                     'consumer_cancel_notify': True
39 |                 },
40 |                 'connection_name': name}
41 |             connection.Connection(client_properties=cp).connect()
42 | 
43 |     def create_exchange(self):
44 |         channel = self.connection.channel()
45 | 
46 |         channel.exchange_declare(exchange="some.exchange.name", type="topic", auto_delete=False)
47 |         args_x = {"x-dead-letter-exchange": "some.exchange.name", 'x-dead-letter-routing-key': 'my_key'}
48 |         channel.queue_declare(queue="my_queue", durable=True, auto_delete=False, arguments=args_x)
49 |         channel.queue_declare(queue="my_dead_queue", auto_delete=False, durable=True)
50 |         channel.queue_bind(queue="my_dead_queue", exchange="some.exchange.name", routing_key="my_key")
51 | 
52 |     def create_queue(self):
53 |         print("Creating queues..: " + threading.current_thread().name)
54 |         channel = self.connection.channel()
55 |         for i in range(1, 3):
56 |             print(" Current Thread: " + threading.current_thread().name + " index: " + str(i))
57 |             # str(uuid.uuid4())
58 |             channel.queue_declare(queue="aa_" + str(i), durable=True, exclusive=False, auto_delete=False,
59 |                                   arguments={'x-queue-type': 'classic'})
60 | 
61 |     def publish(self):
62 |         cn = connection.Connection(host='localhost:5672', userid='test', password='test',
63 |                                    confirm_publish=True)
64 |         cn.connect()
65 |         channel = cn.channel()
66 |         producer = Producer(channel)
67 |         for i in range(1, 300):
68 |             producer.publish("hello", "aa_1")
69 |             time.sleep(2)
70 | 
71 |     def start(self):
72 |         try:
73 |             for i in range(1, 1):  # range(1, 1) is empty, so no extra threads start here
74 |                 print("Starting threads index:" + str(i))
75 |                 _thread.start_new_thread(self.create_queue, ())
76 | 
77 |         except Exception:
78 |             print("Error: unable to start thread")
79 | 
80 |         self.create_queue()
81 | 
82 | 
83 | x = PyAmqpTest()
84 | # x.create_exchange()
85 | x.publish()
86 | # x.publish()
87 | 
88 | while True:
89 |     time.sleep(0.5)
90 | 
--------------------------------------------------------------------------------
/openstack/py-amqp-examples/py_amqp_publish.py:
--------------------------------------------------------------------------------
1 | import _thread
2 | 
3 | import librabbitmq as amqp
4 | import time
5 | import uuid
6 | import sys
7 | 
8 | 
9 | class PyAmqpTest:
10 | 
11 |     def publish(self, rm):
12 |         c = amqp.Connection(host=rm, userid="test", password="test")
13 |         channel = 
c.channel()
14 | 
15 | 
16 |         qname = str(uuid.uuid4())
17 |         message = amqp.Message(
18 |             channel=channel,
19 |             body='the quick brown fox jumps over the lazy dog',
20 |             properties=dict(content_type='application/json',
21 |                             content_encoding='utf-8'))
22 | 
23 |         channel.queue_declare(queue=qname, auto_delete=False)
24 |         print("start: %s" % (time.ctime(time.time())))
25 |         for i in range(0, 100):
26 |             channel.basic_publish(message, routing_key=qname)
27 |         print("end: %s" % (time.ctime(time.time())))
28 |         c.close()
29 | 
30 |     def thread_publish(self, rm):
31 |         from concurrent.futures import ThreadPoolExecutor
32 |         executor = ThreadPoolExecutor(max_workers=40)
33 |         for i in range(0, 90):
34 |             executor.submit(self.publish, rm)
35 | 
36 | 
37 | print('starting .. %s' % sys.argv[1])
38 | 
39 | x = PyAmqpTest()
40 | x.thread_publish(sys.argv[1])
41 | 
42 | input("Press Enter to continue...")
43 | 
--------------------------------------------------------------------------------
/openstack/tox-func-rabbit/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM rabbitmq
2 | 
3 | RUN apt-get update && \
4 |     apt-get -y install gcc \
5 |     python3 \
6 |     git \
7 |     python-tox \
8 |     python3-dev
--------------------------------------------------------------------------------
/openstack/tox-func-rabbit/README.md:
--------------------------------------------------------------------------------
1 | Docker image to execute the RabbitMQ functional tests in OpenStack
2 | ===
3 | 
4 | A docker image to execute `openstack-tox-py36` tests.
5 | 
6 | How to use
7 | ===
8 | 
9 | * `wget https://raw.githubusercontent.com/Gsantomaggio/rabbitmq-utils/master/openstack/tox-func-rabbit/Dockerfile`
10 | * `sudo docker build -t tox-func-rabbit .`
11 | * `git clone https://review.opendev.org/openstack/oslo.messaging`
12 | * `cd oslo.messaging`
13 | * `sudo rm -rf .tox && sudo docker run -it -v $(pwd):/home/git/ tox-func-rabbit:latest sh -c "cd /home/git && tox -epy36-func-rabbit -vv"`
--------------------------------------------------------------------------------
/rabbitmq-suse/leap_15_ipv6/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 | 
4 | # All Vagrant configuration is done below. The "2" in Vagrant.configure
5 | # configures the configuration version (we support older styles for
6 | # backwards compatibility). Please don't change it unless you know what
7 | # you're doing.
8 | Vagrant.configure("2") do |config|
9 |   # The most common configuration options are documented and commented below.
10 |   # For a complete reference, please see the online documentation at
11 |   # https://docs.vagrantup.com.
12 | 
13 |   # Every Vagrant development environment requires a box. You can search for
14 |   # boxes at https://vagrantcloud.com/search.
15 | 
16 |   config.vm.define "testing-15.0" do |leap_15|
17 |     leap_15.vm.box = "openSUSE-Leap-15.0-Vagrant.x86_64"
18 |     leap_15.vm.box_url = "https://download.opensuse.org/repositories/Virtualization:/Appliances:/Images:/openSUSE-Leap-15.0/images/boxes/openSUSE-Leap-15.0-Vagrant.x86_64.json"
19 |   end
20 | 
21 |   # Disable automatic box update checking. If you disable this, then
22 |   # boxes will only be checked for updates when the user runs
23 |   # `vagrant box outdated`. This is not recommended.
24 | # config.vm.box_check_update = false 25 | 26 | # Create a forwarded port mapping which allows access to a specific port 27 | # within the machine from a port on the host machine. In the example below, 28 | # accessing "localhost:8080" will access port 80 on the guest machine. 29 | # NOTE: This will enable public access to the opened port 30 | # config.vm.network "forwarded_port", guest: 80, host: 8080 31 | 32 | # Create a forwarded port mapping which allows access to a specific port 33 | # within the machine from a port on the host machine and only allow access 34 | # via 127.0.0.1 to disable public access 35 | # config.vm.network "forwarded_port", guest: 80, host: 8080, host_ip: "127.0.0.1" 36 | 37 | # Create a private network, which allows host-only access to the machine 38 | # using a specific IP. 39 | # config.vm.network "private_network", ip: "192.168.33.10" 40 | 41 | # Create a public network, which generally matched to bridged network. 42 | # Bridged networks make the machine appear as another physical device on 43 | # your network. 44 | # config.vm.network "public_network" 45 | 46 | # Share an additional folder to the guest VM. The first argument is 47 | # the path on the host to the actual folder. The second argument is 48 | # the path on the guest to mount the folder. And the optional third 49 | # argument is a set of non-required options. 50 | # config.vm.synced_folder "../data", "/vagrant_data" 51 | 52 | # Provider-specific configuration so you can fine-tune various 53 | # backing providers for Vagrant. These expose provider-specific options. 54 | # Example for VirtualBox: 55 | # 56 | # config.vm.provider "virtualbox" do |vb| 57 | # # Display the VirtualBox GUI when booting the machine 58 | # vb.gui = true 59 | # 60 | # # Customize the amount of memory on the VM: 61 | # vb.memory = "1024" 62 | # end 63 | # 64 | # View the documentation for the provider you are using for more 65 | # information on available options. 
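  # The provisioning below adds the openSUSE Erlang factory repository,
  # imports the RabbitMQ release signing key, installs the RabbitMQ
  # 3.8.0-beta.3 RPM plus Erlang, and wires up an epmd systemd socket.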
66 | 67 | config.vm.provision "shell", inline: <<-SHELL 68 | sudo zypper addrepo -f http://download.opensuse.org/repositories/devel:/languages:/erlang:/Factory/openSUSE_Leap_15.0/devel:languages:erlang:Factory.repo 69 | sudo zypper --gpg-auto-import-keys refresh 70 | sudo zypper -n install wget 71 | sudo rpm --import https://github.com/rabbitmq/signing-keys/releases/download/2.0/rabbitmq-release-signing-key.asc 72 | wget https://github.com/rabbitmq/rabbitmq-server/releases/download/v3.8.0-beta.3/rabbitmq-server-3.8.0.beta.3-1.suse.noarch.rpm 73 | sudo zypper -n in rabbitmq-server-3.8.0.beta.3-1.suse.noarch.rpm 74 | sudo zypper -n install erlang 75 | sudo touch /etc/rabbitmq/rabbitmq-env.conf 76 | cat >/usr/lib/systemd/system/epmd.socket <> /etc/hosts 31 | sudo echo "10.0.0.12 node2 " >> /etc/hosts 32 | sudo echo "10.0.0.10 node0 " >> /etc/hosts 33 | sudo zypper addrepo -f https://download.opensuse.org/repositories/network:/messaging:/amqp/openSUSE_Leap_15/network:messaging:amqp.repo 34 | sudo zypper addrepo -f http://download.opensuse.org/repositories/devel:/languages:/erlang:/Factory/openSUSE_Leap_15.0/devel:languages:erlang:Factory.repo 35 | sudo zypper --gpg-auto-import-keys refresh 36 | sudo zypper -n install erlang 37 | sudo zypper -n install rabbitmq-server 38 | sudo zypper -n install rabbitmq-server-plugins 39 | sudo rm /etc/rabbitmq/rabbitmq-env.conf 40 | mkdir -p /etc/systemd/system/epmd.socket.d/ 41 | touch /etc/systemd/system/epmd.socket.d/ports.conf 42 | cat >/etc/systemd/system/epmd.socket.d/ports.conf < /var/lib/rabbitmq/.erlang.cookie 54 | sudo bash -c 'echo "vm_memory_high_watermark.relative = 0.9" > /etc/rabbitmq/rabbitmq.conf' 55 | # sudo bash -c 'echo "cluster_partition_handling = pause_minority" >> /etc/rabbitmq/rabbitmq.conf' 56 | sudo bash -c 'echo "queue_master_locator = min-masters " >> /etc/rabbitmq/rabbitmq.conf' 57 | sudo systemctl restart rabbitmq-server 58 | sudo systemctl enable rabbitmq-server 59 | sudo rabbitmq-plugins enable rabbitmq_management 60 | sudo rabbitmqctl stop_app 61 | sudo rabbitmqctl reset 62 | sudo rabbitmqctl join_cluster rabbit@node0 63 | sudo rabbitmqctl start_app 64 | sudo rabbitmqctl add_user test test 65 | sudo rabbitmqctl set_user_tags test administrator 66 | sudo rabbitmqctl set_permissions -p / test ".*" ".*" ".*" 67 | SHELL 68 | end 69 | 70 | 71 | 72 | -------------------------------------------------------------------------------- /rabbitmq-suse/vagrant_cluster/img/cluster.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Gsantomaggio/rabbitmq-utils/07423e9b253af5e602e3b06e65d1fc22157586f7/rabbitmq-suse/vagrant_cluster/img/cluster.png -------------------------------------------------------------------------------- /rabbitmq_stream/AMQP/nodejs/.gitignore: -------------------------------------------------------------------------------- 1 | node_modules/* 2 | dist/* -------------------------------------------------------------------------------- /rabbitmq_stream/AMQP/nodejs/README.md: -------------------------------------------------------------------------------- 1 | NodeJS Stream example with AMQP `amqplib` lib. 
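It assumes a RabbitMQ broker with stream-queue support (3.9 or later); the `amqplib` dependency declared in `package.json` below is installed with `npm install`.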
2 | 
3 | Send:
4 | ```
5 | node send_stream.js
6 | ```
7 | 
8 | Receive:
9 | ```
10 | node receive_stream.js
11 | ```
--------------------------------------------------------------------------------
/rabbitmq_stream/AMQP/nodejs/package.json:
--------------------------------------------------------------------------------
1 | {
2 |     "name": "stream_js",
3 |     "version": "1.0.0",
4 |     "description": "",
5 |     "main": "index.js",
6 |     "scripts": {
7 |         "test": "echo \"Error: no test specified\" && exit 1"
8 |     },
9 |     "author": "gsantomaggio",
10 |     "license": "ISC",
11 |     "dependencies": {
12 |         "amqplib": "^0.8.0"
13 |     }
14 | }
15 | 
--------------------------------------------------------------------------------
/rabbitmq_stream/AMQP/nodejs/receive_stream.js:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env node
2 | 
3 | var amqp = require('amqplib');
4 | 
5 | amqp.connect('amqp://localhost').then(function (conn) {
6 |     process.once('SIGINT', function () { conn.close(); });
7 |     return conn.createChannel().then(function (ch) {
8 | 
9 |         var q = 'my_first_stream';
10 |         // Define the stream queue
11 |         // Mandatory: exclusive: false, durable: true, autoDelete: false
12 |         var ok = ch.assertQueue(q, {
13 |             exclusive: false,
14 |             durable: true,
15 |             autoDelete: false,
16 |             arguments: {
17 |                 'x-queue-type': 'stream',
18 |                 'x-max-length-bytes': 2_000_000_000
19 |             }
20 |         })
21 | 
22 |         ch.qos(100); // this is mandatory
23 | 
24 |         ok = ok.then(function (_qok) {
25 |             return ch.consume(q, function (msg) {
26 |                 console.log(" [x] Received '%s'", msg.content.toString());
27 |                 ch.ack(msg);
28 |             }, {
29 |                 noAck: false,
30 |                 arguments: {
31 |                     'x-stream-offset': 'first' // here you can specify the offset: first, last, next, ...
32 |                 }
33 |             },
34 |             );
35 |         });
36 | 
37 |         return ok.then(function (_consumeOk) {
38 |             console.log(' [*] Waiting for messages. To exit press CTRL+C');
39 |         });
40 |     });
41 | }).catch(console.warn);
--------------------------------------------------------------------------------
/rabbitmq_stream/AMQP/nodejs/send_stream.js:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env node
2 | 
3 | var amqp = require('amqplib');
4 | 
5 | amqp.connect('amqp://localhost').then(function(conn) {
6 |     return conn.createChannel().then(function(ch) {
7 |         var q = 'my_first_stream';
8 |         var msg = 'Hello World!';
9 | 
10 | 
11 |         // Define the stream queue
12 |         // Mandatory: exclusive: false, durable: true, autoDelete: false
13 |         var ok = ch.assertQueue(q, {
14 |             exclusive: false,
15 |             durable: true,
16 |             autoDelete: false,
17 |             arguments: {
18 |                 'x-queue-type': 'stream',
19 |                 'x-max-length-bytes': 2_000_000_000
20 |             }
21 |         })
22 | 
23 |         // send the message to the stream queue
24 |         return ok.then(function(_qok) {
25 |             ch.sendToQueue(q, Buffer.from(msg));
26 |             console.log(" [x] Sent '%s'", msg);
27 |             return ch.close();
28 |         });
29 |     }).finally(function() { conn.close(); });
30 | }).catch(console.warn);
--------------------------------------------------------------------------------
/rabbitmq_stream/AMQP/python/stream/.gitignore:
--------------------------------------------------------------------------------
1 | venv
--------------------------------------------------------------------------------
/rabbitmq_stream/AMQP/python/stream/README.md:
--------------------------------------------------------------------------------
1 | Python Pika Stream example.
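It assumes a RabbitMQ broker with stream-queue support (3.9 or later) and the `pika` client library (`pip install pika`).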
2 | 
3 | Send:
4 | ```
5 | ./send_stream.py
6 | ```
7 | 
8 | Receive:
9 | ```
10 | ./receive_stream.py
11 | ```
--------------------------------------------------------------------------------
/rabbitmq_stream/AMQP/python/stream/client.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import pika
3 | 
4 | connection = pika.BlockingConnection(
5 |     pika.ConnectionParameters(host='localhost')
6 | )
7 | channel_stream = connection.channel()
8 | 
9 | channel_stream.queue_declare(
10 |     "stream-queue",
11 |     durable=True,
12 |     arguments={
13 |         'x-queue-type': 'stream',
14 |     }
15 | )
16 | 
17 | for i in range(2):
18 |     channel_stream.basic_publish(
19 |         exchange='',
20 |         routing_key='stream-queue',
21 |         body=f"stream data {i}".encode()
22 |     )
23 | connection.close()
--------------------------------------------------------------------------------
/rabbitmq_stream/AMQP/python/stream/receive_stream.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import pika
3 | 
4 | credentials = pika.PlainCredentials('guest', 'guest')
5 | connection = pika.BlockingConnection(pika.ConnectionParameters(host="localhost", port=5672,
6 |                                                                virtual_host="/",
7 |                                                                credentials=credentials))
8 | 
9 | q_name = "stream_queue"
10 | channel = connection.channel()
11 | 
12 | # Mandatory: exclusive: false, durable: true, auto_delete: false
13 | channel.queue_declare(queue=q_name, auto_delete=False, exclusive=False, durable=True,
14 |                       arguments={
15 |                           'x-queue-type': 'stream',  # Mandatory to define a stream queue
16 |                           'x-max-length-bytes': 2_000_000_000
17 |                           # Set the queue retention to 2 GB, otherwise the stream doesn't have any limit
18 |                       })
19 | 
20 | 
21 | def callback(ch, method, properties, body):
22 |     print(" [x] %s" % body)
23 |     ch.basic_ack(delivery_tag=method.delivery_tag)
24 | 
25 | 
26 | channel.basic_qos(prefetch_count=100)  # mandatory
27 | channel.basic_consume(
28 |     queue=q_name,
29 |     on_message_callback=callback,
30 |     arguments={
31 |         'x-stream-offset': 'first'  # here you can specify the offset: first, last, next, and timestamp
32 |         # with 'first' it always starts consuming from the beginning
33 |     },
34 |     auto_ack=False)
35 | channel.start_consuming()
36 | 
--------------------------------------------------------------------------------
/rabbitmq_stream/AMQP/python/stream/send_stream.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import pika
3 | 
4 | credentials = pika.PlainCredentials('guest', 'guest')
5 | connection = pika.BlockingConnection(pika.ConnectionParameters(host="localhost", port=5672,
6 |                                                                virtual_host="/",
7 |                                                                credentials=credentials))
8 | 
9 | q_name = "stream_queue"
10 | channel = connection.channel()
11 | 
12 | # Mandatory: exclusive: false, durable: true, auto_delete: false
13 | channel.queue_declare(queue=q_name, auto_delete=False, exclusive=False, durable=True,
14 |                       arguments={
15 |                           'x-queue-type': 'stream',  # Mandatory to define a stream queue
16 |                           'x-max-length-bytes': 2_000_000_000
17 |                           # Set the queue retention to 2 GB, otherwise the stream doesn't have any limit
18 |                       })
19 | 
20 | channel.basic_publish(
21 |     exchange='',
22 |     routing_key=q_name,
23 |     body='Hello Stream!')
24 | print(" [x] Sent 'Hello Stream!'")
25 | channel.close()
26 | connection.close()
27 | 
--------------------------------------------------------------------------------
/rabbitmq_stream/AMQP/python/stream/server.py:
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import pika 3 | 4 | connection = pika.BlockingConnection( 5 | pika.ConnectionParameters(host='localhost') 6 | ) 7 | channel_stream = connection.channel() 8 | 9 | channel_stream.queue_declare( 10 | "stream-queue", 11 | auto_delete=False, exclusive=False, durable=True, 12 | arguments={ 13 | 'x-queue-type': 'stream', 14 | } 15 | ) 16 | channel_stream.basic_qos( 17 | prefetch_count=1, 18 | ) 19 | 20 | 21 | class Server(object): 22 | def __init__(self): 23 | channel_stream.basic_consume( 24 | queue="stream-queue", 25 | on_message_callback=self.stream_callback, 26 | ) 27 | 28 | def stream_callback(self, channel, method, props, body): 29 | print(f"received '{body.decode()}' via {method.routing_key}") 30 | channel_stream.stop_consuming() 31 | 32 | 33 | server = Server() 34 | 35 | try: 36 | channel_stream.start_consuming() 37 | except KeyboardInterrupt: 38 | connection.close() -------------------------------------------------------------------------------- /rabbitmq_stream/README.md: -------------------------------------------------------------------------------- 1 | AMQP RabbitMQ Stream Examples 2 | --- 3 | 4 | How to use stream queues with AMQP clients. 5 | 6 | See also: 7 | - https://blog.rabbitmq.com/posts/2021/10/rabbitmq-streams-interoperability/ 8 | - https://www.rabbitmq.com/stream.html 9 | - https://blog.rabbitmq.com/posts/2021/07/rabbitmq-streams-overview 10 | -------------------------------------------------------------------------------- /raft_kv/.gitignore: -------------------------------------------------------------------------------- 1 | .rebar3 2 | _* 3 | .eunit 4 | *.o 5 | *.beam 6 | *.plt 7 | *.swp 8 | *.swo 9 | .erlang.cookie 10 | ebin 11 | log 12 | erl_crash.dump 13 | .rebar 14 | logs 15 | _build 16 | .idea 17 | *.iml 18 | rebar3.crashdump 19 | *~ 20 | -------------------------------------------------------------------------------- /raft_kv/README.md: -------------------------------------------------------------------------------- 1 | # Distributed KV with RabbitMQ Raft Library 2 | 3 | This is a Distributed Key Value example based on [Ra](https://github.com/rabbitmq/ra). 4 | 5 | ## Requirements 6 | 7 | * [rebar3](https://github.com/erlang/rebar3) 8 | 9 | ## Setup your env 10 | 11 | ```bash 12 | wget https://s3.amazonaws.com/rebar3/rebar3 && chmod +x rebar3 13 | ``` 14 | 15 | ## Compiling 16 | 17 | ``` bash 18 | rebar3 compile 19 | ``` 20 | 21 | ## Test the example in localhost: 22 | 23 | To test the example in localhost with three nodes you can: 24 | 25 | - run the node1: 26 | ``` 27 | rebar3 shell --sname node1 28 | ``` 29 | 30 | - run the node2: 31 | ``` 32 | rebar3 shell --sname node2 33 | 34 | ``` 35 | - run the node3: 36 | ``` 37 | rebar3 shell --sname node3 38 | raft_kv:start_local(). 39 | ``` 40 | 41 | Join the other nodes (node3): 42 | ```erlang 43 | raft_kv:join(node2@GaS). 44 | raft_kv:join(node1@GaS). 45 | ``` 46 | 47 | check the members: 48 | 49 | ```erlang 50 | raft_kv_sm:members(). 51 | %% => Cluster Members: 52 | %% => Leader:{kv,node3@GaS} 53 | %% => Followers:[{kv,node1@GaS},{kv,node2@GaS}] 54 | %% => Nodes:[{kv,node1@GaS},{kv,node2@GaS},{kv,node3@GaS}] 55 | ``` 56 | 57 | put values: 58 | ```erlang 59 | (node3@GaS)5> raft_kv:put("key1","value1"). 60 | ok 61 | (node3@GaS)6> raft_kv:put("key2","value1"). 62 | ok 63 | (node3@GaS)7> raft_kv:put("key2","value2"). 64 | ok 65 | (node3@GaS)8> raft_kv:put("key3","value3"). 
66 | ok 67 | ``` 68 | 69 | get the values: 70 | ```erlang 71 | (node1@GaS)1> raft_kv:get("key1"). 72 | {ok,"value1"} 73 | (node1@GaS)2> raft_kv:get("key2"). 74 | {ok,"value2"} 75 | (node1@GaS)3> raft_kv:get("key3"). 76 | {ok,"value3"} 77 | (node1@GaS)4> 78 | ``` 79 | 80 | you can test the failover by stopping a node 81 | -------------------------------------------------------------------------------- /raft_kv/rebar.config: -------------------------------------------------------------------------------- 1 | {erl_opts, [debug_info]}. 2 | {deps, [ 3 | {ra, {git, "https://github.com/rabbitmq/ra.git", {branch, "master"}}}, 4 | {cowboy, "2.6.3"}, 5 | {jsx, {git, "https://github.com/talentdeficit/jsx.git", {branch, "v2.8.0"}}} 6 | ]}. 7 | 8 | {shell, [ 9 | % {config, "config/sys.config"}, 10 | {apps, [sasl,crypto,cowboy,raft_kv]} 11 | ]}. 12 | -------------------------------------------------------------------------------- /raft_kv/rebar.lock: -------------------------------------------------------------------------------- 1 | {"1.1.0", 2 | [{<<"aten">>,{pkg,<<"aten">>,<<"0.5.2">>},1}, 3 | {<<"cowboy">>,{pkg,<<"cowboy">>,<<"2.6.3">>},0}, 4 | {<<"cowlib">>,{pkg,<<"cowlib">>,<<"2.7.3">>},1}, 5 | {<<"gen_batch_server">>,{pkg,<<"gen_batch_server">>,<<"0.7.0">>},1}, 6 | {<<"jsx">>, 7 | {git,"https://github.com/talentdeficit/jsx.git", 8 | {ref,"3074d4865b3385a050badf7828ad31490d860df5"}}, 9 | 0}, 10 | {<<"ra">>, 11 | {git,"https://github.com/rabbitmq/ra.git", 12 | {ref,"724043e0a8d81d0e2bc6fad0b9c2b95420422e76"}}, 13 | 0}, 14 | {<<"ranch">>,{pkg,<<"ranch">>,<<"1.7.1">>},1}]}. 15 | [ 16 | {pkg_hash,[ 17 | {<<"aten">>, <<"2B4D31261D36C0A11CD6D04AE3F96C8951EB32106E9D2013A5B33BA14F7875AE">>}, 18 | {<<"cowboy">>, <<"99AA50E94E685557CAD82E704457336A453D4ABCB77839AD22DBE71F311FCC06">>}, 19 | {<<"cowlib">>, <<"A7FFCD0917E6D50B4D5FB28E9E2085A0CEB3C97DEA310505F7460FF5ED764CE9">>}, 20 | {<<"gen_batch_server">>, <<"FA458AEBA2979C49F197F2CD33E0C39A42F31CFF80040162FF07405FD9D3D303">>}, 21 | {<<"ranch">>, <<"6B1FAB51B49196860B733A49C07604465A47BDB78AA10C1C16A3D199F7F8C881">>}]} 22 | ]. 23 | -------------------------------------------------------------------------------- /raft_kv/src/raft_kv.app.src: -------------------------------------------------------------------------------- 1 | {application, raft_kv, 2 | [{description, "An OTP application"}, 3 | {vsn, "0.1.0"}, 4 | {registered, []}, 5 | {mod, {raft_kv_app, []}}, 6 | {applications, 7 | [kernel, 8 | stdlib 9 | ]}, 10 | {env,[]}, 11 | {modules, []}, 12 | 13 | {licenses, ["Apache 2.0"]}, 14 | {links, []} 15 | ]}. 16 | -------------------------------------------------------------------------------- /raft_kv/src/raft_kv.erl: -------------------------------------------------------------------------------- 1 | %%%------------------------------------------------------------------- 2 | %%% @author gabriele 3 | %%% @copyright (C) 2019, 4 | %%% @doc 5 | %%% 6 | %%% @end 7 | %%% Created : 29. May 2019 11.28 8 | %%%------------------------------------------------------------------- 9 | -module(raft_kv). 10 | -author("gabriele"). 11 | 12 | -behaviour(gen_server). 13 | 14 | 15 | %% API 16 | -export([start_link/0, callback_mode/0, start_cluster/1, 17 | members/0, start_local/0, join/1, get/1, put/2, 18 | start_http/0, start_http/1]). 19 | 20 | %% gen_server callbacks 21 | -export([init/1, 22 | handle_call/3, 23 | handle_cast/2, 24 | handle_info/2, 25 | terminate/2, 26 | code_change/3, restart_node/1, members_flat/0, get_map/0]). 27 | 28 | -define(SERVER, ?MODULE). 
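%% Typical shell usage of this API (see the README in this directory):
%%   raft_kv:start_local().           %% bootstrap a cluster on this node
%%   raft_kv:join('node2@host').      %% add node2 to this cluster
%%   raft_kv:put("key1", "value1").
%%   raft_kv:get("key1").             %% => {ok, "value1"}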
29 | -define(CLUSTER_NAME, "KV Raft Cluster").
30 | 
31 | 
32 | -record(state, {}).
33 | 
34 | %%%===================================================================
35 | %%% API
36 | %%%===================================================================
37 | 
38 | %%--------------------------------------------------------------------
39 | %% @doc
40 | %% Starts the server
41 | %%
42 | %% @end
43 | %%--------------------------------------------------------------------
44 | -spec(start_link() ->
45 |     {ok, Pid :: pid()} | ignore | {error, Reason :: term()}).
46 | start_link() ->
47 |     gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
48 | 
49 | init([]) ->
50 |     io:format("Starting~n"),
51 |     {ok, #state{}}.
52 | 
53 | callback_mode() ->
54 |     handle_event_function.
55 | 
56 | 
57 | handle_call({start, Name, Node}, _From, State) ->
58 |     R = raft_kv_sm:start_cluster(Name, Node),
59 |     {reply, R, State};
60 | handle_call({startlocal}, _From, State) ->
61 |     R = raft_kv_sm:start_cluster(?CLUSTER_NAME, node()),
62 |     {reply, R, State};
63 | handle_call({join, Node}, _From, State) ->
64 |     R = raft_kv_sm:start_and_join(?CLUSTER_NAME, Node),
65 |     {reply, R, State};
66 | handle_call({write, Key, Value}, _From, State) ->
67 |     R = raft_kv_sm:write({kv, node()}, Key, Value),
68 |     {reply, R, State};
69 | handle_call({read, Key}, _From, State) ->
70 |     R = raft_kv_sm:read({kv, node()}, Key),
71 |     {reply, R, State};
72 | handle_call({members, Node}, _From, State) ->
73 |     raft_kv_sm:members(Node),
74 |     {reply, ok, State};
75 | handle_call({members_flat, Node}, _From, State) ->
76 |     R = raft_kv_sm:members_flat(Node),
77 |     {reply, R, State};
78 | handle_call({get_map, Node}, _From, State) ->
79 |     {ok, R} = raft_kv_sm:get_map({kv, Node}),
80 |     {reply, R, State};
81 | handle_call({restart, Node}, _From, State) ->
82 |     raft_kv_sm:restart_node({kv, Node}),
83 |     {reply, ok, State}.
84 | 
85 | 
86 | 
87 | handle_cast(_Request, State) ->
88 |     {noreply, State}.
89 | 
90 | handle_info(_Info, State) ->
91 |     {noreply, State}.
92 | 
93 | terminate(_Reason, _State) ->
94 |     ok.
95 | 
96 | code_change(_OldVsn, State, _Extra) ->
97 |     {ok, State}.
98 | 
99 | 
100 | %%%%%%%%%%%%
101 | 
102 | start_cluster(Name) ->
103 |     gen_server:call(?MODULE, {start, Name, node()}).
104 | 
105 | start_local() ->
106 |     gen_server:call(?MODULE, {startlocal}).
107 | 
108 | join(Node) ->
109 |     gen_server:call(?MODULE, {join, Node}).
110 | 
111 | put(Key, Value) ->
112 |     gen_server:call(?MODULE, {write, Key, Value}).
113 | 
114 | get(Key) ->
115 |     gen_server:call(?MODULE, {read, Key}).
116 | 
117 | restart_node(Node) ->
118 |     gen_server:call(?MODULE, {restart, Node}).
119 | 
120 | members() ->
121 |     gen_server:call(?MODULE, {members, node()}).
122 | 
123 | members_flat() ->
124 |     gen_server:call(?MODULE, {members_flat, node()}).
125 | 
126 | get_map() ->
127 |     gen_server:call(?MODULE, {get_map, node()}).
128 | 
129 | start_http() ->
130 |     start_http(8080).
131 | 
132 | start_http(PORT) ->
133 |     Dispatch = cowboy_router:compile([
134 |         {'_', [
135 |             {"/", raft_kv_http_rest, []},
136 |             {"/get_map", raft_kv_http_rest_map, []}
137 |         ]}
138 |     ]),
139 |     {ok, _} = cowboy:start_clear(http, [{port, PORT}], #{
140 |         env => #{dispatch => Dispatch}
141 |     }).
142 | 
143 | 
144 | 
145 | 
146 | 
--------------------------------------------------------------------------------
/raft_kv/src/raft_kv_app.erl:
--------------------------------------------------------------------------------
1 | %%%-------------------------------------------------------------------
2 | %% @doc raft_kv public API
3 | %% @end
4 | %%%-------------------------------------------------------------------
5 | 
6 | -module(raft_kv_app).
7 | 8 | -behaviour(application). 9 | 10 | %% Application callbacks 11 | -export([start/2, stop/1]). 12 | 13 | %%==================================================================== 14 | %% API 15 | %%==================================================================== 16 | 17 | start(_StartType, _StartArgs) -> 18 | application:ensure_all_started(ra), 19 | raft_kv_sup:start_link(). 20 | 21 | %%-------------------------------------------------------------------- 22 | stop(_State) -> 23 | ok. 24 | 25 | %%==================================================================== 26 | %% Internal functions 27 | %%==================================================================== 28 | -------------------------------------------------------------------------------- /raft_kv/src/raft_kv_http_rest.erl: -------------------------------------------------------------------------------- 1 | %%%------------------------------------------------------------------- 2 | %%% @author gabriele 3 | %%% @copyright (C) 2019, 4 | %%% @doc 5 | %%% 6 | %%% @end 7 | %%% Created : 03. Jun 2019 21.43 8 | %%%------------------------------------------------------------------- 9 | -module(raft_kv_http_rest). 10 | -author("gabriele"). 11 | 12 | -export([init/2]). 13 | -export([content_types_provided/2]). 14 | -export([members_to_json/2]). 15 | 16 | init(Req, Opts) -> 17 | {cowboy_rest, Req, Opts}. 18 | 19 | content_types_provided(Req, State) -> 20 | {[ 21 | {<<"text/html">>, members_to_json}, 22 | {<<"application/json">>, members_to_json}, 23 | {<<"text/plain">>, members_to_json} 24 | ], Req, State}. 25 | 26 | 27 | members_to_json(Req, State) -> 28 | {Leader, Followers, Nodes} = raft_kv:members_flat(), 29 | P = fun(A, AccIn) -> lists:append([ra_lib:ra_server_id_node(A)], AccIn) end, 30 | N = lists:foldl(P, [], Nodes), 31 | F = lists:foldl(P, [], Followers), 32 | Body = jsx:encode([{<<"cluster_name">>,<<"--- Erlang Raft Cluster ---">>}, 33 | {<<"leader_node">>, ra_lib:ra_server_id_node(Leader)}, 34 | {<<"node_followers">>, F}, {<<"cluster_nodes">>, N}]), 35 | 36 | {Body, Req, State}. 37 | -------------------------------------------------------------------------------- /raft_kv/src/raft_kv_http_rest_map.erl: -------------------------------------------------------------------------------- 1 | %%%------------------------------------------------------------------- 2 | %%% @author gabriele 3 | %%% @copyright (C) 2019, 4 | %%% @doc 5 | %%% 6 | %%% @end 7 | %%% Created : 03. Jun 2019 21.43 8 | %%%------------------------------------------------------------------- 9 | -module(raft_kv_http_rest_map). 10 | -author("gabriele"). 11 | 12 | -export([init/2]). 13 | -export([content_types_provided/2]). 14 | -export([map_to_json/2]). 15 | 16 | init(Req, Opts) -> 17 | {cowboy_rest, Req, Opts}. 18 | 19 | content_types_provided(Req, State) -> 20 | {[ 21 | {<<"text/html">>, map_to_json}, 22 | {<<"application/json">>, map_to_json}, 23 | {<<"text/plain">>, map_to_json} 24 | ], Req, State}. 25 | 26 | 27 | map_to_json(Req, State) -> 28 | M = raft_kv:get_map(), 29 | P = fun({Key, Value}, AccIn) -> lists:append([list_to_binary(Key ++ " - " ++ Value)], 30 | AccIn) end, 31 | R = lists:foldl(P, [], M), 32 | Body = jsx:encode([{<<"Keys - Values">>, R}]), 33 | {Body, Req, State}. 
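The two cowboy routes above ("/" and "/get_map") are what `raft_kv:start_http/1` exposes. A minimal sketch of a client for them, using only Python's standard library, assuming a node has started the listener with `raft_kv:start_http().` on the default port 8080:

```python
#!/usr/bin/env python3
# Hypothetical client for the raft_kv HTTP endpoints shown above.
import json
import urllib.request


def fetch(path):
    # Both handlers return a JSON body built with jsx:encode/1.
    with urllib.request.urlopen("http://localhost:8080" + path) as resp:
        return json.loads(resp.read())


# "/" is served by raft_kv_http_rest: cluster name, leader and members
members = fetch("/")
print("leader:", members["leader_node"])
print("followers:", members["node_followers"])
print("nodes:", members["cluster_nodes"])

# "/get_map" is served by raft_kv_http_rest_map: the replicated key/value pairs
kv = fetch("/get_map")
print(kv["Keys - Values"])
```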
34 | 
--------------------------------------------------------------------------------
/raft_kv/src/raft_kv_sm.erl:
--------------------------------------------------------------------------------
1 | %%%-------------------------------------------------------------------
2 | %%% @author gabriele
3 | %%% @copyright (C) 2019,
4 | %%% @doc
5 | %%%
6 | %%% @end
7 | %%% Created : 29. May 2019 11.32
8 | %%%-------------------------------------------------------------------
9 | -module(raft_kv_sm).
10 | -behaviour(ra_machine).
11 | -author("gabriele").
12 | 
13 | 
14 | %% API
15 | -export([init/1, apply/3, write/3, read/2, start_cluster/2, members/0,
16 |     members/1, start_local_server/2, start_and_join/2, restart_node/1, members_flat/1, get_map/1]).
17 | 
18 | -record(?MODULE, {kvstore = #{} :: map()}).
19 | 
20 | 
21 | init(_) ->
22 |     #?MODULE{}.
23 | 
24 | 
25 | apply(_Meta, {write, Key, Value}, #?MODULE{kvstore = KvM} = State) ->
26 |     KvM0 = maps:put(Key, Value, KvM),
27 |     {State#?MODULE{kvstore = KvM0}, ok, []};
28 | apply(_Meta, {read, Key}, #?MODULE{kvstore = KvM} = State) ->
29 |     Reply = maps:get(Key, KvM, undefined),
30 |     {State, Reply, []};
31 | apply(_Meta, {get_map}, #?MODULE{kvstore = KvM} = State) ->
32 |     Reply = maps:to_list(KvM),
33 |     {State, Reply, []};
34 | apply(_Meta, {nodedown, _}, State) ->
35 |     %% we need to handle nodedown as well to avoid crashing; this clause
36 |     %% must come before the catch-all clause below, or it is unreachable
37 |     {State, ok, []};
38 | apply(#{index := Idx}, _, State) ->
39 |     %% emit a release_cursor effect every 1000 entries so Ra can
40 |     %% snapshot the state and truncate the log
41 |     Effects = case Idx rem 1000 of
42 |                   0 -> [{release_cursor, Idx, State}];
43 |                   _ -> []
44 |               end,
45 |     {State, ok, Effects}.
46 | 
47 | 
48 | write(Server, Key, Value) ->
49 |     case ra:process_command(Server, {write, Key, Value}) of
50 |         {ok, _, _} ->
51 |             ok;
52 |         Err ->
53 |             Err
54 |     end.
55 | 
56 | read(Server, Key) ->
57 |     case ra:process_command(Server, {read, Key}) of
58 |         {ok, Value, _} ->
59 |             {ok, Value};
60 |         Err ->
61 |             Err
62 |     end.
63 | 
64 | get_map(Server) ->
65 |     case ra:process_command(Server, {get_map}) of
66 |         {ok, Value, _} ->
67 |             {ok, Value};
68 |         Err ->
69 |             Err
70 |     end.
71 | 
72 | 
73 | 
74 | start_cluster(Name, Node) ->
75 |     ra:start_cluster(Name, {module, ?MODULE, #{}}, [{kv, Node}]).
76 | 
77 | start_local_server(Name, Node) ->
78 |     ra:start_server(Name, {kv, Node}, {module, ?MODULE, #{}}, []),
79 |     ok = ra:trigger_election({kv, Node}).
80 | 
81 | 
82 | start_and_join(Name, New) ->
83 |     ServerRef = {kv, node()},
84 |     {ok, _, _} = ra:add_member(ServerRef, {kv, New}),
85 |     ok = ra:start_server(Name, {kv, New}, {module, ?MODULE, #{}}, [ServerRef]),
86 |     ok.
87 | 
88 | restart_node(Node) ->
89 |     ra:restart_server({kv, Node}).
90 | 
91 | members() ->
92 |     members(node()).
93 | 
94 | members(Node) ->
95 |     case ra:members({kv, Node}) of
96 |         {ok, Result, Leader} -> io:format("Cluster Members:~nLeader:~p~nFollowers:~p~n" ++
97 |             "Nodes:~p~n", [Leader, lists:delete(Leader, Result), Result]);
98 |         Err -> io:format("Cluster Status error: ~p", [Err])
99 |     end.
100 | 
101 | members_flat(Node) ->
102 |     case ra:members({kv, Node}) of
103 |         {ok, Result, Leader} ->
104 |             {Leader, lists:delete(Leader, Result), Result};
105 |         Err -> {error, Err}
106 |     end.
--------------------------------------------------------------------------------
/raft_kv/src/raft_kv_sup.erl:
--------------------------------------------------------------------------------
1 | %%%-------------------------------------------------------------------
2 | %% @doc raft_kv top level supervisor.
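%% Supervises the raft_kv gen_server with a one_for_one strategy
%% (at most 10 restarts per minute, see init/1 below).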
3 | %% @end
4 | %%%-------------------------------------------------------------------
5 | 
6 | -module(raft_kv_sup).
7 | 
8 | -behaviour(supervisor).
9 | 
10 | %% API
11 | -export([start_link/0]).
12 | 
13 | %% Supervisor callbacks
14 | -export([init/1]).
15 | 
16 | -define(SERVER, ?MODULE).
17 | 
18 | %%====================================================================
19 | %% API functions
20 | %%====================================================================
21 | 
22 | start_link() ->
23 |     supervisor:start_link({local, ?SERVER}, ?MODULE, []).
24 | 
25 | %%====================================================================
26 | %% Supervisor callbacks
27 | %%====================================================================
28 | 
29 | %% Child :: #{id => Id, start => {M, F, A}}
30 | %% Optional keys are restart, shutdown, type, modules.
31 | %% Before OTP 18 tuples must be used to specify a child. e.g.
32 | %% Child :: {Id,StartFunc,Restart,Shutdown,Type,Modules}
33 | init([]) ->
34 |     RestartStrategy = {one_for_one, 10, 60},
35 |     ChildSpec = [{raft_kv, {raft_kv, start_link, []},
36 |         permanent, brutal_kill, worker, [raft_kv]}],
37 | 
38 |     {ok, {RestartStrategy, ChildSpec}}.
39 | %%====================================================================
40 | %% Internal functions
41 | %%====================================================================
--------------------------------------------------------------------------------
/test/problem.py:
--------------------------------------------------------------------------------
1 | # This is a sample Python script.
2 | 
3 | # Press ⌃R to execute it or replace it with your code.
4 | # Press Double ⇧ to search everywhere for classes, files, tool windows, actions, and settings.
5 | import queue
6 | import threading
7 | import time
8 | from threading import Thread
9 | 
10 | workerQueue = queue.Queue()
11 | 
12 | 
13 | def start():
14 |     for i in range(10):
15 |         workerQueue.put("Value" + str(i))
16 |     start_threads()
17 | 
18 | 
19 | def start_threads():
20 |     num_fetch_threads = 2
21 |     for i in range(num_fetch_threads):
22 |         worker = Thread(target=print_message, args=(workerQueue,))
23 |         worker.daemon = True
24 |         worker.start()
25 | 
26 | 
27 | def print_message(queue_wrk):
28 |     while True:
29 |         print("Thread {} wrote: {}".format(threading.current_thread().name, queue_wrk.get()))
30 |         time.sleep(1)
31 | 
32 | 
33 | # Press the green button in the gutter to run the script.
34 | if __name__ == '__main__':
35 |     start()
36 |     val = input("string for the thread ('q' to quit)\n")
37 |     while val != "q":
38 |         workerQueue.put(val)
39 |         val = input("\n")
--------------------------------------------------------------------------------
/vagrant/leap_15_ipv6/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 | 
4 | # All Vagrant configuration is done below. The "2" in Vagrant.configure
5 | # configures the configuration version (we support older styles for
6 | # backwards compatibility). Please don't change it unless you know what
7 | # you're doing.
8 | Vagrant.configure("2") do |config|
9 |   # The most common configuration options are documented and commented below.
10 |   # For a complete reference, please see the online documentation at
11 |   # https://docs.vagrantup.com.
12 | 
13 |   # Every Vagrant development environment requires a box. You can search for
14 |   # boxes at https://vagrantcloud.com/search.
15 | 16 | config.vm.define "testing-15.0" do |leap_15| 17 | leap_15.vm.box = "openSUSE-Leap-15.0-Vagrant.x86_64" 18 | leap_15.vm.box_url = "https://download.opensuse.org/repositories/Virtualization:/Appliances:/Images:/openSUSE-Leap-15.0/images/boxes/openSUSE-Leap-15.0-Vagrant.x86_64.json" 19 | end 20 | 21 | # Disable automatic box update checking. If you disable this, then 22 | # boxes will only be checked for updates when the user runs 23 | # `vagrant box outdated`. This is not recommended. 24 | # config.vm.box_check_update = false 25 | 26 | # Create a forwarded port mapping which allows access to a specific port 27 | # within the machine from a port on the host machine. In the example below, 28 | # accessing "localhost:8080" will access port 80 on the guest machine. 29 | # NOTE: This will enable public access to the opened port 30 | # config.vm.network "forwarded_port", guest: 80, host: 8080 31 | 32 | # Create a forwarded port mapping which allows access to a specific port 33 | # within the machine from a port on the host machine and only allow access 34 | # via 127.0.0.1 to disable public access 35 | # config.vm.network "forwarded_port", guest: 80, host: 8080, host_ip: "127.0.0.1" 36 | 37 | # Create a private network, which allows host-only access to the machine 38 | # using a specific IP. 39 | # config.vm.network "private_network", ip: "192.168.33.10" 40 | 41 | # Create a public network, which generally matched to bridged network. 42 | # Bridged networks make the machine appear as another physical device on 43 | # your network. 44 | # config.vm.network "public_network" 45 | 46 | # Share an additional folder to the guest VM. The first argument is 47 | # the path on the host to the actual folder. The second argument is 48 | # the path on the guest to mount the folder. And the optional third 49 | # argument is a set of non-required options. 50 | # config.vm.synced_folder "../data", "/vagrant_data" 51 | 52 | # Provider-specific configuration so you can fine-tune various 53 | # backing providers for Vagrant. These expose provider-specific options. 54 | # Example for VirtualBox: 55 | # 56 | # config.vm.provider "virtualbox" do |vb| 57 | # # Display the VirtualBox GUI when booting the machine 58 | # vb.gui = true 59 | # 60 | # # Customize the amount of memory on the VM: 61 | # vb.memory = "1024" 62 | # end 63 | # 64 | # View the documentation for the provider you are using for more 65 | # information on available options. 
66 | 67 | config.vm.provision "shell", inline: <<-SHELL 68 | sudo zypper addrepo -f http://download.opensuse.org/repositories/devel:/languages:/erlang:/Factory/openSUSE_Leap_15.0/devel:languages:erlang:Factory.repo 69 | sudo zypper --gpg-auto-import-keys refresh 70 | sudo zypper -n install wget 71 | sudo rpm --import https://github.com/rabbitmq/signing-keys/releases/download/2.0/rabbitmq-release-signing-key.asc 72 | wget https://github.com/rabbitmq/rabbitmq-server/releases/download/v3.8.0-beta.3/rabbitmq-server-3.8.0.beta.3-1.suse.noarch.rpm 73 | sudo zypper -n in rabbitmq-server-3.8.0.beta.3-1.suse.noarch.rpm 74 | sudo zypper -n install erlang 75 | sudo touch /etc/rabbitmq/rabbitmq-env.conf 76 | cat >/usr/lib/systemd/system/epmd.socket <> /etc/hosts 31 | sudo echo "192.168.56.12 node2 " >> /etc/hosts 32 | sudo echo "192.168.56.10 node0 " >> /etc/hosts 33 | SHELL 34 | end 35 | 36 | 37 | 38 | -------------------------------------------------------------------------------- /vagrant/vagrant_cluster/.gitignore: -------------------------------------------------------------------------------- 1 | .vagrant/* -------------------------------------------------------------------------------- /vagrant/vagrant_cluster/README.md: -------------------------------------------------------------------------------- 1 | How to create a RabbitMQ cluster on Ubuntu 2 | == 3 | 4 | This example creates 3 RabbitMQ nodes in cluster using Ubuntu 5 | 6 | Clone the repo or just copy the `Vagrantfile` 7 | 8 | Then: 9 | ``` 10 | vagrant up 11 | ``` 12 | 13 | 14 | When ready you have: 15 | - 10.0.0.[10..12]:5672 - amqp ports 16 | - 10.0.0.[10..12]:15672 - http management ui ports 17 | 18 | ex: Management ui: 19 | http://10.0.0.10:15672 20 | 21 | - user name: `test` 22 | - password: `test` 23 | 24 | So: 25 | 26 | ![cluster](https://raw.githubusercontent.com/Gsantomaggio/rabbitmq-utils/master/rabbitmq-suse/vagrant_cluster/img/cluster.png) 27 | -------------------------------------------------------------------------------- /vagrant/vagrant_cluster/Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | # All Vagrant configuration is done below. The "2" in Vagrant.configure 5 | # configures the configuration version (we support older styles for 6 | # backwards compatibility). Please don't change it unless you know what 7 | # you're doing. 
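# This Vagrantfile defines one seed node (node0) plus NODE_COUNT additional
# nodes; each guest installs RabbitMQ and joins the cluster as rabbit@node0.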
8 | 
9 | 
10 | BOX_IMAGE = "ubuntu/focal64"
11 | NODE_COUNT = 2
12 | 
13 | Vagrant.configure("2") do |config|
14 |   config.vm.define "node0" do |subconfig|
15 |     subconfig.vm.box = BOX_IMAGE
16 |     subconfig.vm.hostname = "node0"
17 |     subconfig.vm.network :private_network, ip: "10.0.0.10"
18 |   end
19 | 
20 |   (1..NODE_COUNT).each do |i|
21 |     config.vm.define "node#{i}" do |subconfig|
22 |       subconfig.vm.box = BOX_IMAGE
23 |       subconfig.vm.hostname = "node#{i}"
24 |       subconfig.vm.network :private_network, ip: "10.0.0.#{i + 10}"
25 |     end
26 |   end
27 | 
28 |   # Provision RabbitMQ on all machines
29 |   config.vm.provision "shell", inline: <<-SHELL
30 |     sudo echo "10.0.0.11 node1 " >> /etc/hosts
31 |     sudo echo "10.0.0.12 node2 " >> /etc/hosts
32 |     sudo echo "10.0.0.10 node0 " >> /etc/hosts
33 |     sudo apt-get install curl gnupg -y
34 |     curl -fsSL https://github.com/rabbitmq/signing-keys/releases/download/2.0/rabbitmq-release-signing-key.asc | sudo apt-key add -
35 |     sudo apt-key adv --keyserver "hkps://keys.openpgp.org" --recv-keys "0x0A9AF2115F4687BD29803A206B73A36E6026DFCA"
36 |     sudo apt-get install apt-transport-https
37 |     sudo touch /etc/apt/sources.list.d/bintray.erlang.list
38 |     sudo echo "deb https://dl.bintray.com/rabbitmq-erlang/debian focal erlang" >> /etc/apt/sources.list.d/bintray.erlang.list
39 |     sudo echo "deb https://dl.bintray.com/rabbitmq/debian bionic main" >> /etc/apt/sources.list.d/bintray.erlang.list
40 |     sudo apt-get update -y
41 |     sudo apt-get update
42 |     sudo apt-get update -y
43 |     sudo apt-get install rabbitmq-server -y --fix-missing
44 |     sudo systemctl enable rabbitmq-server
45 |     sudo systemctl start rabbitmq-server
46 |     sudo chmod 600 /var/lib/rabbitmq/.erlang.cookie
47 |     sudo echo "PLEASE_GENERATE_A_SECURE_VALUE" > /var/lib/rabbitmq/.erlang.cookie
48 |     sudo bash -c 'echo "vm_memory_high_watermark.relative = 0.9" > /etc/rabbitmq/rabbitmq.conf'
49 |     # sudo bash -c 'echo "cluster_partition_handling = pause_minority" >> /etc/rabbitmq/rabbitmq.conf'
50 |     sudo bash -c 'echo "queue_master_locator = min-masters " >> /etc/rabbitmq/rabbitmq.conf'
51 |     sudo systemctl restart rabbitmq-server
52 |     sudo systemctl enable rabbitmq-server
53 |     sleep 5
54 |     sudo rabbitmq-plugins enable rabbitmq_management
55 |     sudo rabbitmqctl stop_app
56 |     sudo rabbitmqctl reset
57 |     sudo rabbitmqctl join_cluster rabbit@node0
58 |     sudo rabbitmqctl start_app
59 |     sudo rabbitmqctl add_user test test
60 |     sudo rabbitmqctl set_user_tags test administrator
61 |     sudo rabbitmqctl set_permissions -p / test ".*" ".*" ".*"
62 |   SHELL
63 | end
64 | 
65 | 
66 | 
67 | 
--------------------------------------------------------------------------------
/vagrant/vagrant_cluster/img/cluster.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Gsantomaggio/rabbitmq-utils/07423e9b253af5e602e3b06e65d1fc22157586f7/vagrant/vagrant_cluster/img/cluster.png
--------------------------------------------------------------------------------
/workshop/README.md:
--------------------------------------------------------------------------------
1 | What I need for the workshop
2 | 
3 | 
4 | Telegram Channel:
5 | ====
6 | https://t.me/+GzBO0DlyJcozZDlk
7 | 
8 | 
9 | 
10 | RabbitMQ
11 | =====
12 | Run the Docker image
13 | 
14 | ```
15 | docker run -it --rm --name rabbitmq-stream \
16 |     -p 5552:5552 -p 5672:5672 -p 15672:15672 \
17 |     -e RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS="-rabbitmq_stream advertised_host localhost" \
18 |     --pull always \
19 |     rabbitmq:3-management
20 | ```
21 | 
22 | Enable the right 
plugins
23 | 
24 | ```
25 | docker exec -it rabbitmq-stream /bin/bash -c "rabbitmq-plugins enable rabbitmq_stream_management"
26 | ```
27 | 
28 | 
29 | RabbitMQ Stream Protocol
30 | ===
31 | https://github.com/rabbitmq/rabbitmq-server/blob/v3.11.x/deps/rabbitmq_stream/docs/PROTOCOL.adoc#create
32 | 
33 | 
34 | 
35 | Envoy
36 | ====
37 | Install Envoy:
38 | 
39 | https://www.envoyproxy.io/docs/envoy/latest/start/install#
40 | 
41 | 
42 | 
43 | Stream Client
44 | ====
45 | Pick one of these:
46 | 
47 | - .NET https://github.com/rabbitmq/rabbitmq-stream-dotnet-client
48 | - Go https://github.com/rabbitmq/rabbitmq-stream-go-client
49 | - Java https://github.com/rabbitmq/rabbitmq-stream-java-client
50 | - Rust https://github.com/rabbitmq/rabbitmq-stream-rust-client
51 | - Python https://github.com/qweeze/rstream
52 | 
53 | 
54 | Envoy SDKs
55 | ====
56 | https://github.com/proxy-wasm/proxy-wasm-rust-sdk/
57 | 
58 | 
59 | 
60 | 
61 | 
62 | 
63 | 
64 | 
65 | 
66 | 
--------------------------------------------------------------------------------
/workshop/envoyfilter/.gitignore:
--------------------------------------------------------------------------------
1 | .idea
2 | target/
--------------------------------------------------------------------------------
/workshop/envoyfilter/Cargo.lock:
--------------------------------------------------------------------------------
1 | # This file is automatically @generated by Cargo.
2 | # It is not intended for manual editing.
3 | version = 3
4 | 
5 | [[package]]
6 | name = "ahash"
7 | version = "0.8.3"
8 | source = "registry+https://github.com/rust-lang/crates.io-index"
9 | checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f"
10 | dependencies = [
11 |     "cfg-if",
12 |     "once_cell",
13 |     "version_check",
14 | ]
15 | 
16 | [[package]]
17 | name = "byteorder"
18 | version = "1.4.3"
19 | source = "registry+https://github.com/rust-lang/crates.io-index"
20 | checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
21 | 
22 | [[package]]
23 | name = "cfg-if"
24 | version = "1.0.0"
25 | source = "registry+https://github.com/rust-lang/crates.io-index"
26 | checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
27 | 
28 | [[package]]
29 | name = "hashbrown"
30 | version = "0.13.2"
31 | source = "registry+https://github.com/rust-lang/crates.io-index"
32 | checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e"
33 | dependencies = [
34 |     "ahash",
35 | ]
36 | 
37 | [[package]]
38 | name = "log"
39 | version = "0.4.14"
40 | source = "registry+https://github.com/rust-lang/crates.io-index"
41 | checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710"
42 | dependencies = [
43 |     "cfg-if",
44 | ]
45 | 
46 | [[package]]
47 | name = "once_cell"
48 | version = "1.18.0"
49 | source = "registry+https://github.com/rust-lang/crates.io-index"
50 | checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d"
51 | 
52 | [[package]]
53 | name = "proxy-wasm"
54 | version = "0.2.1"
55 | source = "registry+https://github.com/rust-lang/crates.io-index"
56 | checksum = "823b744520cd4a54ba7ebacbffe4562e839d6dcd8f89209f96a1ace4f5229cd4"
57 | dependencies = [
58 |     "hashbrown",
59 |     "log",
60 | ]
61 | 
62 | [[package]]
63 | name = "stream_filter"
64 | version = "0.1.0"
65 | dependencies = [
66 |     "byteorder",
67 |     "log",
68 |     "proxy-wasm",
69 | ]
70 | 
71 | [[package]]
72 | name = "version_check"
73 | version = "0.9.4"
74 | source = "registry+https://github.com/rust-lang/crates.io-index"
75 | 
checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" 76 | -------------------------------------------------------------------------------- /workshop/envoyfilter/Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = ["network"] 3 | -------------------------------------------------------------------------------- /workshop/envoyfilter/Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: build 2 | 3 | all: build 4 | 5 | run-docker: 6 | docker run -it --user "$(id -u)":"$(id -g)" -v "$(shell PWD)":/usr/src/myapp -w /usr/src/myapp rust /bin/bash 7 | 8 | 9 | build: 10 | rustup target add wasm32-unknown-unknown 11 | cargo build --target wasm32-unknown-unknown --release 12 | run-compose: 13 | docker rmi -f rabbitmq_filter_proxy && docker-compose up 14 | run-envoy: 15 | envoy -c envoy/envoy_tcp_v3.yaml --log-format "[%Y-%m-%d %T.%e][%t][%l] %v" 16 | 17 | run-envoy-empty: 18 | envoy -c envoy/envoy_tcp_v3_empty.yaml --log-format "[%Y-%m-%d %T.%e][%t][%l] %v" 19 | run-tls: build 20 | cd envoy && \ 21 | envoy -c envoy_tcp_v3_tls.yaml --bootstrap-version 3 --concurrency 1 22 | generate-cert: 23 | rm -rf envoy/tls-gen && \ 24 | git clone https://github.com/michaelklishin/tls-gen envoy/tls-gen && \ 25 | cd envoy/tls-gen/basic && \ 26 | make 27 | 28 | -------------------------------------------------------------------------------- /workshop/envoyfilter/README.md: -------------------------------------------------------------------------------- 1 | # RabbitMQ HTTP and AMQP Envoy WASM Filter 2 | 3 | Based on the [proxy-wasm crate](https://crates.io/crates/proxy-wasm/0.3.0) 4 | 5 | ## Building 6 | 7 | ```shell 8 | make build 9 | ``` 10 | 11 | ### 1. Install Rust 12 | 13 | For your OS, follow [installation instructions from the Rust website](https://www.rust-lang.org/tools/install). 14 | 15 | ### 2. Install Web Assembly for Rust (WASM) 16 | 17 | Run the [command listed on the Rust WASM site](https://rustwasm.github.io/wasm-pack/installer/). 18 | 19 | ### 3. Build The Project 20 | 21 | From the root directory of the project (`tanzu-cluster-operator/plugins/rabbitmq_filter/`), run - 22 | 23 | ```shell 24 | make build 25 | ``` 26 | 27 | ### 4. Run locally 28 | you need envoy >= 1.7.1 installated 29 | 30 | ``` 31 | make run 32 | ``` 33 | 34 | ### 5. Deploy Envoy with filter and deploy RabbitMQ 35 | 36 | From the root directory of the project, run - 37 | 38 | ```shell 39 | make run-compose 40 | ``` 41 | 42 | ### 6. Test the Filter using perf-test 43 | 44 | ```shell 45 | docker run -it --network host pivotalrabbitmq/perf-test:2.13.0 --queue-pattern '@@@@-%d' --queue-pattern-from 1 --queue-pattern-to 1 --producers 1 --consumers 1 -h amqp://guest:guest@localhost:5673 --rate 10 46 | ``` 47 | 48 | ### 7. 
## Examples:

- [Deploy on Istio](https://github.com/rabbitmq/tanzu-cluster-operator/tree/envoy/plugins/rabbitmq_filter/examples/istio)
-------------------------------------------------------------------------------- /workshop/envoyfilter/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | services: 3 | 4 | proxy: 5 | build: 6 | context: ./envoy 7 | dockerfile: Dockerfile.proxy 8 | volumes: 9 | - ./envoy/envoy_tcp.yaml:/etc/envoy_tcp.yaml 10 | - ./target/wasm32-unknown-unknown/release/rabbitmq_http_filter.wasm:/etc/rabbitmq_http_filter.wasm 11 | - ./target/wasm32-unknown-unknown/release/rabbitmq_network_filter.wasm:/etc/rabbitmq_network_filter.wasm 12 | networks: 13 | - envoymesh 14 | expose: 15 | - "5673" 16 | - "15673" 17 | - "8001" 18 | ports: 19 | - "5673:5673" 20 | - "15673:15673" 21 | - "8001:8001" 22 | 23 | rabbitmq_service: 24 | image: rabbitmq:management 25 | networks: 26 | envoymesh: 27 | aliases: 28 | - rabbitmq_service 29 | expose: 30 | - "15672" 31 | - "5672" 32 | ports: 33 | - "15672:15672" 34 | - "5672:5672" 35 | 36 | networks: 37 | envoymesh: {} -------------------------------------------------------------------------------- /workshop/envoyfilter/envoy/.dockerignore: -------------------------------------------------------------------------------- 1 | envoy 2 | istio/ 3 | solo/ 4 | release/ -------------------------------------------------------------------------------- /workshop/envoyfilter/envoy/Dockerfile.proxy: -------------------------------------------------------------------------------- 1 | FROM istio/proxyv2:1.8.0 2 | ENTRYPOINT /usr/local/bin/envoy -c /etc/envoy_tcp.yaml --service-cluster proxy --concurrency 4 3 | -------------------------------------------------------------------------------- /workshop/envoyfilter/envoy/envoy_tcp_v3.yaml: -------------------------------------------------------------------------------- 1 | admin: {"accessLogPath":"/dev/null","address":{"socketAddress":{"address":"127.0.0.1","portValue":9901}}} 2 | 3 | static_resources: 4 | listeners: 5 | - name: ingress 6 | per_connection_buffer_limit_bytes: 4096 7 | address: 8 | socket_address: 9 | address: 0.0.0.0 10 | port_value: 5553 11 | filter_chains: 12 | - filters: 13 | - name: envoy.filters.network.wasm 14 | typed_config: 15 | "@type": type.googleapis.com/envoy.extensions.filters.network.wasm.v3.Wasm 16 | config: 17 | configuration: {"@type":"type.googleapis.com/google.protobuf.StringValue", 18 | "value":""} 19 | name: "rmq" 20 | root_id: "filter" 21 | vm_config: 22 | vm_id: "1" 23 | runtime: envoy.wasm.runtime.v8 24 | code: {"local":{"filename":"target/wasm32-unknown-unknown/release/stream_filter.wasm"}} 25 | allow_precompiled: true 26 | 27 | 28 | - name: envoy.filters.network.tcp_proxy 29 | typed_config: 30 | "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy 31 | stat_prefix: ingress 32 | cluster: stream 33 | 34 | clusters: 35 | - name: stream 36 | connect_timeout: 3.25s 37 | per_connection_buffer_limit_bytes: 4096 38 | type: LOGICAL_DNS 39 | lb_policy: ROUND_ROBIN 40 | load_assignment: 41 | cluster_name: stream 42 | endpoints: 43 | - lb_endpoints: 44 | - endpoint: 45 | address: 46 | socket_address: 47 | address: 127.0.0.1 48 | 
port_value: 5552 49 | -------------------------------------------------------------------------------- /workshop/envoyfilter/envoy/envoy_tcp_v3_empty.yaml: -------------------------------------------------------------------------------- 1 | admin: {"accessLogPath":"/dev/null","address":{"socketAddress":{"address":"127.0.0.1","portValue":9901}}} 2 | 3 | static_resources: 4 | listeners: 5 | - name: ingress 6 | per_connection_buffer_limit_bytes: 4096 7 | address: 8 | socket_address: 9 | address: 0.0.0.0 10 | port_value: 5553 # the proxy port exposed! 11 | filter_chains: 12 | - filters: 13 | - name: envoy.filters.network.tcp_proxy 14 | typed_config: 15 | "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy 16 | stat_prefix: ingress 17 | cluster: stream 18 | 19 | clusters: 20 | - name: stream 21 | connect_timeout: 3.25s 22 | per_connection_buffer_limit_bytes: 4096 23 | type: LOGICAL_DNS 24 | lb_policy: ROUND_ROBIN 25 | load_assignment: 26 | cluster_name: stream 27 | endpoints: 28 | - lb_endpoints: 29 | - endpoint: 30 | address: 31 | socket_address: 32 | address: 127.0.0.1 # the rabbitmq address 33 | port_value: 5552 34 | -------------------------------------------------------------------------------- /workshop/envoyfilter/envoy/envoy_tcp_v3_tls.yaml: -------------------------------------------------------------------------------- 1 | admin: {"accessLogPath":"/dev/null","address":{"socketAddress":{"address":"127.0.0.1","portValue":9901}}} 2 | 3 | static_resources: 4 | listeners: 5 | - name: ingress 6 | per_connection_buffer_limit_bytes: 4096 7 | address: 8 | socket_address: 9 | address: 0.0.0.0 10 | port_value: 5673 11 | listener_filters: 12 | - name: "envoy.filters.listener.tls_inspector" 13 | typed_config: { } 14 | filter_chains: 15 | - filters: 16 | - name: envoy.filters.network.wasm 17 | typed_config: 18 | "@type": type.googleapis.com/envoy.extensions.filters.network.wasm.v3.Wasm 19 | config: 20 | configuration: {"@type":"type.googleapis.com/google.protobuf.StringValue", 21 | "value":""} 22 | name: "rabbitmq.filters.network.network" 23 | root_id: "rabbitmq.filters.network.network" 24 | vm_config: 25 | vm_id: "rabbitmq.filters.network.network" 26 | runtime: envoy.wasm.runtime.v8 27 | code: {"local":{"filename":"../target/wasm32-unknown-unknown/release/rabbitmq_network_filter.wasm"}} 28 | allow_precompiled: true 29 | - name: envoy.filters.network.tcp_proxy 30 | typed_config: 31 | "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy 32 | stat_prefix: ingress 33 | cluster: amqps 34 | transport_socket: 35 | name: envoy.transport_sockets.tls 36 | typed_config: 37 | "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext 38 | common_tls_context: 39 | tls_certificates: 40 | - certificate_chain: { filename: "tls-gen/basic/result/server_certificate.pem" } 41 | private_key: { filename: "tls-gen/basic/result/server_key.pem" } 42 | tls_params: 43 | tls_minimum_protocol_version: "TLSv1_2" 44 | require_client_certificate: false 45 | 46 | clusters: 47 | - name: amqps 48 | connect_timeout: 3.25s 49 | per_connection_buffer_limit_bytes: 4096 50 | transport_socket: 51 | name: envoy.transport_sockets.tls 52 | typed_config: 53 | "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext 54 | common_tls_context: 55 | tls_params: 56 | tls_minimum_protocol_version: "TLSv1_2" 57 | # cipher_suites: 58 | # - ECDHE-RSA-AES256-GCM-SHA384 59 | # # - ECDHE-RSA-AES128-SHA 60 | # # - 
ECDHE-ECDSA-AES256-GCM-SHA384 61 | 62 | # common_tls_context: 63 | # validation_context: 64 | # trusted_ca: 65 | # filename: /Users/gas/git/michaelklishin/tls-gen/basic/result/ca_certificate.pem 66 | type: LOGICAL_DNS 67 | lb_policy: ROUND_ROBIN 68 | load_assignment: 69 | cluster_name: amqps 70 | endpoints: 71 | - lb_endpoints: 72 | - endpoint: 73 | address: 74 | socket_address: 75 | address: 127.0.0.1 76 | port_value: 5671 77 | # transport_socket: 78 | # name: envoy.transport_sockets.tls 79 | # typed_config: 80 | # "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext 81 | # common_tls_context: 82 | # tls_certificates: 83 | # - certificate_chain: { filename: "/Users/gas/git/michaelklishin/tls-gen/basic/result/server_certificate.pem" } 84 | # private_key: { filename: "/Users/gas/git/michaelklishin/tls-gen/basic/result/server_key.pem" } 85 | # validation_context: 86 | # trusted_ca: 87 | # filename: /Users/gas/git/michaelklishin/tls-gen/basic/result/ca_certificate.pem -------------------------------------------------------------------------------- /workshop/envoyfilter/examples/istio/README.md: --------------------------------------------------------------------------------
### Deploy RabbitMQ with Istio

```shell
kubectl label namespace default istio-injection=enabled
kubectl apply -f rabbitmq.yaml
kubectl apply -f istio_rabbitmq_vservice.yaml
```

Deploy the filter:

```shell
kubectl cp target/wasm32-unknown-unknown/release/rabbitmq_network_filter.wasm -c istio-proxy default/filter-server-0:/var/local/wasm-filters/
kubectl cp target/wasm32-unknown-unknown/release/rabbitmq_http_filter.wasm -c istio-proxy default/filter-server-0:/var/local/wasm-filters/
kubectl apply -f istio_rabbitmq_filter.yaml
```

The filter is copied onto the `istio-proxy` sidecar.

To see the wasm logs on the `istio-proxy`:

```
curl -X POST localhost:15000/logging?wasm=info
```
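Once `istio_rabbitmq_filter.yaml` is applied, a quick way to confirm the module actually loaded is to grep the sidecar logs for the start-up message the filter emits (`[RABBITMQ-WASM-FILTER] Init Streaming`, see `network/src/lib.rs`); pod and namespace names follow the `kubectl cp` commands above:

```shell
kubectl logs filter-server-0 -c istio-proxy -n default | grep "RABBITMQ-WASM-FILTER"
```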
-------------------------------------------------------------------------------- /workshop/envoyfilter/examples/istio/istio_rabbitmq_filter.yaml: -------------------------------------------------------------------------------- 1 | # enable filter logs curl -X POST localhost:15000/logging?wasm=info 2 | # global --set global.logging.level=default:info,wasm:info 3 | apiVersion: networking.istio.io/v1alpha3 4 | kind: EnvoyFilter 5 | metadata: 6 | name: rabbitmq-env-filter 7 | spec: 8 | configPatches: 9 | - applyTo: NETWORK_FILTER 10 | match: 11 | context: SIDECAR_INBOUND 12 | proxy: 13 | proxyVersion: '1\.8.*' 14 | listener: 15 | portNumber: 5672 16 | filterChain: 17 | filter: 18 | name: envoy.filters.network.tcp_proxy 19 | patch: 20 | operation: INSERT_BEFORE 21 | value: 22 | name: envoy.filters.network.wasm 23 | typed_config: 24 | "@type": type.googleapis.com/envoy.extensions.filters.network.wasm.v3.Wasm 25 | config: 26 | name: rabbitmq.filters.network.network 27 | rootId: rabbitmq.filters.network.network 28 | vmConfig: 29 | code: 30 | local: 31 | filename: /var/local/wasm-filters/rabbitmq_network_filter.wasm 32 | runtime: envoy.wasm.runtime.v8 33 | vmId: rabbitmq.filters.network.network 34 | allow_precompiled: true 35 | --- 36 | apiVersion: networking.istio.io/v1alpha3 37 | kind: EnvoyFilter 38 | metadata: 39 | name: rabbitmq-bufferlimit 40 | spec: 41 | configPatches: 42 | - applyTo: LISTENER 43 | ListenerMatch: 0.0.0.0_5672 44 | patch: 45 | operation: MERGE 46 | value: 47 | per_connection_buffer_limit_bytes: 4096 48 | --- 49 | apiVersion: networking.istio.io/v1alpha3 50 | kind: EnvoyFilter 51 | metadata: 52 | name: rabbitmq-env-filter-http 53 | spec: 54 | configPatches: 55 | - applyTo: HTTP_FILTER 56 | match: 57 | context: ANY 58 | proxy: 59 | proxyVersion: '1\.8.*' 60 | listener: 61 | portNumber: 15672 62 | filterChain: 63 | filter: 64 | name: envoy.http_connection_manager 65 | subFilter: 66 | name: envoy.router 67 | patch: 68 | operation: INSERT_BEFORE 69 | value: 70 | typed_config: 71 | "@type": type.googleapis.com/envoy.extensions.filters.http.wasm.v3.Wasm 72 | config: 73 | name: rabbitmq.filters.http.network 74 | rootId: rabbitmq.filters.http.network 75 | vmConfig: 76 | code: 77 | local: 78 | filename: /var/local/wasm-filters/rabbitmq_http_filter.wasm 79 | runtime: envoy.wasm.runtime.v8 80 | vmId: rabbitmq.filters.http.network 81 | allow_precompiled: true 82 | name: envoy.filters.http.wasm 83 | # --- 84 | # apiVersion: networking.istio.io/v1alpha3 85 | # kind: EnvoyFilter 86 | # metadata: 87 | # name: rabbitmq-original-src 88 | # spec: 89 | # configPatches: 90 | # - applyTo: LISTENER 91 | # match: 92 | # context: SIDECAR_INBOUND 93 | # listener: 94 | # portNumber: 5672 95 | # patch: 96 | # operation: MERGE 97 | # value: 98 | # listenerFilters: 99 | # - name: envoy.listener.original_src 100 | # config: 101 | # mark: 133 102 | -------------------------------------------------------------------------------- /workshop/envoyfilter/examples/istio/istio_rabbitmq_vservice.yaml: -------------------------------------------------------------------------------- 1 | # Copyright 2018 Istio Authors 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | apiVersion: networking.istio.io/v1alpha3 15 | kind: Gateway 16 | metadata: 17 | name: rabbitmq-app-gateway 18 | spec: 19 | selector: 20 | istio: ingressgateway 21 | servers: 22 | - port: 23 | number: 5672 24 | name: amqp 25 | protocol: TCP 26 | hosts: 27 | - "*" 28 | - port: 29 | number: 5671 30 | name: amqps 31 | protocol: TLS 32 | tls: 33 | mode: PASSTHROUGH 34 | # mode: SIMPLE 35 | # credentialName: rabbitmq-bugs-tls 36 | hosts: 37 | - "*" 38 | - port: 39 | number: 15671 40 | name: https 41 | protocol: HTTPS 42 | tls: 43 | mode: PASSTHROUGH 44 | # mode: SIMPLE 45 | # credentialName: rabbitmq-bugs-tls 46 | hosts: 47 | - "*" 48 | # - bugs.4messages.net 49 | - port: 50 | number: 15672 51 | name: http 52 | protocol: HTTP 53 | hosts: 54 | - "*" 55 | --- 56 | apiVersion: networking.istio.io/v1alpha3 57 | kind: VirtualService 58 | metadata: 59 | name: rabbitmq-app-tcp 60 | spec: 61 | hosts: 62 | - "*" 63 | gateways: 64 | - rabbitmq-app-gateway 65 | tcp: 66 | - match: 67 | - port: 5672 68 | route: 69 | - destination: 70 | host: filter 71 | port: 72 | number: 5672 73 | --- 74 | apiVersion: networking.istio.io/v1alpha3 75 | kind: VirtualService 76 | metadata: 77 | name: rabbitmq-app-tls 78 | spec: 79 | hosts: 80 | - bugs.4messages.net 81 | gateways: 82 | - rabbitmq-app-gateway 83 | tcp: 84 | - match: 85 | - port: 5671 86 | route: 87 | - destination: 88 | host: filter 89 | port: 90 | number: 5671 91 | --- 92 | apiVersion: networking.istio.io/v1alpha3 93 | kind: VirtualService 94 | metadata: 95 | name: rabbitmq-app-https 96 | spec: 97 | hosts: 98 | - bugs.4messages.net 99 | gateways: 100 | - rabbitmq-app-gateway 101 | http: 102 | - match: 103 | - port: 15671 104 | - uri: 105 | prefix: / 106 | route: 107 | - destination: 108 | host: filter 109 | port: 110 | number: 15671 111 | --- 112 | apiVersion: networking.istio.io/v1alpha3 113 | kind: VirtualService 114 | metadata: 115 | name: rabbitmq-app-http 116 | spec: 117 | hosts: 118 | - "*" 119 | gateways: 120 | - rabbitmq-app-gateway 121 | http: 122 | - match: 123 | - port: 15672 124 | - uri: 125 | prefix: / 126 | route: 127 | - destination: 128 | host: filter 129 | port: 130 | number: 15672 -------------------------------------------------------------------------------- /workshop/envoyfilter/examples/istio/rabbitmq-bugs-perftest.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: rabbitmq-bugs-perf-test 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: 9 | app: rabbitmq-bugs-perf-test 10 | template: 11 | metadata: 12 | labels: 13 | app: rabbitmq-bugs-perf-test 14 | annotations: 15 | prometheus.io/port: "8080" 16 | prometheus.io/scrape: "true" 17 | spec: 18 | containers: 19 | - name: perftest 20 | # https://hub.docker.com/r/pivotalrabbitmq/perf-test/tags 21 | # SNI support was added in 2.13.0: https://github.com/rabbitmq/rabbitmq-perf-test/pull/253 22 | image: pivotalrabbitmq/perf-test:2.13.0 23 | resources: 24 | limits: 25 | cpu: 0.5 26 | memory: 128Mi 27 | requests: 28 | cpu: 0.5 29 | memory: 128Mi 30 | command: 31 | - /bin/bash 32 | - -c 33 | args: 34 | - |- 35 | bin/runjava com.rabbitmq.perf.PerfTest \ 36 | --uri "amqps://$RABBITMQ_USER:$RABBITMQ_PASS@filter.default:5671/%2f" 37 | env: 38 | - name: RABBITMQ_USER 39 | valueFrom: 40 | secretKeyRef: 41 | name: filter-default-user 42 | key: username 43 | - name: RABBITMQ_PASS 44 | valueFrom: 45 | secretKeyRef: 46 | name: filter-default-user 47 | key: password 48 | 49 | - name: PRODUCERS 50 
| value: "3" 51 | # - name: VARIABLE_RATE 52 | # value: "1:30,10:30,2:30,0:60" 53 | ## - name: CONFIRM 54 | # value: "1" 55 | # - name: JSON_BODY 56 | # value: "true" 57 | # - name: SIZE 58 | # value: "1000" 59 | - name: FLAG 60 | value: "persistent" 61 | - name: ROUTING_KEY 62 | value: "quorum" 63 | 64 | - name: CONSUMERS 65 | value: "6" 66 | # - name: CONSUMER_LATENCY 67 | # value: "50000" 68 | 69 | - name: QUEUE_PATTERN 70 | value: "quorum-%d" 71 | - name: QUEUE_PATTERN_FROM 72 | value: "1" 73 | - name: QUEUE_PATTERN_TO 74 | value: "3" 75 | - name: QUEUE_ARGS 76 | value: "x-max-length=30000,x-queue-type=quorum,x-max-in-memory-bytes=9000000" 77 | - name: AUTO_DELETE 78 | value: "false" 79 | 80 | - name: METRICS_PROMETHEUS 81 | value: "true" 82 | -------------------------------------------------------------------------------- /workshop/envoyfilter/examples/istio/rabbitmq.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: wasm-disk 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | resources: 9 | requests: 10 | storage: 1Gi 11 | storageClassName: standard 12 | --- 13 | apiVersion: rabbitmq.com/v1beta1 14 | kind: RabbitmqCluster 15 | metadata: 16 | name: filter 17 | spec: 18 | # skipPostDeploySteps: false 19 | replicas: 1 20 | # tls: 21 | # secretName: rabbitmq-bugs-tls 22 | resources: 23 | requests: 24 | cpu: 2 25 | memory: 2Gi 26 | limits: 27 | cpu: 2 28 | memory: 2Gi 29 | rabbitmq: 30 | additionalConfig: | 31 | vm_memory_high_watermark.relative = 0.7 32 | # proxy_protocol = true 33 | override: 34 | statefulSet: 35 | spec: 36 | template: 37 | metadata: 38 | annotations: 39 | # sidecar.istio.io/interceptionMode: TPROXY 40 | sidecar.istio.io/userVolumeMount: '[{"name": "rabbitmq-filter", "mountPath": "/var/local/wasm-filters/"}]' 41 | sidecar.istio.io/userVolume: '[{"name": "rabbitmq-filter", "PersistentVolumeClaim": {"claimName": "wasm-disk"}}]' 42 | -------------------------------------------------------------------------------- /workshop/envoyfilter/img/envoy_tls_termination_initiator.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Gsantomaggio/rabbitmq-utils/07423e9b253af5e602e3b06e65d1fc22157586f7/workshop/envoyfilter/img/envoy_tls_termination_initiator.png -------------------------------------------------------------------------------- /workshop/envoyfilter/network/Cargo.lock: -------------------------------------------------------------------------------- 1 | # This file is automatically @generated by Cargo. 2 | # It is not intended for manual editing. 
3 | version = 3 4 | 5 | [[package]] 6 | name = "ahash" 7 | version = "0.7.6" 8 | source = "registry+https://github.com/rust-lang/crates.io-index" 9 | checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" 10 | dependencies = [ 11 | "getrandom", 12 | "once_cell", 13 | "version_check", 14 | ] 15 | 16 | [[package]] 17 | name = "cfg-if" 18 | version = "1.0.0" 19 | source = "registry+https://github.com/rust-lang/crates.io-index" 20 | checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" 21 | 22 | [[package]] 23 | name = "getrandom" 24 | version = "0.2.5" 25 | source = "registry+https://github.com/rust-lang/crates.io-index" 26 | checksum = "d39cd93900197114fa1fcb7ae84ca742095eed9442088988ae74fa744e930e77" 27 | dependencies = [ 28 | "cfg-if", 29 | "libc", 30 | "wasi", 31 | ] 32 | 33 | [[package]] 34 | name = "hashbrown" 35 | version = "0.11.2" 36 | source = "registry+https://github.com/rust-lang/crates.io-index" 37 | checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" 38 | dependencies = [ 39 | "ahash", 40 | ] 41 | 42 | [[package]] 43 | name = "libc" 44 | version = "0.2.120" 45 | source = "registry+https://github.com/rust-lang/crates.io-index" 46 | checksum = "ad5c14e80759d0939d013e6ca49930e59fc53dd8e5009132f76240c179380c09" 47 | 48 | [[package]] 49 | name = "log" 50 | version = "0.4.14" 51 | source = "registry+https://github.com/rust-lang/crates.io-index" 52 | checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" 53 | dependencies = [ 54 | "cfg-if", 55 | ] 56 | 57 | [[package]] 58 | name = "once_cell" 59 | version = "1.10.0" 60 | source = "registry+https://github.com/rust-lang/crates.io-index" 61 | checksum = "87f3e037eac156d1775da914196f0f37741a274155e34a0b7e427c35d2a2ecb9" 62 | 63 | [[package]] 64 | name = "proxy-wasm" 65 | version = "0.1.4" 66 | source = "registry+https://github.com/rust-lang/crates.io-index" 67 | checksum = "d5b944c570b7e30d8b6725753360c5f92311d0888d0e86089e1c651f0d3c2ef3" 68 | dependencies = [ 69 | "hashbrown", 70 | "log", 71 | ] 72 | 73 | [[package]] 74 | name = "rabbitmq-network-filter" 75 | version = "0.1.0" 76 | dependencies = [ 77 | "log", 78 | "proxy-wasm", 79 | ] 80 | 81 | [[package]] 82 | name = "version_check" 83 | version = "0.9.4" 84 | source = "registry+https://github.com/rust-lang/crates.io-index" 85 | checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" 86 | 87 | [[package]] 88 | name = "wasi" 89 | version = "0.10.2+wasi-snapshot-preview1" 90 | source = "registry+https://github.com/rust-lang/crates.io-index" 91 | checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" 92 | -------------------------------------------------------------------------------- /workshop/envoyfilter/network/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "stream_filter" 3 | version = "0.1.0" 4 | authors = ["Gabriele Santomaggio "] 5 | edition = "2021" 6 | 7 | [lib] 8 | name = "stream_filter" 9 | crate-type = ["cdylib"] 10 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 11 | 12 | [dependencies] 13 | proxy-wasm = "0.2.1" 14 | log = "0.4" 15 | byteorder = "1" -------------------------------------------------------------------------------- /workshop/envoyfilter/network/src/lib.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use byteorder::ByteOrder; 4 
| // use log::error;
use log::{error, info};
use proxy_wasm::traits::*;
use proxy_wasm::types::*;

/// A std Result with a lapin::Error error type
// pub type Result = std::result::Result;
#[no_mangle]
pub fn _start() {
    proxy_wasm::set_log_level(LogLevel::Info);

    proxy_wasm::set_stream_context(|context_id, root_context_id| -> Box<dyn StreamContext> {
        Box::new(StreamingAuthorizer {
            context_id,
            root_context_id,
        })
    });

    proxy_wasm::set_root_context(|_root_context_id| -> Box<dyn RootContext> {
        Box::new(StreamingAuthorizer::new())
    });
}

struct StreamingAuthorizer {
    context_id: u32,
    root_context_id: u32,
}

impl Context for StreamingAuthorizer {}

impl StreamingAuthorizer {
    fn new() -> Self {
        Self {
            context_id: 0,
            root_context_id: 0,
        }
    }

    // Decodes the fixed part of a stream-protocol frame: 4-byte length,
    // 2-byte command key and 2-byte version, all big endian.
    // Command key 17 (0x11) is PeerProperties, key 2 is Publish
    // (see the PROTOCOL.adoc linked in the workshop README).
    fn parse(tou8: &&[u8]) {
        let length = byteorder::BigEndian::read_i32(&tou8);
        let command = byteorder::BigEndian::read_u16(&tou8[4..]);
        let version = byteorder::BigEndian::read_u16(&tou8[6..]);
        let mut index: i32 = 16;
        if command == 17 {
            // PeerProperties: a correlation id, then the client-properties map
            let _corr_id = byteorder::BigEndian::read_u32(&tou8[8..]);
            let _items = byteorder::BigEndian::read_u32(&tou8[12..]);

            let mut i = 0;
            info!("\x1b[0;32mHeader: protocol-length {:?} command {:?} version {:?}\x1b[0m",
                  length, command, version);
            // Read six key/value pairs: each string is a 2-byte length
            // followed by that many UTF-8 bytes (the demo assumes six
            // client-provided properties).
            while i < 6 {
                let len = byteorder::BigEndian::read_u16(&tou8[index as usize..]);
                index += 2;
                let end = index as usize + len as usize;
                let bytes = &tou8[index as usize..end];
                let key = String::from_utf8(bytes.to_vec());
                index += len as i32;

                let len_v = byteorder::BigEndian::read_u16(&tou8[index as usize..]);
                index += 2;
                let end = index as usize + len_v as usize;
                let bytes_v = &tou8[index as usize..end];
                let value = String::from_utf8(bytes_v.to_vec());
                index += len_v as i32;
                // mymap.insert(key.unwrap(), value.unwrap());
                info!("\x1b[0;33m Client Prop: {:?} - :{:?}\x1b[0m",
                      key.unwrap(), value.unwrap());
                i += 1;
            }
        }
        if command == 2 {
            // Publish: skip the 1-byte publisher id, then read the message count
            index += 1;
            let len = byteorder::BigEndian::read_u32(&tou8[index as usize..]);
            info!("\x1b[0;33m published {:?} messages\x1b[0m", len);
        }
    }
}

impl RootContext for StreamingAuthorizer {
    fn on_vm_start(&mut self, _vm_configuration_size: usize) -> bool {
        info!("[RABBITMQ-WASM-FILTER] Init Streaming");
        true
    }

    fn on_tick(&mut self) {
        info!("[RABBITMQ-WASM-FILTER] ON TICK");
    }

    fn on_queue_ready(&mut self, queue_id: u32) {
        info!("[RABBITMQ-WASM-FILTER] ON QUEUE READY {}", queue_id);
    }
}

impl StreamContext for StreamingAuthorizer {
    fn on_new_connection(&mut self) -> Action {
        Action::Continue
    }

    fn on_downstream_data(&mut self, data_size: usize, _end_of_stream: bool) -> Action {
        if let Some(data) = self.get_downstream_data(0, data_size) {
            let tou8: &[u8] = &data;
            // Ignore reads shorter than the fixed 8-byte frame header;
            // parse() assumes a complete frame in the buffer.
            if tou8.len() < 8 {
                return Action::Continue;
            }
            // info!("Stream traffic-raw {:?}", tou8);
            Self::parse(&tou8);
            let version = byteorder::BigEndian::read_u16(&tou8[6..]);
            if version == 1 {
                error!("\x1b[0;31m Drop connection: VERSION {:?} not allowed \x1b[0m",
                       version);
                return Action::Pause;
            }
        }
        Action::Continue
    }

    fn on_downstream_close(&mut self, _peer_type: PeerType) {
        // info!("[RABBITMQ-WASM-FILTER] DOWN CLOSE STREAM, Peer Type: {:?}", peer_type);
    }

    fn on_upstream_data(&mut self, _data_size: usize, _end_of_stream: bool) -> Action {
        return Action::Continue;

        // if let Some(data) = self.get_upstream_data(0, _data_size) {
        //     let tou8: &[u8] = &data;
        //     let length = byteorder::BigEndian::read_u32(&tou8);
        //     let command = byteorder::BigEndian::read_u16(&tou8[4..]);
        //     let version = byteorder::BigEndian::read_u16(&tou8[6..]);
        //     info!("\x1b[0;33m[RABBITMQ-WASM-FILTER] Header: protocol length {:?} command {:?} version {:?}\x1b[0m",
        //           length, command, version);
        //     if command == 21 {
        //         info!("\x1b[0;31m[RABBITMQ-WASM-FILTER] Header: version ERROR {:?}\x1b[0m", version);
        //         return Action::Pause
        //     }
        // }
        // return Action::Continue
    }

    fn on_upstream_close(&mut self, _peer_type: PeerType) {
        // info!("[RABBITMQ-WASM-FILTER] CLOSE UP STREAM, Peer Type: {:?}", peer_type);
    }
}
--------------------------------------------------------------------------------
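A quick way to watch this filter parse real frames locally, outside Istio: start Envoy with the workshop config and drive stream traffic through the proxied port 5553 from `envoy_tcp_v3.yaml`. The `pivotalrabbitmq/stream-perf-test` image and its `--uris` flag are assumptions here; any stream client from the list in the workshop README pointed at port 5553 works as well:

```shell
# terminal 1: build the filter and start Envoy (listener 5553 -> RabbitMQ 5552)
make build && make run-envoy

# terminal 2: generate stream traffic through the proxy
docker run -it --rm --network host pivotalrabbitmq/stream-perf-test \
    --uris rabbitmq-stream://guest:guest@localhost:5553/%2f
```

The `Client Prop:` and `published ... messages` lines from `parse()` should then show up in the Envoy log.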