├── src
│   ├── rabbit_consumer_stress_app.erl
│   ├── rabbit_consumer_stress_sup.erl
│   ├── rabbit_http_stress.erl
│   ├── http_api_measure.erl
│   ├── rabbit_consumer_stress.erl
│   ├── rabbit_stress.erl
│   └── rabbit_churn.erl
├── Makefile
└── README.md
/src/rabbit_consumer_stress_app.erl:
--------------------------------------------------------------------------------
 1 | -module(rabbit_consumer_stress_app).
 2 | -behaviour(application).
 3 | 
 4 | -export([start/2]).
 5 | -export([stop/1]).
 6 | 
 7 | start(_Type, _Args) ->
 8 |     rabbit_consumer_stress_sup:start_link().
 9 | 
10 | stop(_State) ->
11 |     ok.
12 | 
--------------------------------------------------------------------------------
/src/rabbit_consumer_stress_sup.erl:
--------------------------------------------------------------------------------
 1 | -module(rabbit_consumer_stress_sup).
 2 | -behaviour(supervisor).
 3 | 
 4 | -export([start_link/0]).
 5 | -export([init/1]).
 6 | 
 7 | start_link() ->
 8 |     supervisor:start_link({local, ?MODULE}, ?MODULE, []).
 9 | 
10 | init([]) ->
11 |     Procs = [],
12 |     {ok, {{one_for_one, 1, 5}, Procs}}.
13 | 
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
 1 | PROJECT = rabbit_stress
 2 | PROJECT_DESCRIPTION = RabbitMQ consumer stress test
 3 | PROJECT_VERSION = 0.0.1
 4 | 
 5 | DEPS = amqp_client getopt gun
 6 | 
 7 | dep_amqp_client_commit = stable
 8 | 
 9 | include erlang.mk
10 | 
11 | 
12 | escript::
13 | 	cp rabbit_stress rabbit_consumer_stress
14 | 	cp rabbit_stress rabbit_http_stress
15 | 	rm rabbit_stress
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
 1 | # Naive RabbitMQ stress test tool
 2 | 
 3 | This repo contains two stress testing tools:
 4 | - Process creation tool
 5 | - HTTP API tool
 6 | 
 7 | The process creation tool is used to stress test the creation of connections, channels, queues and consumers in a RabbitMQ broker.
 8 | 
 9 | The HTTP API tool is used to measure the performance of HTTP API requests.
10 | 
11 | ## Process creation tool
12 | 
13 | This tool uses the Erlang client to open connections and create channels, queues and consumers.
14 | 
15 | It creates connections, channels for each connection, queues for each channel
16 | and consumers for each queue, waits for `sleep` milliseconds and then closes the connections.
17 | 
18 | The tool currently targets RabbitMQ `3.6.x`.
19 | 
20 | ## HTTP API tool
21 | 
22 | This tool measures the response time of HTTP API requests that run in parallel.
23 | 
24 | It uses the `gun` HTTP client to create parallel connections and execute requests.
25 | 
26 | # Usage
27 | 
28 | To build the escript files, run:
29 | 
30 | ```
31 | make escript
32 | ```
33 | 
34 | To show the tool usage:
35 | 
36 | Process creation tool:
37 | 
38 | ```
39 | ./rabbit_consumer_stress -h
40 | ```
41 | 
42 | HTTP API tool:
43 | 
44 | ```
45 | ./rabbit_http_stress -h
46 | ```
47 | 
48 | The test output contains execution time statistics for a series of test runs.
49 | 
50 | The memory report (enabled with the `-M` flag) contains a RabbitMQ memory breakdown.
51 | 
52 | 
53 | 
--------------------------------------------------------------------------------
/src/rabbit_http_stress.erl:
--------------------------------------------------------------------------------
1 | -module(rabbit_http_stress).
2 | 
3 | -export([main/1]).
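%% Usage sketch (illustrative, not part of the original module): the escript
%% built by `make escript` dispatches here via rabbit_stress:main/1, so the
%% flags correspond to the ?OPTIONS spec that follows. The host, port and
%% node names below are assumptions for the example.
%%
%%   ./rabbit_http_stress -H localhost -P 15672 -r 1000 -p 5
%%   ./rabbit_http_stress -H localhost -P 15672 -M -n rabbit@localhost -N http_test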
4 | 
5 | -define(OPTIONS, [
6 |     {host, $H, "host", {string, "localhost"}, "HTTP host to connect to."},
7 |     {port, $P, "port", {integer, 15672}, "Port to connect to."},
8 |     {total_requests, $r, "total_requests", {integer, 1000}, "Total number of requests for each target"},
9 |     {parallel, $p, "parallel", {integer, 5}, "Number of requests for each target to run in parallel"},
10 |     {report_memory, $M, "report_memory", {boolean, false}, "Report memory breakdown during run"},
11 |     {node, $n, "node", atom, "Node name of tested broker node. Required to report memory"},
12 |     {self_node_name, $N, "self_node_name", {atom, http_test}, "Name of the test node."}
13 | ]).
14 | 
15 | 
16 | main(["-h"]) ->
17 |     getopt:usage(?OPTIONS, "rabbit_http_stress");
18 | main(Args) ->
19 |     case getopt:parse(?OPTIONS, Args) of
20 |         {ok, {Options, []}} ->
21 |             run_test(Options);
22 |         {ok, {_, Invalid}} ->
23 |             io:format("Invalid options ~p~n"
24 |                       "Run 'rabbit_http_stress -h' to see available options~n",
25 |                       [Invalid])
26 |     end.
27 | 
28 | run_test(Options) ->
29 |     Host = proplists:get_value(host, Options),
30 |     Port = proplists:get_value(port, Options),
31 |     Total = proplists:get_value(total_requests, Options),
32 |     Parallel = proplists:get_value(parallel, Options),
33 |     Node = proplists:get_value(node, Options),
34 |     ReportMemory = proplists:get_value(report_memory, Options),
35 |     SelfNode = proplists:get_value(self_node_name, Options),
36 |     rabbit_stress:start_distribution(SelfNode),
37 | 
38 |     TestFun = fun() -> http_api_measure:start_test(Host, Port, Total, Parallel) end,
39 |     case ReportMemory of
40 |         true ->
41 |             rabbit_stress:with_memory(Node, 5000, TestFun, false); % with_memory/4 expects a Verbose flag
42 |         false ->
43 |             TestFun()
44 |     end.
45 | 
46 | 
--------------------------------------------------------------------------------
/src/http_api_measure.erl:
--------------------------------------------------------------------------------
1 | -module(http_api_measure).
2 | 
3 | -compile(export_all).
4 | 
5 | % Very long http timeout
6 | -define(TIMEOUT, 1000000).
7 | 
8 | prepare() ->
9 |     inets:start(),
10 |     application:ensure_all_started(gun).
11 | 
12 | start_test(Host, Port, Total, Parallel) ->
13 |     prepare(),
14 |     Requests = ["/api/queues", "/api/channels", "/api/connections"],
15 |     RequestPlans = gen_request_plans(Host, Port, Total, Parallel, Requests),
16 |     lists:foreach(
17 |         fun({T, P, Q} = Plan) ->
18 |             io:format("Executing ~p requests for query ~p with ~p parallel processes.~n",
19 |                       [T, Q, P]),
20 |             Result = run_plan(Plan),
21 |             format_report(Plan, Result)
22 |         end,
23 |         RequestPlans).
24 | 
25 | format_report({Requests, _, _}, Result) ->
26 |     Raw = [ round(Time) || Time <- Result ],
27 |     Avg = lists:sum(Raw) / Requests,
28 |     Sorted = lists:sort(Raw),
29 |     Mean = lists:nth(round(0.5 * Requests), Sorted),
30 |     Per75 = lists:nth(round(0.75 * Requests), Sorted),
31 |     Per90 = lists:nth(round(0.9 * Requests), Sorted),
32 |     Per95 = lists:nth(round(0.95 * Requests), Sorted),
33 |     Max = lists:max(Sorted),
34 |     Min = lists:min(Sorted),
35 | 
36 |     Stats = [{avg, Avg},
37 |              {mean, Mean},
38 |              {per75, Per75},
39 |              {per90, Per90},
40 |              {per95, Per95},
41 |              {max, Max},
42 |              {min, Min}],
43 | 
44 |     io:format("~nTime statistics ~p~n", [Stats]).
45 | 
46 | gen_request_plans(Host, Port, Total, Parallel, Requests) ->
47 |     [{Total, Parallel, {Host, Port, Req}} || Req <- Requests].
48 | 
49 | run_plan({Total, Parallel, Req}) ->
50 |     Pid = self(),
51 |     [ request_proc(Req, Pid) || _ <- lists:seq(1, Parallel) ],
52 |     plan_loop(Total, 0, []).
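%% A minimal interactive sketch of driving this module directly, from a shell
%% where `gun` is on the code path (assumes a broker with the management
%% plugin on localhost:15672 and the default guest credentials that are
%% hard-coded in the authorization header below):
%%
%%   1> http_api_measure:start_test("localhost", 15672, 100, 5).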
53 | 54 | request_proc(Req, Pid) -> 55 | spawn_link(fun() -> 56 | request_loop(Req, Pid) 57 | end). 58 | 59 | request_loop({Host, Port, Path} = Req, Pid) -> 60 | Pid ! {request, self()}, 61 | receive 62 | next -> 63 | {ok, ConnPid} = gun:open(Host, Port), 64 | gun:await_up(ConnPid), 65 | {Time, _} = timer:tc(fun() -> 66 | StreamRef = gun:get(ConnPid, Path, [{<<"authorization">>, "Basic Z3Vlc3Q6Z3Vlc3Q="}]), 67 | case gun:await(ConnPid, StreamRef, ?TIMEOUT) of 68 | {response, fin, _Status, _Headers} -> 69 | no_data; 70 | {response, nofin, _Status, _Headers} -> 71 | {ok, _} = gun:await_body(ConnPid, StreamRef, ?TIMEOUT) 72 | end, 73 | gun:flush(StreamRef), 74 | gun:cancel(ConnPid, StreamRef) 75 | end), 76 | gun:shutdown(ConnPid), 77 | Pid ! {done, Time}, 78 | io:format("."), 79 | request_loop(Req, Pid); 80 | done -> 81 | ok 82 | end. 83 | 84 | plan_loop(0, 0, Done) -> 85 | receive 86 | {request, Pid} -> 87 | Pid ! done, 88 | plan_loop(0, 0, Done) 89 | after 100 -> 90 | Done 91 | end; 92 | plan_loop(Planned, Waiting, Done) -> 93 | receive 94 | {done, Time} -> 95 | plan_loop(Planned, Waiting - 1, [Time | Done]); 96 | {request, Pid} -> 97 | case Planned of 98 | 0 -> 99 | Pid ! done, 100 | plan_loop(Planned, Waiting, Done); 101 | _ -> 102 | Pid ! next, 103 | plan_loop(Planned - 1, Waiting + 1, Done) 104 | end 105 | end. -------------------------------------------------------------------------------- /src/rabbit_consumer_stress.erl: -------------------------------------------------------------------------------- 1 | -module(rabbit_consumer_stress). 2 | 3 | -export([main/1]). 4 | 5 | -define(OPTIONS, 6 | [ 7 | {type, $t, "type", {atom, network}, "Connection type."}, 8 | {runs, $r, "runs", {integer, 10}, "Number of test runs."}, 9 | {mode, $m, "mode", {atom, sync}, "Run mode. sync - run sequentially, async - run in parallel"}, 10 | {interval, $i, "interval", {integer, 2000}, "Interval between starting parallel runs."}, 11 | {connections, $c, "connections", {integer, 10}, "Number of connections"}, 12 | {channels, $C, "channels", {integer, 10}, "Number of channels per connection"}, 13 | {queues, $q, "queues", {integer, 10}, "Number of queues per channel"}, 14 | {consumers, $k, "consumers", {integer, 2}, "Number of consumers per queue"}, 15 | {sleep, $s, "sleep", {integer, 2000}, "Time to keep connection alive"}, 16 | {node, $n, "node", atom, "Node name of tested broker node."}, 17 | {report_memory, $M, "report_memory", {boolean, false}, "Report memory breakdown during run"}, 18 | {self_node_name, $N, "self_node_name", {atom, undefined}, "Name of the test node."}, 19 | {host, $H, "host", {string, "localhost"}, "Host to connect to"}, 20 | {port, $P, "port", {integer, 5672}, "Port to connect to"}, 21 | {producers, $p, "producers", {integer, 1}, "Number of producers"}, 22 | {publish_interval, $I, "publish_interval", {integer, 500}, "Interval to wait between publishes"}, 23 | {consume_interval, $K, "consume_interval", {integer, 500}, "Interval to wait between consumes"}, 24 | {verbose, $V, "verbose", {boolean, false}, "Verbose memory report"}, 25 | {report_memory_interval, $R, "report_memory_interval", {integer, 2000}, "Memory report interval"} 26 | ]). 27 | 28 | main(["-h"]) -> 29 | getopt:usage(?OPTIONS, "rabbit_consumer_stress"); 30 | main(Args) -> 31 | case getopt:parse(?OPTIONS, Args) of 32 | {ok, {Options, []}} -> 33 | run_test(Options); 34 | {ok, {_, Invalid}} -> 35 | io:format("Invalid options ~p~n" 36 | "Run 'rabbit_consumer_stress -h' to see available options~n", 37 | [Invalid]) 38 | end. 
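%% Example invocations (a sketch; the values and the broker node name are
%% illustrative, see ?OPTIONS above for the defaults):
%%
%%   ./rabbit_consumer_stress -r 5 -c 10 -C 2 -q 5 -k 1 -s 3000
%%   ./rabbit_consumer_stress -t direct -n rabbit@localhost -N stress_test -M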
39 | 
40 | run_test(Options) ->
41 |     Type = proplists:get_value(type, Options),
42 |     Runs = proplists:get_value(runs, Options),
43 |     SyncMode = proplists:get_value(mode, Options),
44 |     Interval = proplists:get_value(interval, Options),
45 |     Connections = proplists:get_value(connections, Options),
46 |     Channels = proplists:get_value(channels, Options),
47 |     Queues = proplists:get_value(queues, Options),
48 |     Consumers = proplists:get_value(consumers, Options),
49 |     Sleep = proplists:get_value(sleep, Options),
50 |     Node = proplists:get_value(node, Options),
51 |     ReportMemory = proplists:get_value(report_memory, Options),
52 |     SelfNodeName = proplists:get_value(self_node_name, Options),
53 |     Host = proplists:get_value(host, Options),
54 |     Port = proplists:get_value(port, Options),
55 |     Producers = proplists:get_value(producers, Options),
56 |     PublishInterval = proplists:get_value(publish_interval, Options),
57 |     ConsumeInterval = proplists:get_value(consume_interval, Options),
58 |     Verbose = proplists:get_value(verbose, Options),
59 |     MemInterval = proplists:get_value(report_memory_interval, Options),
60 |     rabbit_stress:start_distribution(SelfNodeName),
61 |     case {Type, Node} of
62 |         {direct, undefined} ->
63 |             io:format("~nDirect connections require node to be specified!~n~n"),
64 |             getopt:usage(?OPTIONS, "rabbit_consumer_stress");
65 |         _ ->
66 |             TestFun = fun() ->
67 |                 rabbit_churn:with_stats(
68 |                     #{
69 |                         node => Node,
70 |                         runs => Runs,
71 |                         interval => Interval,
72 |                         sleep => Sleep,
73 |                         sync_mode => SyncMode,
74 |                         connection_type => Type,
75 |                         connections => Connections,
76 |                         channels => Channels,
77 |                         queues => Queues,
78 |                         consumers => Consumers,
79 |                         host => Host,
80 |                         port => Port,
81 |                         producers => Producers,
82 |                         publish_interval => PublishInterval,
83 |                         consume_interval => ConsumeInterval
84 |                     })
85 |             end,
86 |             case ReportMemory of
87 |                 true ->
88 |                     rabbit_stress:with_memory(Node, MemInterval, TestFun, Verbose);
89 |                 false ->
90 |                     TestFun()
91 |             end
92 |     end.
93 | 
94 | 
95 | 
96 | 
--------------------------------------------------------------------------------
/src/rabbit_stress.erl:
--------------------------------------------------------------------------------
1 | -module(rabbit_stress).
2 | 
3 | -export([main/1, start_distribution/1]).
4 | -export([with_memory/4, report_memory/3]).
5 | 
6 | main(Args) ->
7 |     case script_name() of
8 |         "rabbit_consumer_stress" ->
9 |             rabbit_consumer_stress:main(Args);
10 |         "rabbit_http_stress" ->
11 |             rabbit_http_stress:main(Args);
12 |         Other ->
13 |             io:format("Unknown script ~p~n", [Other])
14 |     end.
15 | 
16 | script_name() ->
17 |     filename:basename(escript:script_name(), ".escript").
18 | 
19 | start_distribution(undefined) ->
20 |     Candidate = "stress_test_" ++ integer_to_list(rand:uniform(100000)),
21 |     case start_distribution(list_to_atom(Candidate)) of
22 |         {ok, _} = OK -> OK;
23 |         _ -> start_distribution(undefined)
24 |     end;
25 | start_distribution(NodeName) ->
26 |     net_kernel:start([NodeName, shortnames]).
27 | 
28 | with_memory(undefined, _, TestFun, _) -> TestFun();
29 | with_memory(Node, Time, TestFun, Verbose) ->
30 |     io:format("Reporting memory from node ~p~n~n", [Node]),
31 |     MemCollector = memory_collector(),
32 |     {ok, MemInterval} = timer:apply_interval(
33 |         Time, rabbit_stress, report_memory, [Node, MemCollector, Verbose]),
34 |     Result = TestFun(),
35 |     {ok, cancel} = timer:cancel(MemInterval),
36 |     report_memory_collector(MemCollector, Verbose),
37 |     Result.
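%% Usage sketch for the memory wrapper above (assumptions for the example:
%% distribution has been started via start_distribution/1, 'rabbit@localhost'
%% is a reachable broker node, and the Erlang cookies match). Memory is
%% sampled every 2000 ms while the fun runs, then min/max/avg breakdowns are
%% printed:
%%
%%   1> rabbit_stress:start_distribution(my_probe).
%%   2> rabbit_stress:with_memory('rabbit@localhost', 2000,
%%                                fun() -> timer:sleep(10000) end, false).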
38 | 
39 | report_memory(Node, MemCollector, Verbose) ->
40 |     memory_report(Node, MemCollector, Verbose),
41 |     % io:format("ETS_TABLES:~n"),
42 |     % ets_report(Node),
43 |     ok.
44 | 
45 | % ets_report(Node) ->
46 | %     [io:format("~p:" ++
47 | %                [" " || _ <- lists:seq(1, 50 - length(atom_to_list(K)))] ++
48 | %                " ~.2f Mb~n", [K, V/1000000])
49 | %      || {K,V} <- rpc:call(Node, rabbit_vm, ets_tables_memory, [[all]])].
50 | 
51 | memory_report(Node, MemCollector, Verbose) ->
52 |     Mem = rpc:call(Node, rabbit_vm, memory, []),
53 |     MemCollector ! {memory, Mem},
54 |     case Verbose of
55 |         true ->
56 |             io:format("MEMORY: ~n"),
57 |             [io:format("~p:" ++
58 |                        [" " || _ <- lists:seq(1, 22 - length(atom_to_list(K)))] ++
59 |                        " ~.2f Mb~n", [K, V/1000000])
60 |              || {K,V} <- Mem];
61 |         false -> ok
62 |     end.
63 | 
64 | memory_collector() ->
65 |     spawn_link(fun() -> memory_collector([]) end).
66 | 
67 | report_memory_collector(MemCollector, Verbose) ->
68 |     MemCollector ! {report, self()},
69 |     receive {memory_report, Mems} ->
70 |         Aggr = aggregate_memory_collection(Mems),
71 |         case Verbose of
72 |             true -> io:format("Memory stats ~p~n", [Aggr]);
73 |             false -> ok
74 |         end,
75 |         lists:foreach(
76 |             fun(Item) ->
77 |                 Breakdown = [{K, proplists:get_value(Item, V)} || {K, V} <- Aggr],
78 |                 io:format("~s:~n", [Item]),
79 |                 print_breakdown(Breakdown, length(atom_to_list(Item)) + 1)
80 |             end,
81 |             [min, max, avg])
82 |     after 1000 -> error(no_memory_report_from_collector)
83 |     end.
84 | 
85 | aggregate_memory_collection([]) ->
86 |     io:format("No memory reports~n"), []; % empty aggregate keeps the caller's iteration safe
87 | aggregate_memory_collection([Mem | Rest]) ->
88 |     {Columns, FirstRow} = lists:unzip(Mem),
89 |     Rows = [ V || M <- Rest, {_, V} <- [lists:unzip(M)] ],
90 |     Stats = merge_rows(Columns, [FirstRow | Rows], []),
91 |     lists:map(
92 |         fun({Col, Vals}) ->
93 |             {Col, aggregate(Vals)}
94 |         end,
95 |         Stats).
96 | 
97 | print_breakdown(Breakdown, Skip) ->
98 |     [io:format(lists:duplicate(Skip, $ ) ++ "~p:" ++
99 |                [" " || _ <- lists:seq(1, 22 - length(atom_to_list(K)))] ++
100 |                " ~.2f Mb~n", [K, V/1000000])
101 |      || {K,V} <- Breakdown].
102 | 
103 | merge_rows([], _, Acc) -> Acc;
104 | merge_rows([Column | Columns], Rows, Acc) ->
105 |     {Heads, Tails} = lists:unzip([ {H, T} || [H | T] <- Rows ]),
106 |     merge_rows(Columns, Tails, [{Column, Heads} | Acc]).
107 | 
108 | aggregate(Vals) ->
109 |     Length = length(Vals),
110 |     Avg = lists:sum(Vals) / Length,
111 |     Sorted = lists:sort(Vals),
112 | 
113 |     Mean = lists:nth(round(0.5 * Length), Sorted),
114 |     Per75 = lists:nth(round(0.75 * Length), Sorted),
115 |     Per90 = lists:nth(round(0.9 * Length), Sorted),
116 |     Per95 = lists:nth(round(0.95 * Length), Sorted),
117 |     Max = lists:max(Sorted),
118 |     Min = lists:min(Sorted),
119 | 
120 |     [{avg, Avg},
121 |      {mean, Mean},
122 |      {per75, Per75},
123 |      {per90, Per90},
124 |      {per95, Per95},
125 |      {max, Max},
126 |      {min, Min}].
127 | 
128 | memory_collector(Acc) ->
129 |     receive
130 |         {memory, Mem} -> memory_collector([Mem | Acc]);
131 |         {report, Pid} -> Pid ! {memory_report, Acc}
132 |     end.
133 | 
134 | 
--------------------------------------------------------------------------------
/src/rabbit_churn.erl:
--------------------------------------------------------------------------------
1 | -module(rabbit_churn).
2 | -include_lib("amqp_client/include/amqp_client.hrl").
3 | -compile(export_all).
4 | 
5 | open_n_connections(Count, Params) ->
6 |     n_items(Count,
7 |         fun() ->
8 |             {ok, C} = amqp_connection:start(Params),
9 |             C
10 |         end).
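%% Quick sketch of using this helper on its own (the connection parameters are
%% an assumption for illustration; #amqp_params_network{} comes from the
%% amqp_client.hrl include above):
%%
%%   1> Conns = rabbit_churn:open_n_connections(
%%          3, #amqp_params_network{host = "localhost", port = 5672}).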
11 | 12 | open_n_channels(Count, Connection) -> 13 | n_items(Count, 14 | fun() -> 15 | {ok, Ch} = amqp_connection:open_channel(Connection), 16 | amqp_channel:call(Ch, #'basic.qos'{prefetch_count = 1}), 17 | Ch 18 | end). 19 | 20 | create_n_queues(Count, Channel) -> 21 | n_items(Count, 22 | fun() -> 23 | QueueName = generate_queue_name(Channel), 24 | #'queue.declare_ok'{queue = QueueName} = 25 | amqp_channel:call(Channel, #'queue.declare'{queue = QueueName}), 26 | QueueName 27 | end). 28 | 29 | create_n_consumers(Count, Channel, QueueName, ConsumeInterval) -> 30 | n_items(Count, 31 | fun() -> 32 | Pid = spawn_link(fun() -> consumer_loop(Channel, ConsumeInterval) end), 33 | 34 | #'basic.consume_ok'{consumer_tag = Tag} = 35 | amqp_channel:subscribe(Channel, 36 | #'basic.consume'{queue = QueueName}, 37 | Pid), 38 | Tag 39 | end). 40 | 41 | consumer_loop(Channel, ConsumeInterval) -> 42 | timer:sleep(ConsumeInterval), 43 | receive 44 | #'basic.consume_ok'{} -> 45 | consumer_loop(Channel, ConsumeInterval); 46 | #'basic.cancel_ok'{} -> 47 | ok; 48 | {#'basic.deliver'{delivery_tag = DTag}, _Content} -> 49 | amqp_channel:call(Channel, #'basic.ack'{delivery_tag = DTag}), 50 | consumer_loop(Channel, ConsumeInterval) 51 | end. 52 | 53 | close_consumers(ConsumerTags, Channel) -> 54 | lists:map( 55 | fun(Tag) -> 56 | amqp_channel:call(Channel, #'basic.cancel'{consumer_tag = Tag}) 57 | end, 58 | ConsumerTags). 59 | 60 | delete_queues(Queues, Channel) -> 61 | lists:map( 62 | fun(Q) -> 63 | amqp_channel:call(Channel, #'queue.delete'{queue = Q}) 64 | end, 65 | Queues). 66 | 67 | close_connections(Connections) -> 68 | lists:map( 69 | fun(Conn) -> 70 | amqp_connection:close(Conn) 71 | end, 72 | Connections). 73 | 74 | generate_queue_name(_Channel) -> 75 | list_to_binary("queue" ++ 76 | integer_to_list(rand:uniform(10000))). 77 | 78 | with_stats(#{runs := Runs, 79 | interval := Interval, 80 | sleep := Sleep, 81 | sync_mode := SyncMode, 82 | connection_type := ConnectionType, 83 | connections := Connections, 84 | channels := Channels, 85 | queues := Queues, 86 | consumers := Consumers } = Config) -> 87 | SyncMsg = case SyncMode of 88 | sync -> "sequentially"; 89 | async -> "in parallel" 90 | end, 91 | io:format( 92 | "START TEST For ~p runs ~s with ~p ms interval~n" 93 | "Starting ~p connections x ~p channels x ~p queues x ~p consumers~n" 94 | "waiting for ~p for each run~n" 95 | "Connection type: ~p~n", 96 | [Runs, SyncMsg, Interval, 97 | Connections, Channels, Queues, Consumers, 98 | Sleep, 99 | ConnectionType]), 100 | 101 | Raw = start_test(Config), 102 | io:format("END TEST~n"), 103 | 104 | Avg = lists:sum(Raw) / Runs, 105 | Sorted = lists:sort(Raw), 106 | 107 | Mean = lists:nth(round(0.5 * Runs), Sorted), 108 | Per75 = lists:nth(round(0.75 * Runs), Sorted), 109 | Per90 = lists:nth(round(0.9 * Runs), Sorted), 110 | Per95 = lists:nth(round(0.95 * Runs), Sorted), 111 | Max = lists:max(Sorted), 112 | Min = lists:min(Sorted), 113 | 114 | Stats = [{avg, Avg}, 115 | {mean, Mean}, 116 | {per75, Per75}, 117 | {per90, Per90}, 118 | {per95, Per95}, 119 | {max, Max}, 120 | {min, Min}], 121 | io:format("STATS ~p~n", [Stats]). 
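%% with_stats/1 can also be called directly with a config map; this sketch
%% mirrors the map built by rabbit_consumer_stress:run_test/1, with
%% illustrative values (node may stay 'undefined' for network connections):
%%
%%   1> rabbit_churn:with_stats(#{node => undefined, runs => 2, interval => 1000,
%%          sleep => 2000, sync_mode => sync, connection_type => network,
%%          connections => 2, channels => 2, queues => 2, consumers => 1,
%%          host => "localhost", port => 5672, producers => 1,
%%          publish_interval => 500, consume_interval => 500}).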
122 | 123 | start_test(#{node := Node, 124 | runs := Runs, 125 | interval := Interval, 126 | sleep := Sleep, 127 | sync_mode := SyncMode, 128 | connection_type := ConnectionType, 129 | connections := NConnections, 130 | channels := NChannels, 131 | queues := NQueues, 132 | consumers := NConsumers, 133 | producers := NProducers, 134 | publish_interval := PublishInterval, 135 | consume_interval := ConsumeInterval, 136 | host := Host, 137 | port := Port }) -> 138 | ConnectionParams = case ConnectionType of 139 | direct -> #amqp_params_direct{node = Node}; 140 | network -> #amqp_params_network{host = Host, port = Port} 141 | end, 142 | Pids = n_items(Runs, 143 | fun(Index) -> 144 | Fun = fun() -> 145 | Conns = open_n_connections(NConnections, ConnectionParams), 146 | ConnChannels = lists:map( 147 | fun(Conn) -> 148 | Channels = open_n_channels(NChannels, Conn), 149 | ChannelQueues = lists:map( 150 | fun(Chan) -> 151 | Queues = create_n_queues(NQueues, Chan), 152 | Conss = lists:map( 153 | fun(Q) -> 154 | Producers = create_n_producers(NProducers, Chan, Q, PublishInterval), 155 | Consumers = create_n_consumers(NConsumers, Chan, Q, ConsumeInterval), 156 | {Producers, Consumers} 157 | end, 158 | Queues), 159 | {Chan, Queues, Conss} 160 | end, 161 | Channels), 162 | ChannelQueues 163 | end, 164 | Conns), 165 | timer:sleep(Sleep), 166 | 167 | lists:map( 168 | fun(ChannelQueues) -> 169 | lists:map( 170 | fun({Chan, Queues, Conss}) -> 171 | lists:map( 172 | fun({Prod, Cons}) -> 173 | stop_producers(Prod), 174 | close_consumers(select_some(Cons), Chan) 175 | end, 176 | Conss), 177 | 178 | delete_queues(Queues, Chan) 179 | end, 180 | ChannelQueues), 181 | lists:map( 182 | fun({Chan, _, _}) -> 183 | amqp_channel:close(Chan) 184 | end, 185 | ChannelQueues) 186 | end, 187 | ConnChannels), 188 | close_connections(Conns) 189 | end, 190 | Self = self(), 191 | case SyncMode of 192 | async -> 193 | Pid = spawn_link(fun() -> 194 | Self ! {self(), timer:tc(Fun)}, 195 | io:format(".~p.", [Index]) 196 | end), 197 | timer:sleep(Interval), 198 | Pid; 199 | sync -> 200 | Self ! {none, timer:tc(Fun)}, 201 | io:format("..~p..", [Index]), 202 | none 203 | end 204 | end), 205 | io:format("~n"), 206 | ConstTime = Sleep * 1000, 207 | lists:map( 208 | fun(Pid) -> 209 | receive {Pid, {Time, _}} -> 210 | Time - ConstTime 211 | after 1000000 -> 212 | exit(timeout_waiting_for_test) 213 | end 214 | end, 215 | Pids). 216 | 217 | create_n_producers(N, Chan, Queue, Interval) -> 218 | n_items(N, fun() -> 219 | spawn_link(fun() -> 220 | send_message(Chan, Queue), 221 | producer_loop(Chan, Queue, Interval) 222 | end) 223 | end). 224 | 225 | producer_loop(Chan, Queue, Interval) -> 226 | receive 227 | stop -> ok 228 | after Interval -> 229 | send_message(Chan, Queue), 230 | producer_loop(Chan, Queue, Interval) 231 | end. 232 | 233 | send_message(Chan, Queue) -> 234 | Msg = list_to_binary("message" ++ erlang:ref_to_list(make_ref())), 235 | amqp_channel:call(Chan, 236 | #'basic.publish'{routing_key = Queue}, 237 | #amqp_msg{payload = Msg}). 238 | 239 | stop_producers(Prods) -> 240 | lists:map(fun(P) -> P ! stop end, Prods). 241 | 242 | n_items(Count, Fn) -> 243 | lists:map(fun(Index) -> 244 | case erlang:fun_info(Fn, arity) of 245 | {arity, 0} -> Fn(); 246 | {arity, 1} -> Fn(Index) 247 | end 248 | end, 249 | lists:seq(1, Count)). 250 | 251 | select_some(List) -> 252 | lists:filter(fun(_) -> rand:uniform() > 0.5 end, List). 253 | 254 | 255 | --------------------------------------------------------------------------------