├── .gitignore
├── .travis.yml
├── Dockerfile
├── aeron-cluster-poc-benchmarks
│   ├── Dockerfile
│   ├── pom.xml
│   └── src
│       └── main
│           └── java
│               └── io
│                   └── scalecube
│                       └── acpoc
│                           └── benchmarks
│                               ├── ClusterClient.java
│                               ├── ClusterConstants.java
│                               ├── ClusterJmhRunner.java
│                               ├── ClusterLatencyBenchmark.java
│                               ├── ClusterMember.java
│                               ├── ClusterNode.java
│                               ├── ClusterRoundTripBenchmark.java
│                               ├── ClusterThroughputBenchmark.java
│                               ├── ClusteredServiceAddressing.java
│                               ├── ClusteredServiceImpl.java
│                               ├── Runners.java
│                               ├── SenderReceiverAgentRunner.java
│                               └── report
│                                   ├── latency
│                                   │   ├── CompositeReportingLatencyListener.java
│                                   │   ├── ConsoleReportingLatencyListener.java
│                                   │   ├── CsvReportingLatencyListener.java
│                                   │   ├── LatencyListener.java
│                                   │   └── LatencyReporter.java
│                                   └── throughput
│                                       ├── CompositeThroughputListener.java
│                                       ├── CsvReportingThroughputListener.java
│                                       ├── ThroughputListener.java
│                                       └── ThroughputReporter.java
├── aeron-cluster-poc-client
│   └── pom.xml
├── aeron-cluster-poc-examples
│   ├── README.md
│   ├── pom.xml
│   ├── scripts
│   │   ├── backup-node.sh
│   │   ├── benchmarks
│   │   │   ├── client-ping.sh
│   │   │   ├── client-to-single-node-ping.sh
│   │   │   ├── node-pong-0.sh
│   │   │   ├── node-pong-1.sh
│   │   │   ├── node-pong-2.sh
│   │   │   └── single-node-pong.sh
│   │   ├── client-interactive.sh
│   │   ├── client.sh
│   │   ├── d-node-3.sh
│   │   ├── d-node-4.sh
│   │   ├── d-node-5.sh
│   │   ├── docker
│   │   │   ├── client-interactive.sh
│   │   │   ├── keep-alive-client-interactive.sh
│   │   │   ├── node-0.sh
│   │   │   ├── node-1.sh
│   │   │   └── node-2.sh
│   │   ├── node-0-single.sh
│   │   ├── node-0.sh
│   │   ├── node-1.sh
│   │   └── node-2.sh
│   └── src
│       └── main
│           ├── java
│           │   └── io
│           │       └── scalecube
│           │           └── acpoc
│           │               ├── ArgsPrinter.java
│           │               ├── ClusterBackupEventsListenerImpl.java
│           │               ├── ClusterBackupRunner.java
│           │               ├── ClusterClientRunner.java
│           │               ├── ClusteredServiceImpl.java
│           │               ├── ClusteredServiceRunner.java
│           │               ├── Configurations.java
│           │               ├── EgressListenerImpl.java
│           │               ├── InteractiveClient.java
│           │               ├── Utils.java
│           │               └── benchmarks
│           │                   ├── BenchmarkClusteredService.java
│           │                   ├── BenchmarkConfigurations.java
│           │                   ├── ClusterClientPing.java
│           │                   ├── ClusteredServiceRunner.java
│           │                   └── LatencyReporter.java
│           └── resources
│               └── log4j2.xml
├── docker-compose.yml
├── pom.xml
├── profiler
│   ├── aeron-stat.sh
│   ├── async-profiler.jar
│   ├── jattach
│   ├── libasyncProfiler.so
│   ├── profiler.sh
│   └── samples.jar
├── requirements.txt
├── src
│   └── main
│       └── scripts
│           ├── cd
│           │   ├── before-deploy.sh
│           │   ├── deploy.sh
│           │   ├── external_build.sh
│           │   ├── release.sh
│           │   └── secrets.tar.enc
│           └── ci
│               ├── after-success.sh
│               └── before-install.sh
└── travis-settings.xml
/.gitignore:
--------------------------------------------------------------------------------
1 | # everything that starts with dot (hidden files)
2 | .*
3 | # except this file
4 | !.gitignore
5 | # except this file extension
6 | !.*.yml
7 |
8 | # Build targets
9 | **/target/
10 |
11 | # logs and reports
12 | *.csv
13 | *.log
14 | *.zip
15 |
16 | # IntelliJ IDEA project files and directories
17 | *.iml
18 |
19 | **/pom.xml.releaseBackup
20 | /release.properties
21 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | addons:
2 | apt:
3 | packages:
4 | - libxml-xpath-perl
5 | sudo: required
6 | language: java
7 | jdk: openjdk11
8 | before_install:
9 | - "./src/main/scripts/ci/before-install.sh"
10 | - "./src/main/scripts/cd/before-deploy.sh"
11 | script: "mvn verify -B"
12 | after_success: "./src/main/scripts/ci/after-success.sh"
13 | deploy:
14 | - provider: script
15 | script:
16 | - "./src/main/scripts/cd/deploy.sh"
17 | on:
18 | branch: develop
19 | - provider: script
20 | script:
21 | - "./src/main/scripts/cd/release.sh"
22 | on:
23 | branch: master
24 | - provider: script
25 | script:
26 | - "./src/main/scripts/cd/release.sh"
27 | on:
28 | tags: true
29 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM openjdk:11
2 |
3 | RUN apt-get update
4 | RUN apt-get -y install vim
5 |
6 | COPY aeron-cluster-poc-examples/target /app/target
7 | COPY aeron-cluster-poc-examples/scripts/docker /app/scripts
8 |
9 | WORKDIR /app
10 |
11 | ENV NUMBER="0"
12 |
13 | RUN ["chmod", "+x", "scripts/node-0.sh"]
14 | RUN ["chmod", "+x", "scripts/node-1.sh"]
15 | RUN ["chmod", "+x", "scripts/node-2.sh"]
16 | CMD ["/bin/sh", "-c", "scripts/node-${NUMBER}.sh"]
--------------------------------------------------------------------------------
/aeron-cluster-poc-benchmarks/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM openjdk:11
2 |
3 | ARG SERVICE_NAME
4 | ARG EXECUTABLE_JAR
5 | ENV DUMPS_DIR /opt/exchange/dumps
6 | ENV SERVICE_NAME ${SERVICE_NAME}
7 |
8 | WORKDIR /opt/exchange
9 |
10 | RUN mkdir -p $DUMPS_DIR
11 |
12 | ENV DEFAULT_JAVA_OPTS="-server \
13 | -XX:+DisableExplicitGC \
14 | -Dsun.rmi.dgc.client.gcInterval=3600000 \
15 | -Dsun.rmi.dgc.server.gcInterval=3600000"
16 |
17 | ENV DEFAULT_OOM_OPTS="-XX:+HeapDumpOnOutOfMemoryError \
18 | -XX:HeapDumpPath=$DUMPS_DIR/$SERVICE_NAME-oom.hprof \
19 | -XX:+UseGCOverheadLimit"
20 |
21 | COPY target/lib lib
22 | COPY target/${EXECUTABLE_JAR}.jar benchmarks.jar
23 |
24 | # jmx server port
25 | EXPOSE 5678
26 |
27 | CMD exec java \
28 | $DEFAULT_JAVA_OPTS \
29 | $JAVA_OPTS \
30 | $DEFAULT_OOM_OPTS \
31 | $OOM_OPTS \
32 | -cp benchmarks.jar $PROGRAM_ARGS
33 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-benchmarks/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0"
3 |     xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
4 |     xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
5 |   <parent>
6 |     <artifactId>aeron-cluster-poc-parent</artifactId>
7 |     <groupId>io.scalecube</groupId>
8 |     <version>0.0.2-SNAPSHOT</version>
9 |   </parent>
10 |   <modelVersion>4.0.0</modelVersion>
11 |
12 |   <artifactId>aeron-cluster-poc-benchmarks</artifactId>
13 |
14 |   <dependencies>
15 |     <dependency>
16 |       <groupId>org.openjdk.jmh</groupId>
17 |       <artifactId>jmh-core</artifactId>
18 |     </dependency>
19 |     <dependency>
20 |       <groupId>org.openjdk.jmh</groupId>
21 |       <artifactId>jmh-generator-annprocess</artifactId>
22 |     </dependency>
23 |
24 |     <dependency>
25 |       <groupId>io.aeron</groupId>
26 |       <artifactId>aeron-driver</artifactId>
27 |     </dependency>
28 |     <dependency>
29 |       <groupId>io.aeron</groupId>
30 |       <artifactId>aeron-client</artifactId>
31 |     </dependency>
32 |     <dependency>
33 |       <groupId>io.aeron</groupId>
34 |       <artifactId>aeron-samples</artifactId>
35 |     </dependency>
36 |     <dependency>
37 |       <groupId>io.aeron</groupId>
38 |       <artifactId>aeron-cluster</artifactId>
39 |     </dependency>
40 |
41 |     <dependency>
42 |       <groupId>com.fasterxml.jackson.core</groupId>
43 |       <artifactId>jackson-core</artifactId>
44 |     </dependency>
45 |     <dependency>
46 |       <groupId>com.fasterxml.jackson.core</groupId>
47 |       <artifactId>jackson-annotations</artifactId>
48 |     </dependency>
49 |     <dependency>
50 |       <groupId>com.fasterxml.jackson.core</groupId>
51 |       <artifactId>jackson-databind</artifactId>
52 |     </dependency>
53 |     <dependency>
54 |       <groupId>com.fasterxml.jackson.datatype</groupId>
55 |       <artifactId>jackson-datatype-jsr310</artifactId>
56 |     </dependency>
57 |
58 |     <dependency>
59 |       <groupId>io.scalecube</groupId>
60 |       <artifactId>scalecube-commons</artifactId>
61 |     </dependency>
62 |     <dependency>
63 |       <groupId>io.scalecube</groupId>
64 |       <artifactId>scalecube-services-discovery</artifactId>
65 |     </dependency>
66 |
67 |     <dependency>
68 |       <groupId>com.opencsv</groupId>
69 |       <artifactId>opencsv</artifactId>
70 |     </dependency>
71 |
72 |     <dependency>
73 |       <groupId>org.hdrhistogram</groupId>
74 |       <artifactId>HdrHistogram</artifactId>
75 |     </dependency>
76 |     <dependency>
77 |       <groupId>io.scalecube</groupId>
78 |       <artifactId>trace-reporter</artifactId>
79 |     </dependency>
80 |   </dependencies>
81 |
82 |   <build>
83 |     <plugins>
84 |       <plugin>
85 |         <groupId>org.codehaus.mojo</groupId>
86 |         <artifactId>build-helper-maven-plugin</artifactId>
87 |         <version>3.0.0</version>
88 |         <executions>
89 |           <execution>
90 |             <id>add-source</id>
91 |             <phase>generate-sources</phase>
92 |             <goals>
93 |               <goal>add-source</goal>
94 |             </goals>
95 |             <configuration>
96 |               <sources>
97 |                 <source>${project.build.directory}/generated-sources/java/</source>
98 |               </sources>
99 |             </configuration>
100 |           </execution>
101 |         </executions>
102 |       </plugin>
103 |       <plugin>
104 |         <groupId>org.apache.maven.plugins</groupId>
105 |         <artifactId>maven-compiler-plugin</artifactId>
106 |       </plugin>
107 |       <plugin>
108 |         <groupId>org.apache.maven.plugins</groupId>
109 |         <artifactId>maven-surefire-plugin</artifactId>
110 |       </plugin>
111 |       <plugin>
112 |         <artifactId>maven-jar-plugin</artifactId>
113 |       </plugin>
114 |       <plugin>
115 |         <artifactId>maven-dependency-plugin</artifactId>
116 |       </plugin>
117 |       <plugin>
118 |         <groupId>com.spotify</groupId>
119 |         <artifactId>dockerfile-maven-plugin</artifactId>
120 |       </plugin>
121 |     </plugins>
122 |   </build>
123 | </project>
124 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-benchmarks/src/main/java/io/scalecube/acpoc/benchmarks/ClusterClient.java:
--------------------------------------------------------------------------------
1 | package io.scalecube.acpoc.benchmarks;
2 |
3 | import io.aeron.CommonContext;
4 | import io.aeron.cluster.client.AeronCluster;
5 | import io.aeron.cluster.client.AeronCluster.AsyncConnect;
6 | import io.aeron.cluster.client.EgressListener;
7 | import io.aeron.driver.MediaDriver;
8 | import io.aeron.driver.MediaDriver.Context;
9 | import io.scalecube.net.Address;
10 | import io.scalecube.services.ServiceEndpoint;
11 | import io.scalecube.services.discovery.ScalecubeServiceDiscovery;
12 | import io.scalecube.services.discovery.api.ServiceDiscovery;
13 | import java.nio.file.Paths;
14 | import java.time.Duration;
15 | import java.time.LocalDateTime;
16 | import java.util.Collections;
17 | import java.util.List;
18 | import java.util.UUID;
19 | import java.util.concurrent.TimeUnit;
20 | import java.util.function.Supplier;
21 | import org.agrona.CloseHelper;
22 | import org.agrona.concurrent.SleepingIdleStrategy;
23 |
24 | public class ClusterClient implements AutoCloseable {
25 |
26 | private MediaDriver clusterClientMediaDriver;
27 | private AeronCluster clusterClient;
28 |
29 | public static ClusterClient launch(EgressListener egressListener) {
30 | return new ClusterClient(egressListener);
31 | }
32 |
33 | private ClusterClient(EgressListener egressListener) {
34 | try {
35 | start(egressListener);
36 | } catch (Throwable th) {
37 | close();
38 | throw th;
39 | }
40 | }
41 |
42 | private void start(EgressListener egressListener) {
43 | String clientId = "client-benchmark-" + UUID.randomUUID();
44 | String clientDirName =
45 | Paths.get(CommonContext.getAeronDirectoryName(), "aeron", "cluster", clientId).toString();
46 | System.out.println("Cluster client directory: " + clientDirName);
47 |
48 | clusterClientMediaDriver =
49 | MediaDriver.launch(
50 | new Context()
51 | .aeronDirectoryName(clientDirName)
52 | .warnIfDirectoryExists(true)
53 | .dirDeleteOnStart(true)
54 | .dirDeleteOnShutdown(true)
55 | .printConfigurationOnStart(true)
56 | .errorHandler(Throwable::printStackTrace));
57 |
58 | List<Address> endpoints =
59 | Collections.singletonList(Address.create(Runners.HOST_ADDRESS, Runners.NODE_BASE_PORT));
60 | if (Runners.CONNECT_VIA_SEED) {
61 | ServiceDiscovery serviceDiscovery =
62 | new ScalecubeServiceDiscovery(
63 | ServiceEndpoint.builder()
64 | .id(UUID.randomUUID().toString())
65 | .address(Address.create(Runners.HOST_ADDRESS, 0))
66 | .build())
67 | .options(
68 | options -> options.membership(cfg -> cfg.seedMembers(Runners.seedMembers())));
69 | endpoints = ClusterMember.endpoints(serviceDiscovery).block(Duration.ofMinutes(10));
70 | }
71 | String clusterMemberEndpoints = ClusterMember.toClusterMemberEndpoints(endpoints);
72 | System.out.println(
73 | LocalDateTime.now() + " [client] clusterMemberEndpoints = " + clusterMemberEndpoints);
74 |
75 | clusterClient =
76 | connect(
77 | () ->
78 | new AeronCluster.Context()
79 | .messageTimeoutNs(TimeUnit.SECONDS.toNanos(60))
80 | .clusterMemberEndpoints(clusterMemberEndpoints)
81 | .ingressChannel(ClusterMember.ingressChannel(Runners.CLIENT_BASE_PORT))
82 | .egressChannel(ClusterMember.egressChannel(Runners.CLIENT_BASE_PORT))
83 | .egressListener(egressListener)
84 | .aeronDirectoryName(clusterClientMediaDriver.aeronDirectoryName())
85 | .errorHandler(Throwable::printStackTrace));
86 |
87 | System.out.println(LocalDateTime.now() + " [client] Connected to " + clusterMemberEndpoints);
88 | }
89 |
90 | public AeronCluster client() {
91 | return clusterClient;
92 | }
93 |
94 | @Override
95 | public void close() {
96 | CloseHelper.quietClose(clusterClient);
97 | CloseHelper.quietClose(clusterClientMediaDriver);
98 | }
99 |
100 | private AeronCluster connect(Supplier<AeronCluster.Context> contextSupplier) {
101 | AsyncConnect asyncConnect = AeronCluster.asyncConnect(contextSupplier.get().clone());
102 | SleepingIdleStrategy idleStrategy =
103 | new SleepingIdleStrategy(TimeUnit.MILLISECONDS.toNanos(200));
104 | while (true) {
105 | try {
106 | AeronCluster aeronCluster = asyncConnect.poll();
107 | if (aeronCluster != null) {
108 | return aeronCluster;
109 | }
110 | } catch (Throwable th) {
111 | CloseHelper.quietClose(asyncConnect);
112 | System.err.println(LocalDateTime.now() + " " + th.getMessage());
113 | asyncConnect = AeronCluster.asyncConnect(contextSupplier.get().clone());
114 | }
115 | System.out.println(LocalDateTime.now() + " waiting to connect");
116 | idleStrategy.idle();
117 | }
118 | }
119 | }
120 |
--------------------------------------------------------------------------------
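A minimal usage sketch of ClusterClient (illustrative only, not a file in this repository; it assumes a cluster node is already reachable at Runners.HOST_ADDRESS:Runners.NODE_BASE_PORT, and the class name is hypothetical):

package io.scalecube.acpoc.benchmarks;

import io.aeron.cluster.client.AeronCluster;
import io.aeron.cluster.client.EgressListener;

public class ClusterClientUsageSketch {

  public static void main(String[] args) {
    // EgressListener's single abstract method is onMessage, so a lambda is enough here.
    EgressListener listener =
        (clusterSessionId, timestamp, buffer, offset, length, header) ->
            System.out.println("egress message, length=" + length);

    // launch() starts a dedicated client MediaDriver and blocks until the cluster is connected.
    try (ClusterClient clusterClient = ClusterClient.launch(listener)) {
      AeronCluster client = clusterClient.client();
      client.sendKeepAlive(); // simple liveness check against the connected cluster
    }
  }
}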
/aeron-cluster-poc-benchmarks/src/main/java/io/scalecube/acpoc/benchmarks/ClusterConstants.java:
--------------------------------------------------------------------------------
1 | package io.scalecube.acpoc.benchmarks;
2 |
3 | public class ClusterConstants {
4 |
5 | public static final int LOG_CHANNEL_PORT_OFFSET = 1;
6 | public static final int ARCHIVE_CONTROL_REQUEST_CHANNEL_PORT_OFFSET = 2;
7 | public static final int ARCHIVE_CONTROL_RESPONSE_CHANNEL_PORT_OFFSET = 3;
8 | public static final int ARCHIVE_RECORDING_EVENTS_CHANNEL_PORT_OFFSET = 4;
9 | public static final int SERVICE_CLIENT_FACING_PORT_OFFSET = 5;
10 | public static final int MEMBER_FACTING_PORT_OFFSET = 6;
11 | public static final int LOG_PORT_OFFSET = 7;
12 | public static final int TRANSFER_PORT_OFFSET = 8;
13 | public static final int INGRESS_CHANNEL_PORT_OFFSET = 9;
14 | public static final int EGRESS_CHANNEL_PORT_OFFSET = 10;
15 | public static final int SNAPSHOT_CHANNEL_PORT_OFFSET = 11;
16 | public static final int ORDER_EVENTS_CHANNEL_PORT_OFFSET = 12;
17 |
18 | public static final int ARCHIVE_CONTROL_REQUEST_STREAM_ID_OFFSET = 100;
19 | public static final int ARCHIVE_CONTROL_RESPONSE_STREAM_ID_OFFSET = 110;
20 |
21 | public static final int SNAPSHOT_STREAM_ID = 120;
22 | public static final int ORDER_EVENTS_STREAM_ID = 130;
23 |
24 | private ClusterConstants() {
25 | // Do not instantiate
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
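The offsets above are added to a member's base port (the stream-id offsets are likewise added to the base port to derive stream ids, see ClusteredServiceAddressing). A small illustrative calculation, assuming the default node base port of 10000 from Runners.java (the class name is hypothetical):

package io.scalecube.acpoc.benchmarks;

public class PortOffsetSketch {

  public static void main(String[] args) {
    int basePort = 10000; // Runners.NODE_BASE_PORT default
    System.out.println(basePort + ClusterConstants.SERVICE_CLIENT_FACING_PORT_OFFSET); // 10005
    System.out.println(basePort + ClusterConstants.INGRESS_CHANNEL_PORT_OFFSET); // 10009
    System.out.println(basePort + ClusterConstants.EGRESS_CHANNEL_PORT_OFFSET); // 10010
    System.out.println(basePort + ClusterConstants.ARCHIVE_CONTROL_REQUEST_STREAM_ID_OFFSET); // 10100
  }
}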
/aeron-cluster-poc-benchmarks/src/main/java/io/scalecube/acpoc/benchmarks/ClusterJmhRunner.java:
--------------------------------------------------------------------------------
1 | package io.scalecube.acpoc.benchmarks;
2 |
3 | import org.openjdk.jmh.runner.Runner;
4 | import org.openjdk.jmh.runner.RunnerException;
5 | import org.openjdk.jmh.runner.options.Options;
6 | import org.openjdk.jmh.runner.options.OptionsBuilder;
7 | import org.openjdk.jmh.runner.options.VerboseMode;
8 |
9 | public class ClusterJmhRunner {
10 |
11 | /**
12 | * Main method.
13 | *
14 | * @param args args
15 | * @throws RunnerException runner exception
16 | */
17 | public static void main(String[] args) throws RunnerException {
18 | OptionsBuilder optionsBuilder = new OptionsBuilder();
19 | if (Runners.asyncProfilerEnabled()) {
20 | optionsBuilder.jvmArgsPrepend(Runners.asyncProfilerAgentString(ClusterJmhRunner.class));
21 | }
22 | Options options =
23 | optionsBuilder
24 | .forks(Runners.forks())
25 | .jvmArgsAppend(Runners.jvmArgs())
26 | .threads(1)
27 | .verbosity(VerboseMode.NORMAL)
28 | .warmupIterations(Runners.warmupIterations())
29 | .warmupTime(Runners.warmupTime())
30 | .measurementIterations(Runners.measurementIterations())
31 | .measurementTime(Runners.measurementTime())
32 | .result(Runners.resultFilename(ClusterJmhRunner.class))
33 | .include(Runners.includeBenchmarks("acpoc.benchmarks.*.*Benchmark"))
34 | .shouldFailOnError(true)
35 | .build();
36 | new Runner(options).run();
37 | }
38 | }
39 |
--------------------------------------------------------------------------------
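All run parameters are resolved from system properties in Runners.java. A hedged sketch of overriding them programmatically before delegating to the runner (property names are taken from Runners; the wrapper class is illustrative only):

package io.scalecube.acpoc.benchmarks;

public class JmhRunSketch {

  public static void main(String[] args) throws Exception {
    System.setProperty("includeBenchmarks", ".*ClusterRoundTripBenchmark.*");
    System.setProperty("forks", "1");
    System.setProperty("warmup.iterations", "1");
    System.setProperty("measurement.iterations", "1");
    ClusterJmhRunner.main(args); // builds the JMH Options from these properties and runs
  }
}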
/aeron-cluster-poc-benchmarks/src/main/java/io/scalecube/acpoc/benchmarks/ClusterLatencyBenchmark.java:
--------------------------------------------------------------------------------
1 | package io.scalecube.acpoc.benchmarks;
2 |
3 | import io.aeron.Counter;
4 | import io.aeron.Publication;
5 | import io.aeron.cluster.client.AeronCluster;
6 | import io.aeron.cluster.client.EgressListener;
7 | import io.aeron.logbuffer.Header;
8 | import io.scalecube.acpoc.benchmarks.report.latency.ConsoleReportingLatencyListener;
9 | import io.scalecube.acpoc.benchmarks.report.latency.CsvReportingLatencyListener;
10 | import io.scalecube.acpoc.benchmarks.report.latency.LatencyReporter;
11 | import java.util.concurrent.TimeUnit;
12 | import org.agrona.BitUtil;
13 | import org.agrona.BufferUtil;
14 | import org.agrona.CloseHelper;
15 | import org.agrona.DirectBuffer;
16 | import org.agrona.concurrent.Agent;
17 | import org.agrona.concurrent.UnsafeBuffer;
18 |
19 | public class ClusterLatencyBenchmark {
20 |
21 | /**
22 | * Main method.
23 | *
24 | * @param args args
25 | */
26 | public static void main(String[] args) throws Exception {
27 | try (State state = new State()) {
28 | TimeUnit.SECONDS.sleep(Runners.warmupTime().getTime() * Runners.warmupIterations());
29 | TimeUnit.SECONDS.sleep(Runners.measurementTime().getTime() * Runners.measurementIterations());
30 | }
31 | }
32 |
33 | private static class State implements EgressListener, AutoCloseable {
34 |
35 | private ClusterNode clusterNode;
36 | private ClusterClient clusterClient;
37 | private Counter requested;
38 | private SenderReceiverAgentRunner senderReceiverRunner;
39 | private LatencyReporter reporter;
40 |
41 | State() {
42 | try {
43 | start();
44 | } catch (Throwable th) {
45 | close();
46 | throw th;
47 | }
48 | }
49 |
50 | private void start() {
51 | clusterNode = ClusterNode.launch();
52 | clusterClient = ClusterClient.launch(this);
53 | requested = clusterClient.client().context().aeron().addCounter(404, "requested");
54 | reporter =
55 | LatencyReporter.launch(
56 | new ConsoleReportingLatencyListener(),
57 | new CsvReportingLatencyListener(ClusterLatencyBenchmark.class));
58 | Agent senderAgent = new SenderAgent(clusterClient.client(), requested);
59 | Agent receiverAgent = new ReceiverAgent(clusterClient.client());
60 | senderReceiverRunner = SenderReceiverAgentRunner.launch(senderAgent, receiverAgent);
61 | }
62 |
63 | @Override
64 | public void onMessage(
65 | long clusterSessionId,
66 | long timestamp,
67 | DirectBuffer buffer,
68 | int offset,
69 | int length,
70 | Header header) {
71 | long start = buffer.getLong(offset);
72 | long diff = System.nanoTime() - start;
73 | this.requested.getAndAddOrdered(-1);
74 | reporter.onDiff(diff);
75 | }
76 |
77 | @Override
78 | public void close() {
79 | CloseHelper.quietCloseAll(
80 | reporter, senderReceiverRunner, requested, clusterClient, clusterNode);
81 | }
82 |
83 | private static class SenderAgent implements Agent {
84 |
85 | private static final int MESSAGE_LENGTH = Runners.MESSAGE_LENGTH;
86 |
87 | private final AeronCluster client;
88 | private final UnsafeBuffer offerBuffer;
89 | private final Counter requested;
90 |
91 | private SenderAgent(AeronCluster client, Counter requested) {
92 | this.client = client;
93 | this.offerBuffer =
94 | new UnsafeBuffer(
95 | BufferUtil.allocateDirectAligned(MESSAGE_LENGTH, BitUtil.CACHE_LINE_LENGTH));
96 | this.requested = requested;
97 | }
98 |
99 | @Override
100 | public int doWork() {
101 | long produced = requested.get();
102 | if (produced <= Runners.ROUND_TRIP_MESSAGES_COUNT) {
103 | offerBuffer.putLong(0, System.nanoTime());
104 | long result = client.offer(offerBuffer, 0, MESSAGE_LENGTH);
105 | if (result > 0) {
106 | requested.incrementOrdered();
107 | return 1;
108 | }
109 | checkResult(result);
110 | return (int) result;
111 | }
112 | return 0;
113 | }
114 |
115 | private void checkResult(final long result) {
116 | if (result == Publication.NOT_CONNECTED
117 | || result == Publication.CLOSED
118 | || result == Publication.MAX_POSITION_EXCEEDED) {
119 | throw new IllegalStateException("unexpected publication state: " + result);
120 | }
121 | if (Thread.currentThread().isInterrupted()) {
122 | throw new IllegalStateException("Thread.currentThread().isInterrupted()");
123 | }
124 | }
125 |
126 | @Override
127 | public String roleName() {
128 | return "SenderAgent";
129 | }
130 | }
131 |
132 | private static class ReceiverAgent implements Agent {
133 |
134 | private final AeronCluster client;
135 |
136 | private ReceiverAgent(AeronCluster client) {
137 | this.client = client;
138 | }
139 |
140 | @Override
141 | public int doWork() {
142 | return client.pollEgress();
143 | }
144 |
145 | @Override
146 | public String roleName() {
147 | return "ReceiverAgent";
148 | }
149 | }
150 | }
151 | }
152 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-benchmarks/src/main/java/io/scalecube/acpoc/benchmarks/ClusterMember.java:
--------------------------------------------------------------------------------
1 | package io.scalecube.acpoc.benchmarks;
2 |
3 | import io.aeron.ChannelUriStringBuilder;
4 | import io.scalecube.net.Address;
5 | import io.scalecube.services.ServiceEndpoint;
6 | import io.scalecube.services.discovery.api.ServiceDiscovery;
7 | import io.scalecube.services.discovery.api.ServiceDiscoveryEvent;
8 | import java.util.Collection;
9 | import java.util.List;
10 | import java.util.StringJoiner;
11 | import reactor.core.publisher.Mono;
12 |
13 | public class ClusterMember {
14 |
15 | /**
16 | * Connects to the cluster with the given seeds to get cluster member endpoints.
17 | *
18 | * @return clusterMemberEndpoints
19 | */
20 | public static Mono<List<Address>> endpoints(ServiceDiscovery serviceDiscovery) {
21 | return serviceDiscovery
22 | .listenDiscovery()
23 | .mergeWith(
24 | serviceDiscovery
25 | .start()
26 | .doOnNext(
27 | self -> System.out.println("service discovery listen on " + self.address()))
28 | .then(Mono.empty()))
29 | .doOnNext(System.out::println)
30 | .filter(ServiceDiscoveryEvent::isGroupAdded)
31 | .take(1)
32 | .flatMapIterable(ServiceDiscoveryEvent::serviceEndpoints)
33 | .map(ServiceEndpoint::address)
34 | .collectList()
35 | .doOnNext(endpoints -> System.out.println("group endpoints = " + endpoints))
36 | .flatMap(endpoints -> serviceDiscovery.shutdown().thenReturn(endpoints));
37 | }
38 |
39 | /**
40 | * Converts the given addresses to a cluster member endpoints property.
41 | *
42 | * @param endpoints endpoints
43 | * @return cluster member endpoints
44 | */
45 | public static String toClusterMemberEndpoints(Collection<Address> endpoints) {
46 | StringJoiner joiner = new StringJoiner(",");
47 | endpoints.forEach(
48 | address ->
49 | joiner.add(
50 | new StringBuilder()
51 | .append(address.hashCode())
52 | .append('=')
53 | .append(address.host())
54 | .append(':')
55 | .append(address.port() + ClusterConstants.SERVICE_CLIENT_FACING_PORT_OFFSET)));
56 |
57 | return joiner.toString();
58 | }
59 |
60 | public static String egressChannel() {
61 | return egressChannel(Runners.CLIENT_BASE_PORT);
62 | }
63 |
64 | /**
65 | * Returns egress channel by a given base port.
66 | *
67 | * @param basePort base port
68 | * @return egress channel
69 | */
70 | public static String egressChannel(int basePort) {
71 | return new ChannelUriStringBuilder()
72 | .media("udp")
73 | .reliable(true)
74 | .endpoint(
75 | Address.create(
76 | Runners.HOST_ADDRESS, basePort + ClusterConstants.EGRESS_CHANNEL_PORT_OFFSET)
77 | .toString())
78 | .build();
79 | }
80 |
81 | public static String ingressChannel() {
82 | return ingressChannel(Runners.CLIENT_BASE_PORT);
83 | }
84 |
85 | /**
86 | * Returns ingress channel by a given base port.
87 | *
88 | * @param basePort base port
89 | * @return ingress channel
90 | */
91 | public static String ingressChannel(int basePort) {
92 | return new ChannelUriStringBuilder()
93 | .media("udp")
94 | .reliable(true)
95 | .endpoint(
96 | Address.create(
97 | Runners.HOST_ADDRESS, basePort + ClusterConstants.INGRESS_CHANNEL_PORT_OFFSET)
98 | .toString())
99 | .build();
100 | }
101 | }
102 |
--------------------------------------------------------------------------------
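A sketch of what toClusterMemberEndpoints produces (illustrative hosts, hypothetical class name): each entry has the form "<memberId>=<host>:<client-facing port>", where the member id is Address#hashCode() and the port is the base port plus SERVICE_CLIENT_FACING_PORT_OFFSET; entries are joined with commas.

package io.scalecube.acpoc.benchmarks;

import io.scalecube.net.Address;
import java.util.Arrays;
import java.util.List;

public class ClusterMemberEndpointsSketch {

  public static void main(String[] args) {
    List<Address> endpoints =
        Arrays.asList(Address.create("10.0.0.1", 10000), Address.create("10.0.0.2", 10000));
    // Prints something like "<id1>=10.0.0.1:10005,<id2>=10.0.0.2:10005"
    // (the numeric ids depend on Address#hashCode()).
    System.out.println(ClusterMember.toClusterMemberEndpoints(endpoints));
  }
}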
/aeron-cluster-poc-benchmarks/src/main/java/io/scalecube/acpoc/benchmarks/ClusterNode.java:
--------------------------------------------------------------------------------
1 | package io.scalecube.acpoc.benchmarks;
2 |
3 | import io.aeron.CommonContext;
4 | import io.aeron.archive.Archive;
5 | import io.aeron.archive.client.AeronArchive;
6 | import io.aeron.cluster.ClusteredMediaDriver;
7 | import io.aeron.cluster.ConsensusModule;
8 | import io.aeron.cluster.ConsensusModule.Configuration;
9 | import io.aeron.cluster.service.ClusteredServiceContainer;
10 | import io.aeron.driver.MediaDriver;
11 | import io.scalecube.net.Address;
12 | import io.scalecube.services.ServiceEndpoint;
13 | import io.scalecube.services.discovery.ScalecubeServiceDiscovery;
14 | import io.scalecube.services.discovery.api.ServiceDiscovery;
15 | import java.io.File;
16 | import java.nio.file.Paths;
17 | import java.time.Duration;
18 | import java.util.Collections;
19 | import java.util.List;
20 | import java.util.UUID;
21 | import org.agrona.CloseHelper;
22 | import org.agrona.IoUtil;
23 |
24 | public class ClusterNode implements AutoCloseable {
25 |
26 | private ClusteredMediaDriver nodeClusteredMediaDriver;
27 | private ClusteredServiceContainer nodeClusteredServiceContainer;
28 |
29 | /**
30 | * Main method to run cluster node separately.
31 | *
32 | * @param args args
33 | */
34 | public static void main(String[] args) throws InterruptedException {
35 | Address member = Address.create(Runners.HOST_ADDRESS, Runners.NODE_BASE_PORT);
36 | List<Address> clusterMembers = Collections.singletonList(member);
37 | if (Runners.CONNECT_VIA_SEED) {
38 | ServiceDiscovery serviceDiscovery =
39 | new ScalecubeServiceDiscovery(
40 | ServiceEndpoint.builder()
41 | .id(UUID.randomUUID().toString())
42 | .serviceGroup("ClusterNode", Runners.CLUSTER_GROUP_SIZE)
43 | .address(member)
44 | .build())
45 | .options(
46 | options -> options.membership(cfg -> cfg.seedMembers(Runners.seedMembers())));
47 |
48 | if (Runners.CLUSTER_GROUP_SIZE > 1) {
49 | clusterMembers = ClusterMember.endpoints(serviceDiscovery).block(Duration.ofMinutes(10));
50 | } else {
51 | serviceDiscovery
52 | .start()
53 | .subscribe(
54 | self -> System.out.println("service discovery listen on " + self.address()),
55 | Throwable::printStackTrace);
56 | }
57 | }
58 | try (ClusterNode clusterNode = new ClusterNode(member, clusterMembers)) {
59 | Thread.currentThread().join();
60 | }
61 | }
62 |
63 | /** Launches a cluster node, or returns {@code null} when connecting via seed (node runs separately). */
64 | public static ClusterNode launch() {
65 | if (Runners.CONNECT_VIA_SEED) {
66 | return null;
67 | }
68 | Address member = Address.create(Runners.HOST_ADDRESS, Runners.NODE_BASE_PORT);
69 | List<Address> clusterMembers = Collections.singletonList(member);
70 | return new ClusterNode(member, clusterMembers);
71 | }
72 |
73 | private ClusterNode(Address address, List<Address> clusterMembers) {
74 | try {
75 | start(address, clusterMembers);
76 | } catch (Throwable th) {
77 | close();
78 | throw th;
79 | }
80 | }
81 |
82 | private void start(Address address, List<Address> clusterMembers) {
83 | String clusterMemberId = Integer.toHexString(Configuration.clusterMemberId());
84 | String nodeId = "node-" + clusterMemberId + "-" + UUID.randomUUID();
85 | String nodeDirName = Paths.get(IoUtil.tmpDirName(), "aeron", "cluster", nodeId).toString();
86 | String nodeAeronDirectoryName =
87 | Paths.get(CommonContext.getAeronDirectoryName(), "media", nodeId).toString();
88 | System.out.println("Cluster node directory: " + nodeDirName);
89 |
90 | ClusteredServiceAddressing addressing = new ClusteredServiceAddressing(address);
91 |
92 | AeronArchive.Context aeronArchiveContext =
93 | new AeronArchive.Context()
94 | .aeronDirectoryName(nodeAeronDirectoryName)
95 | .controlRequestChannel(addressing.archiveControlRequestChannel())
96 | .controlRequestStreamId(addressing.archiveControlRequestStreamId())
97 | .controlResponseChannel(addressing.archiveControlResponseChannel())
98 | .controlResponseStreamId(addressing.archiveControlResponseStreamId())
99 | .recordingEventsChannel(addressing.archiveRecordingEventsChannel())
100 | .errorHandler(Throwable::printStackTrace);
101 |
102 | nodeClusteredMediaDriver =
103 | ClusteredMediaDriver.launch(
104 | new MediaDriver.Context()
105 | .aeronDirectoryName(nodeAeronDirectoryName)
106 | .dirDeleteOnStart(true)
107 | .dirDeleteOnShutdown(true)
108 | .warnIfDirectoryExists(true)
109 | .printConfigurationOnStart(true)
110 | .errorHandler(Throwable::printStackTrace),
111 | new Archive.Context()
112 | .aeronDirectoryName(nodeAeronDirectoryName)
113 | .archiveDir(new File(nodeDirName, "archive"))
114 | .deleteArchiveOnStart(true)
115 | .controlChannel(aeronArchiveContext.controlRequestChannel())
116 | .controlStreamId(aeronArchiveContext.controlRequestStreamId())
117 | .localControlStreamId(aeronArchiveContext.controlRequestStreamId())
118 | .recordingEventsChannel(aeronArchiveContext.recordingEventsChannel())
119 | .recordingEventsStreamId(aeronArchiveContext.recordingEventsStreamId())
120 | .localControlChannel("aeron:ipc?term-length=64k")
121 | .errorHandler(Throwable::printStackTrace),
122 | new ConsensusModule.Context()
123 | .aeronDirectoryName(nodeAeronDirectoryName)
124 | .clusterDir(new File(nodeDirName, "consensus-module"))
125 | .deleteDirOnStart(true)
126 | .archiveContext(aeronArchiveContext.clone())
127 | .clusterMemberId(address.hashCode())
128 | .clusterMembers(ClusteredServiceAddressing.toClusterMembers(clusterMembers))
129 | .ingressChannel("aeron:udp?term-length=64k")
130 | .logChannel(addressing.logChannel())
131 | .errorHandler(Throwable::printStackTrace));
132 |
133 | nodeClusteredServiceContainer =
134 | ClusteredServiceContainer.launch(
135 | new ClusteredServiceContainer.Context()
136 | .aeronDirectoryName(nodeAeronDirectoryName)
137 | .archiveContext(aeronArchiveContext.clone())
138 | .clusterDir(new File(nodeDirName, "service"))
139 | .clusteredService(new ClusteredServiceImpl())
140 | .errorHandler(Throwable::printStackTrace));
141 | }
142 |
143 | @Override
144 | public void close() {
145 | CloseHelper.quietClose(nodeClusteredServiceContainer);
146 | CloseHelper.quietClose(nodeClusteredMediaDriver);
147 | }
148 | }
149 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-benchmarks/src/main/java/io/scalecube/acpoc/benchmarks/ClusterRoundTripBenchmark.java:
--------------------------------------------------------------------------------
1 | package io.scalecube.acpoc.benchmarks;
2 |
3 | import io.aeron.cluster.client.AeronCluster;
4 | import io.aeron.cluster.client.EgressListener;
5 | import io.aeron.logbuffer.Header;
6 | import java.util.concurrent.TimeUnit;
7 | import org.agrona.BitUtil;
8 | import org.agrona.BufferUtil;
9 | import org.agrona.CloseHelper;
10 | import org.agrona.DirectBuffer;
11 | import org.agrona.concurrent.UnsafeBuffer;
12 | import org.openjdk.jmh.annotations.Benchmark;
13 | import org.openjdk.jmh.annotations.BenchmarkMode;
14 | import org.openjdk.jmh.annotations.Fork;
15 | import org.openjdk.jmh.annotations.Measurement;
16 | import org.openjdk.jmh.annotations.Mode;
17 | import org.openjdk.jmh.annotations.OperationsPerInvocation;
18 | import org.openjdk.jmh.annotations.OutputTimeUnit;
19 | import org.openjdk.jmh.annotations.Param;
20 | import org.openjdk.jmh.annotations.Scope;
21 | import org.openjdk.jmh.annotations.Setup;
22 | import org.openjdk.jmh.annotations.State;
23 | import org.openjdk.jmh.annotations.TearDown;
24 | import org.openjdk.jmh.annotations.Threads;
25 | import org.openjdk.jmh.annotations.Warmup;
26 | import org.openjdk.jmh.infra.Blackhole;
27 |
28 | @Fork(
29 | value = 1,
30 | jvmArgs = {
31 | "-XX:+UnlockExperimentalVMOptions",
32 | "-XX:+TrustFinalNonStaticFields",
33 | "-XX:+UnlockDiagnosticVMOptions",
34 | "-XX:GuaranteedSafepointInterval=300000",
35 | "-XX:BiasedLockingStartupDelay=0",
36 | "-XX:+UseParallelOldGC",
37 | "-Dagrona.disable.bounds.checks=true",
38 | "-Daeron.term.buffer.sparse.file=false",
39 | "-Daeron.threading.mode=SHARED",
40 | "-Daeron.archive.threading.mode=SHARED",
41 | "-Daeron.archive.file.sync.level=0",
42 | "-Daeron.archive.segment.file.length=1g",
43 | "-Daeron.archive.control.mtu.length=4k",
44 | "-Daeron.spies.simulate.connection=true"
45 | })
46 | @Threads(1)
47 | @Warmup(iterations = 3, time = 5)
48 | @Measurement(iterations = 5, time = 15)
49 | public class ClusterRoundTripBenchmark {
50 |
51 | private static final int OPERATIONS_PER_INVOCATION = 1000;
52 |
53 | /**
54 | * Benchmark.
55 | *
56 | * @param state state
57 | * @param blackhole blackhole
58 | */
59 | @Benchmark
60 | @OutputTimeUnit(TimeUnit.NANOSECONDS)
61 | @BenchmarkMode({Mode.SampleTime})
62 | @OperationsPerInvocation(OPERATIONS_PER_INVOCATION)
63 | public void testTime(BenchmarkState state, Blackhole blackhole) {
64 | for (int i = 0; i < OPERATIONS_PER_INVOCATION; i++) {
65 | blackhole.consume(state.run());
66 | }
67 | }
68 |
69 | /**
70 | * Benchmark.
71 | *
72 | * @param state state
73 | * @param blackhole blackhole
74 | */
75 | @Benchmark
76 | @OutputTimeUnit(TimeUnit.SECONDS)
77 | @BenchmarkMode({Mode.Throughput})
78 | @OperationsPerInvocation(OPERATIONS_PER_INVOCATION)
79 | public void testTps(BenchmarkState state, Blackhole blackhole) {
80 | for (int i = 0; i < OPERATIONS_PER_INVOCATION; i++) {
81 | blackhole.consume(state.run());
82 | }
83 | }
84 |
85 | @State(Scope.Benchmark)
86 | public static class BenchmarkState implements EgressListener {
87 |
88 | @Param({"256"})
89 | private int messageLength;
90 |
91 | private ClusterNode clusterNode;
92 | private ClusterClient clusterClient;
93 | private AeronCluster client;
94 | private UnsafeBuffer offerBuffer;
95 | private boolean receivedResponse;
96 |
97 | long run() {
98 | receivedResponse = false;
99 | long result;
100 | do {
101 | result = client.offer(offerBuffer, 0, messageLength);
102 | } while (result <= 0);
103 | while (!receivedResponse) {
104 | client.pollEgress();
105 | }
106 | return result;
107 | }
108 |
109 | /** Setup method. */
110 | @Setup
111 | public void setUp() {
112 | clusterNode = ClusterNode.launch();
113 | clusterClient = ClusterClient.launch(this);
114 | client = clusterClient.client();
115 |
116 | offerBuffer =
117 | new UnsafeBuffer(
118 | BufferUtil.allocateDirectAligned(messageLength, BitUtil.CACHE_LINE_LENGTH));
119 | }
120 |
121 | @Override
122 | public void onMessage(
123 | long clusterSessionId,
124 | long timestamp,
125 | DirectBuffer buffer,
126 | int offset,
127 | int length,
128 | Header header) {
129 | receivedResponse = true;
130 | }
131 |
132 | /** Tear down method. */
133 | @TearDown
134 | public void tearDown() {
135 | CloseHelper.quietClose(clusterClient);
136 | CloseHelper.quietClose(clusterNode);
137 | }
138 | }
139 | }
140 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-benchmarks/src/main/java/io/scalecube/acpoc/benchmarks/ClusterThroughputBenchmark.java:
--------------------------------------------------------------------------------
1 | package io.scalecube.acpoc.benchmarks;
2 |
3 | import io.aeron.Publication;
4 | import io.aeron.cluster.client.AeronCluster;
5 | import io.aeron.cluster.client.EgressListener;
6 | import io.aeron.logbuffer.Header;
7 | import io.scalecube.acpoc.benchmarks.report.throughput.CsvReportingThroughputListener;
8 | import io.scalecube.acpoc.benchmarks.report.throughput.ThroughputReporter;
9 | import java.util.concurrent.TimeUnit;
10 | import org.agrona.BitUtil;
11 | import org.agrona.BufferUtil;
12 | import org.agrona.CloseHelper;
13 | import org.agrona.DirectBuffer;
14 | import org.agrona.concurrent.Agent;
15 | import org.agrona.concurrent.UnsafeBuffer;
16 |
17 | public class ClusterThroughputBenchmark {
18 |
19 | /**
20 | * Main method.
21 | *
22 | * @param args args
23 | */
24 | public static void main(String[] args) throws Exception {
25 | try (State state = new State()) {
26 | TimeUnit.SECONDS.sleep(Runners.warmupTime().getTime() * Runners.warmupIterations());
27 | TimeUnit.SECONDS.sleep(Runners.measurementTime().getTime() * Runners.measurementIterations());
28 | }
29 | }
30 |
31 | private static class State implements EgressListener, AutoCloseable {
32 |
33 | private ClusterNode clusterNode;
34 | private ClusterClient clusterClient;
35 | private SenderReceiverAgentRunner senderReceiverRunner;
36 | private ThroughputReporter reporter;
37 |
38 | State() {
39 | try {
40 | start();
41 | } catch (Throwable th) {
42 | close();
43 | throw th;
44 | }
45 | }
46 |
47 | private void start() {
48 | clusterNode = ClusterNode.launch();
49 | clusterClient = ClusterClient.launch(this);
50 | reporter =
51 | ThroughputReporter.launch(
52 | new CsvReportingThroughputListener(ClusterThroughputBenchmark.class));
53 | Agent senderAgent = new SenderAgent(clusterClient.client());
54 | Agent receiverAgent = new ReceiverAgent(clusterClient.client());
55 | senderReceiverRunner = SenderReceiverAgentRunner.launch(senderAgent, receiverAgent);
56 | }
57 |
58 | @Override
59 | public void onMessage(
60 | long clusterSessionId,
61 | long timestamp,
62 | DirectBuffer buffer,
63 | int offset,
64 | int length,
65 | Header header) {
66 | reporter.onMessage(1, length);
67 | }
68 |
69 | @Override
70 | public void close() {
71 | CloseHelper.quietCloseAll(reporter, senderReceiverRunner, clusterClient, clusterNode);
72 | }
73 |
74 | private static class SenderAgent implements Agent {
75 |
76 | private static final int MESSAGE_LENGTH = Runners.MESSAGE_LENGTH;
77 |
78 | private final AeronCluster client;
79 | private final UnsafeBuffer offerBuffer;
80 |
81 | private SenderAgent(AeronCluster client) {
82 | this.client = client;
83 | this.offerBuffer =
84 | new UnsafeBuffer(
85 | BufferUtil.allocateDirectAligned(MESSAGE_LENGTH, BitUtil.CACHE_LINE_LENGTH));
86 | }
87 |
88 | @Override
89 | public int doWork() {
90 | long result = client.offer(offerBuffer, 0, MESSAGE_LENGTH);
91 | if (result > 0) {
92 | return 1;
93 | }
94 | checkResult(result);
95 | return 0;
96 | }
97 |
98 | private void checkResult(final long result) {
99 | if (result == Publication.NOT_CONNECTED
100 | || result == Publication.CLOSED
101 | || result == Publication.MAX_POSITION_EXCEEDED) {
102 | throw new IllegalStateException("unexpected publication state: " + result);
103 | }
104 | if (Thread.currentThread().isInterrupted()) {
105 | throw new IllegalStateException("Thread.currentThread().isInterrupted()");
106 | }
107 | }
108 |
109 | @Override
110 | public String roleName() {
111 | return "SenderAgent";
112 | }
113 | }
114 |
115 | private static class ReceiverAgent implements Agent {
116 |
117 | private final AeronCluster client;
118 |
119 | private ReceiverAgent(AeronCluster client) {
120 | this.client = client;
121 | }
122 |
123 | @Override
124 | public int doWork() {
125 | return client.pollEgress();
126 | }
127 |
128 | @Override
129 | public String roleName() {
130 | return "ReceiverAgent";
131 | }
132 | }
133 | }
134 | }
135 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-benchmarks/src/main/java/io/scalecube/acpoc/benchmarks/ClusteredServiceAddressing.java:
--------------------------------------------------------------------------------
1 | package io.scalecube.acpoc.benchmarks;
2 |
3 | import io.aeron.ChannelUriStringBuilder;
4 | import io.aeron.cluster.ConsensusModule.Configuration;
5 | import io.aeron.cluster.ConsensusModule.Context;
6 | import io.scalecube.net.Address;
7 | import java.util.Collection;
8 | import java.util.stream.Collectors;
9 |
10 | /** Class-aggregator of addressing functions. */
11 | public class ClusteredServiceAddressing implements Cloneable {
12 |
13 | private final Address address;
14 |
15 | public ClusteredServiceAddressing(Address address) {
16 | this.address = address;
17 | }
18 |
19 | public Address logChannelAddress() {
20 | return Address.create(
21 | address.host(), address.port() + ClusterConstants.LOG_CHANNEL_PORT_OFFSET);
22 | }
23 |
24 | /**
25 | * get the Archive Control Request Channel Address.
26 | *
27 | * @return the Archive Control Request Channel Address
28 | */
29 | public Address archiveControlRequestChannelAddress() {
30 | return Address.create(
31 | address.host(),
32 | address.port() + ClusterConstants.ARCHIVE_CONTROL_REQUEST_CHANNEL_PORT_OFFSET);
33 | }
34 |
35 | /**
36 | * get the Archive Control Response Channel Address.
37 | *
38 | * @return the Archive Control Response Channel Address
39 | */
40 | public Address archiveControlResponseChannelAddress() {
41 | return Address.create(
42 | address.host(),
43 | address.port() + ClusterConstants.ARCHIVE_CONTROL_RESPONSE_CHANNEL_PORT_OFFSET);
44 | }
45 |
46 | /**
47 | * get the Archive Recording Events Channel Address.
48 | *
49 | * @return the Archive Recording Events Channel Address.
50 | */
51 | public Address archiveRecordingEventsChannelAddress() {
52 | return Address.create(
53 | address.host(),
54 | address.port() + ClusterConstants.ARCHIVE_RECORDING_EVENTS_CHANNEL_PORT_OFFSET);
55 | }
56 |
57 | public int archiveControlRequestStreamId() {
58 | return address.port() + ClusterConstants.ARCHIVE_CONTROL_REQUEST_STREAM_ID_OFFSET;
59 | }
60 |
61 | public int archiveControlResponseStreamId() {
62 | return address.port() + ClusterConstants.ARCHIVE_CONTROL_RESPONSE_STREAM_ID_OFFSET;
63 | }
64 |
65 | /**
66 | * Returns log channel.
67 | *
68 | * @return log channel
69 | */
70 | public String logChannel() {
71 | return new ChannelUriStringBuilder()
72 | .media("udp")
73 | .reliable(true)
74 | .controlMode("manual")
75 | .controlEndpoint(logChannelAddress().toString())
76 | .build();
77 | }
78 |
79 | /**
80 | * Returns archive control request channel.
81 | *
82 | * @return archive control request channel
83 | */
84 | public String archiveControlRequestChannel() {
85 | return new ChannelUriStringBuilder()
86 | .media("udp")
87 | .reliable(true)
88 | .endpoint(archiveControlRequestChannelAddress().toString())
89 | .build();
90 | }
91 |
92 | /**
93 | * Returns archive control response channel.
94 | *
95 | * @return archive control response channel
96 | */
97 | public String archiveControlResponseChannel() {
98 | return new ChannelUriStringBuilder()
99 | .media("udp")
100 | .reliable(true)
101 | .endpoint(archiveControlResponseChannelAddress().toString())
102 | .build();
103 | }
104 |
105 | /**
106 | * Returns archive recording events channel.
107 | *
108 | * @return archive recording events channel
109 | */
110 | public String archiveRecordingEventsChannel() {
111 | return new ChannelUriStringBuilder()
112 | .media("udp")
113 | .reliable(true)
114 | .controlMode("dynamic")
115 | .controlEndpoint(archiveRecordingEventsChannelAddress().toString())
116 | .build();
117 | }
118 |
119 | /**
120 | * Converts to a string suitable for setting {@link Context#clusterMembers(String)}. NOTE:
121 | * this setting is for a static cluster.
122 | */
123 | public static String toClusterMembers(Collection<Address> clusterMembers) {
124 | return clusterMembers.stream()
125 | .map(ClusteredServiceAddressing::new)
126 | .map(ClusteredServiceAddressing::asString)
127 | .collect(Collectors.joining("|"));
128 | }
129 |
130 | /**
131 | * Utility function to form an Aeron Cluster compliant cluster members string. See
132 | * {@link Configuration#CLUSTER_MEMBERS_PROP_NAME} for details.
133 | *
134 | * @return cluster members string in aeron cluster format.
135 | */
136 | private String asString() {
137 | return new StringBuilder()
138 | .append(address.hashCode())
139 | .append(',')
140 | .append(address.host())
141 | .append(':')
142 | .append(address.port() + ClusterConstants.SERVICE_CLIENT_FACING_PORT_OFFSET)
143 | .append(',')
144 | .append(address.host())
145 | .append(':')
146 | .append(address.port() + ClusterConstants.MEMBER_FACTING_PORT_OFFSET)
147 | .append(',')
148 | .append(address.host())
149 | .append(':')
150 | .append(address.port() + ClusterConstants.LOG_PORT_OFFSET)
151 | .append(',')
152 | .append(address.host())
153 | .append(':')
154 | .append(address.port() + ClusterConstants.TRANSFER_PORT_OFFSET)
155 | .append(',')
156 | .append(address.host())
157 | .append(':')
158 | .append(address.port() + ClusterConstants.ARCHIVE_CONTROL_REQUEST_CHANNEL_PORT_OFFSET)
159 | .toString();
160 | }
161 | }
162 |
--------------------------------------------------------------------------------
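For reference, a sketch of the cluster-members string this class builds (illustrative host and port, hypothetical class name): per member the format is "<id>,<client-facing>,<member-facing>,<log>,<transfer>,<archive-control>", and members are joined with '|', as expected by ConsensusModule.Context#clusterMembers(String).

package io.scalecube.acpoc.benchmarks;

import io.scalecube.net.Address;
import java.util.Collections;

public class ClusterMembersStringSketch {

  public static void main(String[] args) {
    String members =
        ClusteredServiceAddressing.toClusterMembers(
            Collections.singletonList(Address.create("10.0.0.1", 10000)));
    // Shape: "<id>,10.0.0.1:10005,10.0.0.1:10006,10.0.0.1:10007,10.0.0.1:10008,10.0.0.1:10002"
    System.out.println(members);
  }
}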
/aeron-cluster-poc-benchmarks/src/main/java/io/scalecube/acpoc/benchmarks/ClusteredServiceImpl.java:
--------------------------------------------------------------------------------
1 | package io.scalecube.acpoc.benchmarks;
2 |
3 | import io.aeron.ExclusivePublication;
4 | import io.aeron.Image;
5 | import io.aeron.Publication;
6 | import io.aeron.cluster.codecs.CloseReason;
7 | import io.aeron.cluster.service.ClientSession;
8 | import io.aeron.cluster.service.Cluster;
9 | import io.aeron.cluster.service.Cluster.Role;
10 | import io.aeron.cluster.service.ClusteredService;
11 | import io.aeron.logbuffer.Header;
12 | import org.agrona.DirectBuffer;
13 | import org.slf4j.Logger;
14 | import org.slf4j.LoggerFactory;
15 |
16 | public class ClusteredServiceImpl implements ClusteredService {
17 |
18 | private static final Logger logger = LoggerFactory.getLogger(ClusteredServiceImpl.class);
19 |
20 | private Cluster cluster;
21 |
22 | @Override
23 | public void onStart(Cluster cluster, Image snapshotImage) {
24 | this.cluster = cluster;
25 | logger.info(
26 | "onStart => memberId: {}, role: {}, client-sessions: {}",
27 | cluster.memberId(),
28 | cluster.role(),
29 | cluster.clientSessions().size());
30 | if (snapshotImage != null) {
31 | onLoadSnapshot(snapshotImage);
32 | }
33 | }
34 |
35 | @Override
36 | public void onSessionOpen(ClientSession session, long timestampMs) {
37 | logger.info(
38 | "onSessionOpen, timestampMs: {} => memberId: {}, sessionId: {}, "
39 | + "responseChannel: {}, responseStreamId: {}",
40 | timestampMs,
41 | cluster.memberId(),
42 | session.id(),
43 | session.responseChannel(),
44 | session.responseStreamId());
45 | }
46 |
47 | @Override
48 | public void onSessionClose(ClientSession session, long timestampMs, CloseReason closeReason) {
49 | logger.info(
50 | "onSessionClose, timestampMs: {} => memberId: {}, "
51 | + "sessionId: {}, responseChannel: {}, responseStreamId: {}, reason: {}",
52 | timestampMs,
53 | cluster.memberId(),
54 | session.id(),
55 | session.responseChannel(),
56 | session.responseStreamId(),
57 | closeReason);
58 | }
59 |
60 | @Override
61 | public void onSessionMessage(
62 | ClientSession session,
63 | long timestampMs,
64 | DirectBuffer buffer,
65 | int offset,
66 | int length,
67 | Header header) {
68 | if (cluster.role() == Role.LEADER) {
69 | // Send response back
70 | while (true) {
71 | long result = session.offer(buffer, offset, length);
72 | if (result > 0) {
73 | break;
74 | }
75 | checkResultAndIdle(result);
76 | }
77 | }
78 | }
79 |
80 | @Override
81 | public void onTimerEvent(long correlationId, long timestampMs) {
82 | logger.info(
83 | "onTimerEvent, timestampMs: {} => memberId: {}, correlationId: {}",
84 | timestampMs,
85 | cluster.memberId(),
86 | correlationId);
87 | }
88 |
89 | @Override
90 | public void onTakeSnapshot(ExclusivePublication snapshotPublication) {
91 | logger.info(
92 | "onTakeSnapshot => publication: memberId: {}, sessionId: {}, channel: {}, "
93 | + "streamId: {}, position: {}",
94 | cluster.memberId(),
95 | snapshotPublication.sessionId(),
96 | snapshotPublication.channel(),
97 | snapshotPublication.streamId(),
98 | snapshotPublication.position());
99 | }
100 |
101 | private void onLoadSnapshot(Image snapshotImage) {
102 | logger.info(
103 | "onLoadSnapshot => image: memberId: {}, sessionId: {}, channel: {}, "
104 | + "streamId: {}, position: {}",
105 | cluster.memberId(),
106 | snapshotImage.sessionId(),
107 | snapshotImage.subscription().channel(),
108 | snapshotImage.subscription().streamId(),
109 | snapshotImage.position());
110 | }
111 |
112 | @Override
113 | public void onRoleChange(Role newRole) {
114 | logger.info("onRoleChange => memberId: {}, new role: {}", cluster.memberId(), newRole);
115 | }
116 |
117 | @Override
118 | public void onTerminate(Cluster cluster) {
119 | logger.info(
120 | "onTerminate => memberId: {}, role: {}, client-sessions: {}",
121 | cluster.memberId(),
122 | cluster.role(),
123 | cluster.clientSessions().size());
124 | }
125 |
126 | private void checkResultAndIdle(long result) {
127 | if (result == Publication.NOT_CONNECTED
128 | || result == Publication.CLOSED
129 | || result == Publication.MAX_POSITION_EXCEEDED) {
130 | throw new IllegalStateException("unexpected publication state: " + result);
131 | }
132 | if (Thread.currentThread().isInterrupted()) {
133 | throw new IllegalStateException("Unexpected interrupt");
134 | }
135 | cluster.idle();
136 | }
137 | }
138 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-benchmarks/src/main/java/io/scalecube/acpoc/benchmarks/Runners.java:
--------------------------------------------------------------------------------
1 | package io.scalecube.acpoc.benchmarks;
2 |
3 | import io.scalecube.net.Address;
4 | import java.io.File;
5 | import java.nio.file.Paths;
6 | import java.util.Arrays;
7 | import java.util.List;
8 | import java.util.stream.Collectors;
9 | import org.openjdk.jmh.annotations.Threads;
10 | import org.openjdk.jmh.runner.options.ChainedOptionsBuilder;
11 | import org.openjdk.jmh.runner.options.TimeValue;
12 |
13 | public class Runners {
14 |
15 | public static final String RESULTS_DIR_PROP_NAME = "resultsDir";
16 | public static final String INCLUDE_BENCHMARKS_PROP_NAME = "includeBenchmarks";
17 | public static final String FORKS_PROP_NAME = "forks";
18 | public static final String WARMUP_ITERATIONS_PROP_NAME = "warmup.iterations";
19 | public static final String WARMUP_TIME_SEC_PROP_NAME = "warmup.timeSec";
20 | public static final String MEASUREMENT_ITERATIONS_PROP_NAME = "measurement.iterations";
21 | public static final String MEASUREMENT_TIME_SEC_PROP_NAME = "measurement.timeSec";
22 | public static final String THREADS_PROP_NAME = "threads";
23 | public static final String JVM_ARGS_PROP_NAME = "jvmArgs";
24 |
25 | public static final String DEFAULT_RESULTS_DIR = "target/jmh";
26 | public static final int DEFAULT_FORKS = 1;
27 | public static final int DEFAULT_WARMUP_ITERATIONS = 3;
28 | public static final long DEFAULT_WARMUP_TIME_SEC = 5;
29 | public static final int DEFAULT_MEASUREMENT_ITERATIONS = 5;
30 | public static final long DEFAULT_MEASUREMENT_TIME = 15;
31 | public static final int DEFAULT_THREADS = Threads.MAX;
32 | public static final String DEFAULT_ASYNC_PROFILER_EVENT = "cpu"; // cache-misses, alloc, lock
33 |
34 | public static final String ASYNC_PROFILER_AGENT_FORMAT =
35 | "-agentpath:profiler/libasyncProfiler.so=start,threads,svg=total,event=%s,file=%s";
36 | public static final String ASYNC_PROFILER_RESULT_FILENAME_FORMAT = "%s/profile-%s-%s.svg";
37 | public static final String ASYNC_PROFILER_ENABLED_PROP_NAME = "asyncProfiler.enabled";
38 | public static final String ASYNC_PROFILER_EVENT_PROP_NAME = "asyncProfiler.event";
39 |
40 | public static final int MESSAGE_LENGTH = Integer.getInteger("benchmark.message.length", 256);
41 | public static final long ROUND_TRIP_MESSAGES_COUNT =
42 | Long.getLong("benchmark.round.trip.messages.count", 1);
43 |
44 | public static final String SEEDS_PROPERTY = "benchmark.seeds";
45 | public static final String DEFAULT_SEEDS = "localhost:4801";
46 | public static final boolean CONNECT_VIA_SEED = Boolean.getBoolean("benchmark.connect.via.seed");
47 | public static final int CLUSTER_GROUP_SIZE =
48 | Integer.getInteger("benchmark.cluster.group.size", 1);
49 |
50 | public static final String BENCHMARK_CLIENT_BASE_PORT_PROPERTY = "benchmark.client.base.port";
51 | public static final int CLIENT_BASE_PORT =
52 | Integer.getInteger(BENCHMARK_CLIENT_BASE_PORT_PROPERTY, 9000);
53 | public static final String BENCHMARK_NODE_BASE_PORT_PROPERTY = "benchmark.node.base.port";
54 | public static final int NODE_BASE_PORT =
55 | Integer.getInteger(BENCHMARK_NODE_BASE_PORT_PROPERTY, 10000);
56 | public static final String HOST_ADDRESS = Address.getLocalIpAddress().getHostAddress();
57 |
58 | private Runners() {
59 | // Do not instantiate
60 | }
61 |
62 | /**
63 | * Returns a boolean indicating whether the async profiler is enabled (and thus whether the JVM
64 | * agent path string must be formed; see also: {@link #asyncProfilerAgentString(Class)}).
65 | *
66 | * @return true or false; by default false.
67 | */
68 | public static boolean asyncProfilerEnabled() {
69 | return Boolean.getBoolean(ASYNC_PROFILER_ENABLED_PROP_NAME);
70 | }
71 |
72 | /**
73 | * Returns profiler string for agentpath to set {@link ChainedOptionsBuilder#jvmArgs(String...)}.
74 | *
75 | * @param clazz clazz
76 | * @return agent path string
77 | */
78 | public static String asyncProfilerAgentString(Class<?> clazz) {
79 | return String.format(
80 | ASYNC_PROFILER_AGENT_FORMAT, asyncProfilerEvent(), asyncProfilerResultFilename(clazz));
81 | }
82 |
83 | /**
84 | * Returns result filename to set {@link ChainedOptionsBuilder#result(String)}.
85 | *
86 | * @param clazz clazz
87 | * @return result filename
88 | */
89 | public static String resultFilename(Class<?> clazz) {
90 | return Paths.get(resultsDir(), clazz.getSimpleName() + ".jmh.csv").toString();
91 | }
92 |
93 | /**
94 | * Returns include regexp string to set {@link ChainedOptionsBuilder#include(String)}.
95 | *
96 | * @param defaultValue default value
97 | * @return include regexp string
98 | */
99 | public static String includeBenchmarks(String defaultValue) {
100 | return System.getProperty(INCLUDE_BENCHMARKS_PROP_NAME, defaultValue);
101 | }
102 |
103 | public static int forks() {
104 | return Integer.getInteger(FORKS_PROP_NAME, DEFAULT_FORKS);
105 | }
106 |
107 | public static int warmupIterations() {
108 | return Integer.getInteger(WARMUP_ITERATIONS_PROP_NAME, DEFAULT_WARMUP_ITERATIONS);
109 | }
110 |
111 | public static TimeValue warmupTime() {
112 | return TimeValue.seconds(Long.getLong(WARMUP_TIME_SEC_PROP_NAME, DEFAULT_WARMUP_TIME_SEC));
113 | }
114 |
115 | public static int measurementIterations() {
116 | return Integer.getInteger(MEASUREMENT_ITERATIONS_PROP_NAME, DEFAULT_MEASUREMENT_ITERATIONS);
117 | }
118 |
119 | public static TimeValue measurementTime() {
120 | return TimeValue.seconds(
121 | Long.getLong(MEASUREMENT_TIME_SEC_PROP_NAME, DEFAULT_MEASUREMENT_TIME));
122 | }
123 |
124 | public static int threads() {
125 | return Integer.getInteger(THREADS_PROP_NAME, DEFAULT_THREADS);
126 | }
127 |
128 | private static String resultsDir() {
129 | String resultsDir = System.getProperty(RESULTS_DIR_PROP_NAME, DEFAULT_RESULTS_DIR);
130 | //noinspection ResultOfMethodCallIgnored
131 | new File(resultsDir).mkdirs();
132 | return resultsDir;
133 | }
134 |
135 | private static String asyncProfilerResultFilename(Class<?> clazz) {
136 | return String.format(
137 | ASYNC_PROFILER_RESULT_FILENAME_FORMAT,
138 | resultsDir(),
139 | clazz.getSimpleName(),
140 | asyncProfilerEvent());
141 | }
142 |
143 | private static String asyncProfilerEvent() {
144 | return System.getProperty(ASYNC_PROFILER_EVENT_PROP_NAME, DEFAULT_ASYNC_PROFILER_EVENT);
145 | }
146 |
147 | /**
148 | * Returns jvmArgs.
149 | *
150 | * @return jvmArgs
151 | */
152 | public static String[] jvmArgs() {
153 | String property = System.getenv("JAVA_OPTS");
154 | if (property == null) {
155 | property = System.getProperty(JVM_ARGS_PROP_NAME, "");
156 | }
157 | return Arrays.stream(property.split("\\s")).filter(s -> !s.isEmpty()).toArray(String[]::new);
158 | }
159 |
160 | /**
161 | * Returns seed members.
162 | *
163 | * @return seed members set
164 | */
165 | public static List<Address> seedMembers() {
166 | return Arrays.stream(System.getProperty(SEEDS_PROPERTY, DEFAULT_SEEDS).split(","))
167 | .map(Address::from)
168 | .collect(Collectors.toList());
169 | }
170 | }
171 |
--------------------------------------------------------------------------------
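A short sketch of how the property-driven helpers above behave (illustrative values, hypothetical class name); note that jvmArgs() prefers the JAVA_OPTS environment variable and falls back to the jvmArgs system property only when that variable is unset.

package io.scalecube.acpoc.benchmarks;

import io.scalecube.net.Address;
import java.util.Arrays;
import java.util.List;

public class RunnersSketch {

  public static void main(String[] args) {
    System.setProperty("benchmark.seeds", "host1:4801,host2:4801");
    List<Address> seeds = Runners.seedMembers(); // two Address entries
    System.out.println(seeds);

    System.setProperty("jvmArgs", "-Xms512m -Xmx512m");
    String[] jvmArgs = Runners.jvmArgs(); // ["-Xms512m", "-Xmx512m"] when JAVA_OPTS is unset
    System.out.println(Arrays.toString(jvmArgs));
  }
}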
/aeron-cluster-poc-benchmarks/src/main/java/io/scalecube/acpoc/benchmarks/SenderReceiverAgentRunner.java:
--------------------------------------------------------------------------------
1 | package io.scalecube.acpoc.benchmarks;
2 |
3 | import static java.lang.System.getProperty;
4 |
5 | import io.aeron.driver.Configuration;
6 | import org.agrona.CloseHelper;
7 | import org.agrona.concurrent.Agent;
8 | import org.agrona.concurrent.AgentRunner;
9 | import org.agrona.concurrent.CompositeAgent;
10 | import org.agrona.concurrent.IdleStrategy;
11 |
12 | public class SenderReceiverAgentRunner implements AutoCloseable {
13 |
14 | enum ThreadingMode {
15 | SHARED,
16 | DEDICATED
17 | }
18 |
19 | public static final String BENCHMARK_THREADING_MODE_PROPERTY = "benchmark.threading.mode";
20 | public static final String BENCHMARK_THREADING_MODE_DEFAULT = "SHARED";
21 | public static final String BENCHMARK_SHARED_IDLE_STRATEGY_PROPERTY =
22 | "benchmark.shared.idle.strategy";
23 | public static final String BENCHMARK_RECEIVER_IDLE_STRATEGY_PROPERTY =
24 | "benchmark.receiver.idle.strategy";
25 | public static final String BENCHMARK_SENDER_IDLE_STRATEGY_PROPERTY =
26 | "benchmark.sender.idle.strategy";
27 |
28 | public static final String DEFAULT_IDLE_STRATEGY = "org.agrona.concurrent.BackoffIdleStrategy";
29 |
30 | private final AgentRunner sharedAgentRunner;
31 | private final AgentRunner senderAgentRunner;
32 | private final AgentRunner receiverAgentRunner;
33 |
34 | public static SenderReceiverAgentRunner launch(Agent senderAgent, Agent receiverAgent) {
35 | return new SenderReceiverAgentRunner(senderAgent, receiverAgent);
36 | }
37 |
38 | private SenderReceiverAgentRunner(Agent senderAgent, Agent receiverAgent) {
39 | ThreadingMode threadingMode =
40 | ThreadingMode.valueOf(
41 | getProperty(BENCHMARK_THREADING_MODE_PROPERTY, BENCHMARK_THREADING_MODE_DEFAULT));
42 | switch (threadingMode) {
43 | case SHARED:
44 | IdleStrategy sharedIdleStrategy =
45 | Configuration.agentIdleStrategy(
46 | getProperty(BENCHMARK_SHARED_IDLE_STRATEGY_PROPERTY, DEFAULT_IDLE_STRATEGY), null);
47 | sharedAgentRunner =
48 | new AgentRunner(
49 | sharedIdleStrategy,
50 | Throwable::printStackTrace,
51 | null,
52 | new CompositeAgent(senderAgent, receiverAgent));
53 | senderAgentRunner = null;
54 | receiverAgentRunner = null;
55 | AgentRunner.startOnThread(sharedAgentRunner);
56 | break;
57 | case DEDICATED:
58 | sharedAgentRunner = null;
59 | IdleStrategy receiverIdleStrategy =
60 | Configuration.agentIdleStrategy(
61 | getProperty(BENCHMARK_RECEIVER_IDLE_STRATEGY_PROPERTY, DEFAULT_IDLE_STRATEGY),
62 | null);
63 | receiverAgentRunner =
64 | new AgentRunner(receiverIdleStrategy, Throwable::printStackTrace, null, receiverAgent);
65 | IdleStrategy senderIdleStrategy =
66 | Configuration.agentIdleStrategy(
67 | getProperty(BENCHMARK_SENDER_IDLE_STRATEGY_PROPERTY, DEFAULT_IDLE_STRATEGY), null);
68 | senderAgentRunner =
69 | new AgentRunner(senderIdleStrategy, Throwable::printStackTrace, null, senderAgent);
70 | AgentRunner.startOnThread(receiverAgentRunner);
71 | AgentRunner.startOnThread(senderAgentRunner);
72 | break;
73 | default:
74 | throw new IllegalArgumentException("ThreadingMode: " + threadingMode);
75 | }
76 | }
77 |
78 | @Override
79 | public void close() {
80 | CloseHelper.quietCloseAll(sharedAgentRunner, senderAgentRunner, receiverAgentRunner);
81 | }
82 | }
83 |
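A minimal usage sketch, assuming two no-op `Agent` implementations: `launch` runs both agents on one thread when `-Dbenchmark.threading.mode=SHARED` (the default) and on two dedicated threads when it is set to `DEDICATED`. The real benchmarks plug in their own sender/receiver agents.

```java
package io.scalecube.acpoc.benchmarks;

import org.agrona.concurrent.Agent;

// Sketch only: two no-op agents driven by SenderReceiverAgentRunner.
public class SenderReceiverUsageSketch {

  public static void main(String[] args) throws Exception {
    Agent sender =
        new Agent() {
          @Override
          public int doWork() {
            return 0; // no work done; the configured idle strategy will back off
          }

          @Override
          public String roleName() {
            return "sender";
          }
        };

    Agent receiver =
        new Agent() {
          @Override
          public int doWork() {
            return 0;
          }

          @Override
          public String roleName() {
            return "receiver";
          }
        };

    try (SenderReceiverAgentRunner runner =
        SenderReceiverAgentRunner.launch(sender, receiver)) {
      Thread.sleep(1000); // let the agents spin for a moment before closing
    }
  }
}
```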
--------------------------------------------------------------------------------
/aeron-cluster-poc-benchmarks/src/main/java/io/scalecube/acpoc/benchmarks/report/latency/CompositeReportingLatencyListener.java:
--------------------------------------------------------------------------------
1 | package io.scalecube.acpoc.benchmarks.report.latency;
2 |
3 | import org.HdrHistogram.Histogram;
4 | import org.agrona.CloseHelper;
5 |
6 | public class CompositeReportingLatencyListener implements LatencyListener {
7 |
8 | private final LatencyListener[] listeners;
9 |
10 | public CompositeReportingLatencyListener(LatencyListener... listeners) {
11 | this.listeners = listeners;
12 | }
13 |
14 | @Override
15 | public void onReport(Histogram intervalHistogram) {
16 | for (LatencyListener latencyListener : listeners) {
17 | latencyListener.onReport(intervalHistogram);
18 | }
19 | }
20 |
21 | @Override
22 | public void close() {
23 | CloseHelper.quietCloseAll(listeners);
24 | }
25 |
26 | @Override
27 | public void onTerminate(Histogram accumulatedHistogram) {
28 | for (LatencyListener latencyListener : listeners) {
29 | latencyListener.onTerminate(accumulatedHistogram);
30 | }
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-benchmarks/src/main/java/io/scalecube/acpoc/benchmarks/report/latency/ConsoleReportingLatencyListener.java:
--------------------------------------------------------------------------------
1 | package io.scalecube.acpoc.benchmarks.report.latency;
2 |
3 | import org.HdrHistogram.Histogram;
4 |
5 | public class ConsoleReportingLatencyListener implements LatencyListener {
6 |
7 | @Override
8 | public void onReport(Histogram histogram) {
9 | histogram.outputPercentileDistribution(System.err, 5, 1000.0, false);
10 | }
11 |
12 | @Override
13 | public void close() throws Exception {
14 | System.err.println("done");
15 | }
16 |
17 | @Override
18 | public void onTerminate(Histogram accumulatedHistogram) {
19 | accumulatedHistogram.outputPercentileDistribution(System.err, 5, 1000.0, false);
20 | }
21 | }
22 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-benchmarks/src/main/java/io/scalecube/acpoc/benchmarks/report/latency/CsvReportingLatencyListener.java:
--------------------------------------------------------------------------------
1 | package io.scalecube.acpoc.benchmarks.report.latency;
2 |
3 | import com.opencsv.CSVWriterBuilder;
4 | import com.opencsv.ICSVWriter;
5 | import io.scalecube.acpoc.benchmarks.Runners;
6 | import java.io.FileWriter;
7 | import org.HdrHistogram.Histogram;
8 | import org.agrona.CloseHelper;
9 | import reactor.core.Exceptions;
10 |
11 | public class CsvReportingLatencyListener implements LatencyListener {
12 |
13 | private static final double VALUE_UNIT_SCALING_RATIO = 1000.0; // microseconds
14 |
15 | private final ICSVWriter csvWriter;
16 | private final String[] csvLine;
17 |
18 | /**
19 | * Initialize CSV latency listener.
20 | *
21 | * @param benchmarkClass benchmark class
22 | */
23 | public CsvReportingLatencyListener(Class<?> benchmarkClass) {
24 | try {
25 | FileWriter fileWriter = new FileWriter(Runners.resultFilename(benchmarkClass), false);
26 | csvWriter = new CSVWriterBuilder(fileWriter).build();
27 | String[] title = {"p70", "p80", "p90", "p99"};
28 | csvWriter.writeNext(title);
29 | csvWriter.flushQuietly();
30 | csvLine = title;
31 | } catch (Exception e) {
32 | throw Exceptions.propagate(e);
33 | }
34 | }
35 |
36 | @Override
37 | public void onReport(Histogram histogram) {
38 | csvLine[0] =
39 | String.format("%.03g", histogram.getValueAtPercentile(70d) / VALUE_UNIT_SCALING_RATIO);
40 | csvLine[1] =
41 | String.format("%.03g", histogram.getValueAtPercentile(80d) / VALUE_UNIT_SCALING_RATIO);
42 | csvLine[2] =
43 | String.format("%.03g", histogram.getValueAtPercentile(90d) / VALUE_UNIT_SCALING_RATIO);
44 | csvLine[3] =
45 | String.format("%.03g", histogram.getValueAtPercentile(99d) / VALUE_UNIT_SCALING_RATIO);
46 |
47 | csvWriter.writeNext(csvLine);
48 | csvWriter.flushQuietly();
49 | }
50 |
51 | @Override
52 | public void close() {
53 | CloseHelper.quietClose(csvWriter);
54 | }
55 |
56 | @Override
57 | public void onTerminate(Histogram accumulatedHistogram) {
58 | // nothing to do here
59 | }
60 | }
61 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-benchmarks/src/main/java/io/scalecube/acpoc/benchmarks/report/latency/LatencyListener.java:
--------------------------------------------------------------------------------
1 | package io.scalecube.acpoc.benchmarks.report.latency;
2 |
3 | import org.HdrHistogram.Histogram;
4 |
5 | public interface LatencyListener extends AutoCloseable {
6 | /**
7 | * Called for a latency report.
8 | *
9 | * @param intervalHistogram the histogram.
10 | */
11 | void onReport(Histogram intervalHistogram);
12 |
13 | void onTerminate(Histogram accumulatedHistogram);
14 | }
15 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-benchmarks/src/main/java/io/scalecube/acpoc/benchmarks/report/latency/LatencyReporter.java:
--------------------------------------------------------------------------------
1 | package io.scalecube.acpoc.benchmarks.report.latency;
2 |
3 | import io.scalecube.acpoc.benchmarks.Runners;
4 | import java.time.Duration;
5 | import java.util.concurrent.TimeUnit;
6 | import org.HdrHistogram.Histogram;
7 | import org.HdrHistogram.Recorder;
8 | import org.agrona.CloseHelper;
9 | import reactor.core.Disposable;
10 | import reactor.core.publisher.Flux;
11 | import reactor.core.scheduler.Schedulers;
12 |
13 | public class LatencyReporter implements AutoCloseable {
14 |
15 | private final Recorder histogram;
16 | private final Disposable disposable;
17 |
18 | private Histogram accumulatedHistogram;
19 |
20 | private boolean warmupFinished = false;
21 | private final LatencyListener listener;
22 |
23 | /**
24 | * Launch this test reporter.
25 | *
26 | * @param listeners latency listeners
27 | * @return a reporter
28 | */
29 | public static LatencyReporter launch(LatencyListener... listeners) {
30 | return new LatencyReporter(new CompositeReportingLatencyListener(listeners));
31 | }
32 |
33 | private LatencyReporter(LatencyListener listener) {
34 | this.listener = listener;
35 | this.histogram = new Recorder(TimeUnit.SECONDS.toNanos(10), 3);
36 | Duration reportDelay =
37 | Duration.ofSeconds(
38 | Runners.warmupTime().convertTo(TimeUnit.SECONDS) * Runners.warmupIterations());
39 | Duration reportInterval = Duration.ofSeconds(Long.getLong("benchmark.report.interval", 1));
40 | this.disposable =
41 | Flux.interval(reportDelay, reportInterval, Schedulers.single())
42 | .doOnCancel(this::onTerminate)
43 | .subscribe(i -> this.run(), Throwable::printStackTrace);
44 | }
45 |
46 | private void run() {
47 | if (warmupFinished) {
48 | Histogram intervalHistogram = histogram.getIntervalHistogram();
49 | if (accumulatedHistogram != null) {
50 | accumulatedHistogram.add(intervalHistogram);
51 | } else {
52 | accumulatedHistogram = intervalHistogram;
53 | }
54 |
55 | listener.onReport(intervalHistogram);
56 | } else {
57 | warmupFinished = true;
58 | histogram.reset();
59 | }
60 | }
61 |
62 | private void onTerminate() {
63 | listener.onTerminate(accumulatedHistogram);
64 | }
65 |
66 | public void onDiff(long diff) {
67 | histogram.recordValue(diff);
68 | }
69 |
70 | @Override
71 | public void close() {
72 | disposable.dispose();
73 | histogram.reset();
74 | CloseHelper.quietClose(listener);
75 | }
76 | }
77 |
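A usage sketch of the reporter: record each round-trip time in nanoseconds via `onDiff`, and interval histograms are pushed to the listeners once the warmup window has elapsed. The stdout listener below is a hand-rolled illustration (with a null-check in `onTerminate`) rather than one of the module's listeners, to keep the sketch self-contained.

```java
package io.scalecube.acpoc.benchmarks.report.latency;

import org.HdrHistogram.Histogram;

// Sketch only: records one synthetic round trip per iteration and prints
// percentile distributions (scaled to microseconds) through a custom listener.
public class LatencyReporterUsageSketch {

  public static void main(String[] args) throws Exception {
    LatencyListener stdoutListener =
        new LatencyListener() {
          @Override
          public void onReport(Histogram intervalHistogram) {
            intervalHistogram.outputPercentileDistribution(System.out, 1000.0);
          }

          @Override
          public void onTerminate(Histogram accumulatedHistogram) {
            if (accumulatedHistogram != null) {
              accumulatedHistogram.outputPercentileDistribution(System.out, 1000.0);
            }
          }

          @Override
          public void close() {
            // nothing to release
          }
        };

    try (LatencyReporter reporter = LatencyReporter.launch(stdoutListener)) {
      for (int i = 0; i < 5_000; i++) {
        long start = System.nanoTime();
        Thread.sleep(1); // stands in for a real request/response round trip
        reporter.onDiff(System.nanoTime() - start);
      }
    }
  }
}
```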
--------------------------------------------------------------------------------
/aeron-cluster-poc-benchmarks/src/main/java/io/scalecube/acpoc/benchmarks/report/throughput/CompositeThroughputListener.java:
--------------------------------------------------------------------------------
1 | package io.scalecube.acpoc.benchmarks.report.throughput;
2 |
3 | import org.agrona.CloseHelper;
4 |
5 | public class CompositeThroughputListener implements ThroughputListener {
6 |
7 | private final ThroughputListener[] listeners;
8 |
9 | public CompositeThroughputListener(ThroughputListener... listeners) {
10 | this.listeners = listeners;
11 | }
12 |
13 | @Override
14 | public void close() {
15 | CloseHelper.quietCloseAll(listeners);
16 | }
17 |
18 | @Override
19 | public void onReport(double messagesPerSec, double bytesPerSec) {
20 | for (ThroughputListener listener : listeners) {
21 | listener.onReport(messagesPerSec, bytesPerSec);
22 | }
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-benchmarks/src/main/java/io/scalecube/acpoc/benchmarks/report/throughput/CsvReportingThroughputListener.java:
--------------------------------------------------------------------------------
1 | package io.scalecube.acpoc.benchmarks.report.throughput;
2 |
3 | import com.opencsv.CSVWriterBuilder;
4 | import com.opencsv.ICSVWriter;
5 | import io.scalecube.acpoc.benchmarks.Runners;
6 | import java.io.FileWriter;
7 | import org.agrona.CloseHelper;
8 | import reactor.core.Exceptions;
9 |
10 | public class CsvReportingThroughputListener implements ThroughputListener {
11 |
12 | private final ICSVWriter csvWriter;
13 | private final String[] csvLine;
14 |
15 | private long totalMessages;
16 | private long totalBytes;
17 | private long seconds;
18 |
19 | /**
20 | * Initialize CSV throughput listener.
21 | *
22 | * @param benchmarkClass benchmark class
23 | */
24 | public CsvReportingThroughputListener(Class<?> benchmarkClass) {
25 | try {
26 | FileWriter fileWriter = new FileWriter(Runners.resultFilename(benchmarkClass), false);
27 | csvWriter = new CSVWriterBuilder(fileWriter).build();
28 | String[] title = {"messages/sec", "MB/sec", "total messages", "MB payloads"};
29 | csvWriter.writeNext(title);
30 | csvWriter.flushQuietly();
31 | csvLine = title;
32 | } catch (Exception e) {
33 | throw Exceptions.propagate(e);
34 | }
35 | }
36 |
37 | @Override
38 | public void onReport(double messagesPerSec, double bytesPerSec) {
39 | totalMessages += messagesPerSec;
40 | totalBytes += bytesPerSec;
41 | seconds++;
42 | csvLine[0] = String.format("%.07g", messagesPerSec);
43 | csvLine[1] = String.format("%.07g", bytesPerSec / (1024 * 1024));
44 | csvLine[2] = Long.toString(totalMessages);
45 | csvLine[3] = Long.toString(totalBytes / (1024 * 1024));
46 | csvWriter.writeNext(csvLine);
47 | csvWriter.flushQuietly();
48 | System.out.println(
49 | csvLine[0]
50 | + " msgs/sec, "
51 | + csvLine[1]
52 | + " MB/sec, totals "
53 | + csvLine[2]
54 | + " messages "
55 | + csvLine[3]
56 | + " MB payloads");
57 | }
58 |
59 | @Override
60 | public void close() {
61 | csvLine[0] = String.format("%.07g", (double) totalMessages / seconds);
62 | csvLine[1] = String.format("%.07g", ((double) totalBytes / seconds) / (1024 * 1024));
63 | csvLine[2] = Long.toString(totalMessages);
64 | csvLine[3] = Long.toString(totalBytes / (1024 * 1024));
65 | System.out.println("Throughput average: ");
66 | System.out.println(
67 | csvLine[0]
68 | + " msgs/sec, "
69 | + csvLine[1]
70 | + " MB/sec, totals "
71 | + csvLine[2]
72 | + " messages "
73 | + csvLine[3]
74 | + " MB payloads");
75 | csvWriter.writeNext(csvLine);
76 | CloseHelper.quietClose(csvWriter);
77 | }
78 | }
79 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-benchmarks/src/main/java/io/scalecube/acpoc/benchmarks/report/throughput/ThroughputListener.java:
--------------------------------------------------------------------------------
1 | package io.scalecube.acpoc.benchmarks.report.throughput;
2 |
3 | /** Interface for reporting of rate information. */
4 | public interface ThroughputListener extends AutoCloseable {
5 | /**
6 | * Called for a rate report.
7 | *
8 | * @param messagesPerSec since last report
9 | * @param bytesPerSec since last report
10 | */
11 | void onReport(double messagesPerSec, double bytesPerSec);
12 | }
13 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-benchmarks/src/main/java/io/scalecube/acpoc/benchmarks/report/throughput/ThroughputReporter.java:
--------------------------------------------------------------------------------
1 | package io.scalecube.acpoc.benchmarks.report.throughput;
2 |
3 | import io.scalecube.acpoc.benchmarks.Runners;
4 | import java.time.Duration;
5 | import java.util.concurrent.TimeUnit;
6 | import java.util.concurrent.atomic.LongAdder;
7 | import org.agrona.CloseHelper;
8 | import reactor.core.Disposable;
9 | import reactor.core.publisher.Flux;
10 | import reactor.core.scheduler.Schedulers;
11 |
12 | /** Tracker and listener of throughput rates. */
13 | public class ThroughputReporter implements AutoCloseable {
14 |
15 | private final long reportIntervalNs;
16 | private final ThroughputListener listener;
17 |
18 | private final Disposable disposable;
19 |
20 | private final LongAdder totalBytes = new LongAdder();
21 | private final LongAdder totalMessages = new LongAdder();
22 |
23 | private long lastTotalBytes;
24 | private long lastTotalMessages;
25 | private long lastTimestamp;
26 |
27 | private boolean warmupFinished = false;
28 |
29 | /**
30 | * Launch this test reporter.
31 | *
32 | * @param listeners throughput listeners
33 | * @return a reporter
34 | */
35 | public static ThroughputReporter launch(ThroughputListener... listeners) {
36 | return new ThroughputReporter(new CompositeThroughputListener(listeners));
37 | }
38 |
39 | /**
40 | * Create rate reporter.
41 | *
42 | * @param listener throughput listener
43 | */
44 | private ThroughputReporter(ThroughputListener listener) {
45 | Duration reportDelay =
46 | Duration.ofSeconds(
47 | Runners.warmupTime().convertTo(TimeUnit.SECONDS) * Runners.warmupIterations());
48 | Duration reportInterval = Duration.ofSeconds(Long.getLong("benchmark.report.interval", 1));
49 | this.reportIntervalNs = reportInterval.toNanos();
50 | this.listener = listener;
51 | this.disposable =
52 | Flux.interval(reportDelay, reportInterval, Schedulers.single())
53 | .subscribe(i -> this.run(), Throwable::printStackTrace);
54 | }
55 |
56 | private void run() {
57 | long currentTotalMessages = totalMessages.longValue();
58 | long currentTotalBytes = totalBytes.longValue();
59 | long currentTimestamp = System.nanoTime();
60 |
61 | long timeSpanNs = currentTimestamp - lastTimestamp;
62 | double messagesPerSec =
63 | ((currentTotalMessages - lastTotalMessages) * (double) reportIntervalNs)
64 | / (double) timeSpanNs;
65 | double bytesPerSec =
66 | ((currentTotalBytes - lastTotalBytes) * (double) reportIntervalNs) / (double) timeSpanNs;
67 |
68 | lastTotalBytes = currentTotalBytes;
69 | lastTotalMessages = currentTotalMessages;
70 | lastTimestamp = currentTimestamp;
71 |
72 | if (warmupFinished) {
73 | listener.onReport(messagesPerSec, bytesPerSec);
74 | } else {
75 | warmupFinished = true;
76 | }
77 | }
78 |
79 | /**
80 | * Notify rate reporter of number of messages and bytes received, sent, etc.
81 | *
82 | * @param messages received, sent, etc.
83 | * @param bytes received, sent, etc.
84 | */
85 | public void onMessage(final long messages, final long bytes) {
86 | totalBytes.add(bytes);
87 | totalMessages.add(messages);
88 | }
89 |
90 | @Override
91 | public void close() {
92 | disposable.dispose();
93 | CloseHelper.quietClose(listener);
94 | }
95 | }
96 |
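A usage sketch of the rate tracker: call `onMessage` once per processed message with its byte count, and per-interval rates are pushed to the listeners after the warmup delay. The stdout listener below is illustrative, not one of the module's listeners.

```java
package io.scalecube.acpoc.benchmarks.report.throughput;

// Sketch only: feeds the reporter a synthetic message stream and prints the
// per-interval rates it computes.
public class ThroughputReporterUsageSketch {

  public static void main(String[] args) {
    ThroughputListener stdoutListener =
        new ThroughputListener() {
          @Override
          public void onReport(double messagesPerSec, double bytesPerSec) {
            System.out.printf("%.0f msgs/sec, %.0f bytes/sec%n", messagesPerSec, bytesPerSec);
          }

          @Override
          public void close() {
            // nothing to release
          }
        };

    int messageLength = 256;
    try (ThroughputReporter reporter = ThroughputReporter.launch(stdoutListener)) {
      long deadline = System.currentTimeMillis() + 10_000;
      while (System.currentTimeMillis() < deadline) {
        reporter.onMessage(1, messageLength); // one "processed" message of 256 bytes
      }
    }
  }
}
```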
--------------------------------------------------------------------------------
/aeron-cluster-poc-client/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
3 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
4 |   <modelVersion>4.0.0</modelVersion>
5 |   <parent>
6 |     <groupId>io.scalecube</groupId>
7 |     <artifactId>aeron-cluster-poc-parent</artifactId>
8 |     <version>0.0.2-SNAPSHOT</version>
9 |   </parent>
10 |   <artifactId>aeron-cluster-poc-client</artifactId>
11 | </project>
--------------------------------------------------------------------------------
/aeron-cluster-poc-examples/README.md:
--------------------------------------------------------------------------------
1 | This module shows how to use the aeron-cluster library to start a cluster of several services and communicate with that cluster.
2 |
3 | The scripts/* directory contains all of the launch samples.
4 |
5 | Running the examples without VM options makes no sense, since the runners do not provide the default values required to start a cluster.
6 |
7 | How to run:
8 |
9 | Run node-0.sh, node-1.sh and node-2.sh, and observe that one of the nodes becomes the leader.
10 | You now have three nodes of the Echo service running in aeron-cluster.
11 |
12 | Then start client.sh.
13 |
14 | Observe the client exchanging 'hello' messages with the running cluster.
15 | Try stopping and restarting nodes, and observe that the client keeps running regardless.
--------------------------------------------------------------------------------
/aeron-cluster-poc-examples/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
3 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
4 |   <modelVersion>4.0.0</modelVersion>
5 |   <parent>
6 |     <groupId>io.scalecube</groupId>
7 |     <artifactId>aeron-cluster-poc-parent</artifactId>
8 |     <version>0.0.2-SNAPSHOT</version>
9 |   </parent>
10 |   <artifactId>aeron-cluster-poc-examples</artifactId>
11 |   <description>This is a basic example of aeron cluster logic: a distributed basic counter</description>
12 |
13 |   <dependencies>
14 |
15 |     <dependency>
16 |       <groupId>io.aeron</groupId>
17 |       <artifactId>aeron-driver</artifactId>
18 |     </dependency>
19 |     <dependency>
20 |       <groupId>io.aeron</groupId>
21 |       <artifactId>aeron-client</artifactId>
22 |     </dependency>
23 |     <dependency>
24 |       <groupId>io.aeron</groupId>
25 |       <artifactId>aeron-agent</artifactId>
26 |     </dependency>
27 |     <dependency>
28 |       <groupId>io.aeron</groupId>
29 |       <artifactId>aeron-samples</artifactId>
30 |     </dependency>
31 |     <dependency>
32 |       <groupId>io.aeron</groupId>
33 |       <artifactId>aeron-cluster</artifactId>
34 |     </dependency>
35 |
36 |     <dependency>
37 |       <groupId>net.bytebuddy</groupId>
38 |       <artifactId>byte-buddy</artifactId>
39 |     </dependency>
40 |
41 |     <dependency>
42 |       <groupId>net.bytebuddy</groupId>
43 |       <artifactId>byte-buddy-agent</artifactId>
44 |     </dependency>
45 |
46 |
47 |     <dependency>
48 |       <groupId>io.projectreactor</groupId>
49 |       <artifactId>reactor-core</artifactId>
50 |     </dependency>
51 |
52 |
53 |     <dependency>
54 |       <groupId>org.slf4j</groupId>
55 |       <artifactId>slf4j-api</artifactId>
56 |     </dependency>
57 |     <dependency>
58 |       <groupId>org.apache.logging.log4j</groupId>
59 |       <artifactId>log4j-slf4j-impl</artifactId>
60 |     </dependency>
61 |     <dependency>
62 |       <groupId>org.apache.logging.log4j</groupId>
63 |       <artifactId>log4j-core</artifactId>
64 |     </dependency>
65 |
66 |     <dependency>
67 |       <groupId>com.lmax</groupId>
68 |       <artifactId>disruptor</artifactId>
69 |     </dependency>
70 |
71 |     <dependency>
72 |       <groupId>org.hdrhistogram</groupId>
73 |       <artifactId>HdrHistogram</artifactId>
74 |     </dependency>
75 |     <dependency>
76 |       <groupId>io.scalecube</groupId>
77 |       <artifactId>trace-reporter</artifactId>
78 |     </dependency>
79 |   </dependencies>
80 |
81 |   <build>
82 |     <plugins>
83 |       <plugin>
84 |         <artifactId>maven-jar-plugin</artifactId>
85 |       </plugin>
86 |       <plugin>
87 |         <artifactId>maven-dependency-plugin</artifactId>
88 |       </plugin>
89 |     </plugins>
90 |   </build>
91 |
92 | </project>
93 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-examples/scripts/backup-node.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | cd $(dirname $0)
4 | cd ../
5 |
6 | JAR_FILE=$(ls target |grep jar)
7 |
8 | echo $JAR_FILE
9 |
10 | java \
11 | -cp target/${JAR_FILE}:target/lib/* \
12 | -Daeron.archive.control.channel="aeron:udp?term-length=64k|endpoint=localhost:8014" \
13 | -Daeron.archive.control.stream.id=100 \
14 | -Daeron.archive.control.response.channel="aeron:udp?term-length=64k|endpoint=localhost:8024" \
15 | -Daeron.archive.control.response.stream.id=100 \
16 | -Daeron.archive.local.control.channel="aeron:ipc?term-length=64k" \
17 | -Daeron.archive.recording.events.channel="aeron:udp?control-mode=dynamic|control=localhost:8034" \
18 | -Daeron.cluster.member.id=4 \
19 | -Daeron.cluster.member.status.channel="aeron:udp?term-length=64k|endpoint=20224" \
20 | -Daeron.cluster.members="4,localhost:20114,localhost:20224,localhost:20334,localhost:20444,localhost:8014" \
21 | -Daeron.cluster.members.status.endpoints="localhost:20220,localhost:20221,localhost:20222" \
22 | -Dio.scalecube.acpoc.instanceId=n4 \
23 | -Dio.scalecube.acpoc.cleanStart=false \
24 | -Dio.scalecube.acpoc.cleanShutdown=false \
25 | ${JVM_OPTS} io.scalecube.acpoc.ClusterBackupRunner
26 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-examples/scripts/benchmarks/client-ping.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | cd $(dirname $0)
4 | cd ../../
5 |
6 | JAR_FILE=$(ls target |grep jar)
7 |
8 | echo $JAR_FILE
9 |
10 | java \
11 | -cp target/${JAR_FILE}:target/lib/* \
12 | -XX:+UnlockDiagnosticVMOptions \
13 | -XX:GuaranteedSafepointInterval=300000 \
14 | -Daeron.dir=/dev/shm/aeron-client-ping-0 \
15 | -Daeron.threading.mode=SHARED \
16 | -Dagrona.disable.bounds.checks=true \
17 | -Daeron.mtu.length=16k \
18 | -Daeron.cluster.member.endpoints="0=localhost:20110,1=localhost:20111,2=localhost:20112" \
19 | -Dio.scalecube.acpoc.messageLength=256 \
20 | -Dio.scalecube.acpoc.request=16 \
21 | ${JVM_OPTS} io.scalecube.acpoc.benchmarks.ClusterClientPing
22 |
23 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-examples/scripts/benchmarks/client-to-single-node-ping.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | cd $(dirname $0)
4 | cd ../../
5 |
6 | JAR_FILE=$(ls target |grep jar)
7 |
8 | echo $JAR_FILE
9 |
10 | java \
11 | -cp target/${JAR_FILE}:target/lib/* \
12 | -XX:+UnlockExperimentalVMOptions \
13 | -XX:+TrustFinalNonStaticFields \
14 | -XX:+UnlockDiagnosticVMOptions \
15 | -XX:GuaranteedSafepointInterval=300000 \
16 | -XX:BiasedLockingStartupDelay=0 \
17 | -XX:+UseParallelOldGC \
18 | -Daeron.term.buffer.sparse.file=false \
19 | -Daeron.socket.so_sndbuf=2m \
20 | -Daeron.socket.so_rcvbuf=2m \
21 | -Daeron.rcv.initial.window.length=2m \
22 | -Daeron.dir=/dev/shm/aeron-client-single-ping-0 \
23 | -Daeron.threading.mode=DEDICATED \
24 | -Daeron.shared.idle.strategy=org.agrona.concurrent.BusySpinIdleStrategy \
25 | -Daeron.sharednetwork.idle.strategy=org.agrona.concurrent.BusySpinIdleStrategy \
26 | -Daeron.sender.idle.strategy=org.agrona.concurrent.BusySpinIdleStrategy \
27 | -Daeron.receiver.idle.strategy=org.agrona.concurrent.BusySpinIdleStrategy \
28 | -Daeron.conductor.idle.strategy=org.agrona.concurrent.BusySpinIdleStrategy \
29 | -Dagrona.disable.bounds.checks=true \
30 | -Daeron.mtu.length=8k \
31 | -Daeron.cluster.member.endpoints="0=localhost:20110" \
32 | -Dio.scalecube.acpoc.messages=10000000 \
33 | -Dio.scalecube.acpoc.messageLength=256 \
34 | -Dio.scalecube.acpoc.request=1 \
35 | -Dio.scalecube.acpoc.cleanStart=true \
36 | -Dio.scalecube.acpoc.cleanShutdown=true \
37 | ${JVM_OPTS} io.scalecube.acpoc.benchmarks.ClusterClientPing
38 |
39 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-examples/scripts/benchmarks/node-pong-0.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | cd $(dirname $0)
4 | cd ../../
5 |
6 | JAR_FILE=$(ls target |grep jar)
7 |
8 | echo $JAR_FILE
9 |
10 | export logLevel=ERROR
11 |
12 | java \
13 | -cp target/${JAR_FILE}:target/lib/* \
14 | -XX:+UnlockDiagnosticVMOptions \
15 | -XX:GuaranteedSafepointInterval=300000 \
16 | -Daeron.dir=/dev/shm/aeron-pong-0 \
17 | -Daeron.threading.mode=SHARED \
18 | -Daeron.archive.threading.mode=SHARED \
19 | -Dagrona.disable.bounds.checks=true \
20 | -Daeron.mtu.length=8k \
21 | -Daeron.archive.control.channel="aeron:udp?term-length=64k|endpoint=localhost:8010" \
22 | -Daeron.archive.control.stream.id="100" \
23 | -Daeron.archive.control.response.channel="aeron:udp?term-length=64k|endpoint=localhost:8020" \
24 | -Daeron.archive.control.response.stream.id="100" \
25 | -Daeron.archive.recording.events.channel="aeron:udp?control-mode=dynamic|control=localhost:8030" \
26 | -Daeron.archive.local.control.channel="aeron:ipc?term-length=64k" \
27 | -Daeron.cluster.member.id="0" \
28 | -Daeron.cluster.members="0,localhost:20110,localhost:20220,localhost:20330,localhost:20440,localhost:8010|1,localhost:20111,localhost:20221,localhost:20331,localhost:20441,localhost:8011|2,localhost:20112,localhost:20222,localhost:20332,localhost:20442,localhost:8012" \
29 | -Daeron.cluster.ingress.channel="aeron:udp?term-length=64k" \
30 | -Daeron.cluster.log.channel="aeron:udp?term-length=256k|control-mode=manual|control=localhost:20550" \
31 | -Dio.scalecube.acpoc.instanceId=n0 \
32 | -Dio.scalecube.acpoc.cleanStart=true \
33 | -Dio.scalecube.acpoc.cleanShutdown=true \
34 | ${JVM_OPTS} io.scalecube.acpoc.benchmarks.ClusteredServiceRunner
35 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-examples/scripts/benchmarks/node-pong-1.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | cd $(dirname $0)
4 | cd ../../
5 |
6 | JAR_FILE=$(ls target |grep jar)
7 |
8 | echo $JAR_FILE
9 |
10 | export logLevel=ERROR
11 |
12 | java \
13 | -cp target/${JAR_FILE}:target/lib/* \
14 | -XX:+UnlockDiagnosticVMOptions \
15 | -XX:GuaranteedSafepointInterval=300000 \
16 | -Dagrona.disable.bounds.checks=true \
17 | -Daeron.dir=/dev/shm/aeron-pong-1 \
18 | -Daeron.threading.mode=SHARED \
19 | -Daeron.archive.threading.mode=SHARED \
20 | -Daeron.mtu.length=8k \
21 | -Daeron.archive.control.channel="aeron:udp?term-length=64k|endpoint=localhost:8011" \
22 | -Daeron.archive.control.stream.id="100" \
23 | -Daeron.archive.control.response.channel="aeron:udp?term-length=64k|endpoint=localhost:8021" \
24 | -Daeron.archive.control.response.stream.id="101" \
25 | -Daeron.archive.recording.events.channel="aeron:udp?control-mode=dynamic|control=localhost:8031" \
26 | -Daeron.archive.local.control.channel="aeron:ipc?term-length=64k" \
27 | -Daeron.cluster.member.id="1" \
28 | -Daeron.cluster.members="0,localhost:20110,localhost:20220,localhost:20330,localhost:20440,localhost:8010|1,localhost:20111,localhost:20221,localhost:20331,localhost:20441,localhost:8011|2,localhost:20112,localhost:20222,localhost:20332,localhost:20442,localhost:8012" \
29 | -Daeron.cluster.ingress.channel="aeron:udp?term-length=64k" \
30 | -Daeron.cluster.log.channel="aeron:udp?term-length=256k|control-mode=manual|control=localhost:20551" \
31 | -Dio.scalecube.acpoc.instanceId=n1 \
32 | -Dio.scalecube.acpoc.cleanStart=true \
33 | -Dio.scalecube.acpoc.cleanShutdown=true \
34 | ${JVM_OPTS} io.scalecube.acpoc.benchmarks.ClusteredServiceRunner
35 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-examples/scripts/benchmarks/node-pong-2.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | cd $(dirname $0)
4 | cd ../../
5 |
6 | JAR_FILE=$(ls target |grep jar)
7 |
8 | echo $JAR_FILE
9 |
10 | export logLevel=ERROR
11 |
12 | java \
13 | -cp target/${JAR_FILE}:target/lib/* \
14 | -XX:+UnlockDiagnosticVMOptions \
15 | -XX:GuaranteedSafepointInterval=300000 \
16 | -Dagrona.disable.bounds.checks=true \
17 | -Daeron.dir=/dev/shm/aeron-pong-2 \
18 | -Daeron.threading.mode=SHARED \
19 | -Daeron.archive.threading.mode=SHARED \
20 | -Daeron.mtu.length=8k \
21 | -Daeron.archive.control.channel="aeron:udp?term-length=64k|endpoint=localhost:8012" \
22 | -Daeron.archive.control.stream.id="100" \
23 | -Daeron.archive.control.response.channel="aeron:udp?term-length=64k|endpoint=localhost:8022" \
24 | -Daeron.archive.control.response.stream.id="102" \
25 | -Daeron.archive.recording.events.channel="aeron:udp?control-mode=dynamic|control=localhost:8032" \
26 | -Daeron.archive.local.control.channel="aeron:ipc?term-length=64k" \
27 | -Daeron.cluster.member.id="2" \
28 | -Daeron.cluster.members="0,localhost:20110,localhost:20220,localhost:20330,localhost:20440,localhost:8010|1,localhost:20111,localhost:20221,localhost:20331,localhost:20441,localhost:8011|2,localhost:20112,localhost:20222,localhost:20332,localhost:20442,localhost:8012" \
29 | -Daeron.cluster.ingress.channel="aeron:udp?term-length=64k" \
30 | -Daeron.cluster.log.channel="aeron:udp?term-length=256k|control-mode=manual|control=localhost:20552" \
31 | -Dio.scalecube.acpoc.instanceId=n2 \
32 | -Dio.scalecube.acpoc.cleanStart=true \
33 | -Dio.scalecube.acpoc.cleanShutdown=true \
34 | ${JVM_OPTS} io.scalecube.acpoc.benchmarks.ClusteredServiceRunner
35 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-examples/scripts/benchmarks/single-node-pong.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | cd $(dirname $0)
4 | cd ../../
5 |
6 | JAR_FILE=$(ls target |grep jar)
7 |
8 | echo $JAR_FILE
9 |
10 | export logLevel=ERROR
11 |
12 | java \
13 | -cp target/${JAR_FILE}:target/lib/* \
14 | -XX:+UnlockExperimentalVMOptions \
15 | -XX:+TrustFinalNonStaticFields \
16 | -XX:+UnlockDiagnosticVMOptions \
17 | -XX:GuaranteedSafepointInterval=300000 \
18 | -XX:BiasedLockingStartupDelay=0 \
19 | -XX:+UseParallelOldGC \
20 | -Daeron.term.buffer.sparse.file=false \
21 | -Daeron.socket.so_sndbuf=2m \
22 | -Daeron.socket.so_rcvbuf=2m \
23 | -Daeron.rcv.initial.window.length=2m \
24 | -Daeron.dir=/dev/shm/aeron-single-pong-0 \
25 | -Daeron.threading.mode=DEDICATED \
26 | -Daeron.archive.threading.mode=DEDICATED \
27 | -Daeron.shared.idle.strategy=org.agrona.concurrent.BusySpinIdleStrategy \
28 | -Daeron.sharednetwork.idle.strategy=org.agrona.concurrent.BusySpinIdleStrategy \
29 | -Daeron.sender.idle.strategy=org.agrona.concurrent.BusySpinIdleStrategy \
30 | -Daeron.receiver.idle.strategy=org.agrona.concurrent.BusySpinIdleStrategy \
31 | -Daeron.conductor.idle.strategy=org.agrona.concurrent.BusySpinIdleStrategy \
32 | -Daeron.archive.idle.strategy=org.agrona.concurrent.BusySpinIdleStrategy \
33 | -Daeron.archive.recorder.idle.strategy=org.agrona.concurrent.BusySpinIdleStrategy \
34 | -Daeron.archive.replayer.idle.strategy=org.agrona.concurrent.BusySpinIdleStrategy \
35 | -Daeron.archive.file.sync.level=0 \
36 | -Daeron.archive.segment.file.length=1g \
37 | -Daeron.archive.control.mtu.length=4k \
38 | -Daeron.spies.simulate.connection=true \
39 | -Dagrona.disable.bounds.checks=true \
40 | -Daeron.mtu.length=8k \
41 | -Daeron.archive.control.channel="aeron:udp?term-length=64k|endpoint=localhost:8010" \
42 | -Daeron.archive.control.stream.id="100" \
43 | -Daeron.archive.control.response.channel="aeron:udp?term-length=64k|endpoint=localhost:8020" \
44 | -Daeron.archive.control.response.stream.id="100" \
45 | -Daeron.archive.recording.events.channel="aeron:udp?control-mode=dynamic|control=localhost:8030" \
46 | -Daeron.archive.local.control.channel="aeron:ipc?term-length=64k" \
47 | -Daeron.cluster.member.id="0" \
48 | -Daeron.cluster.members="0,localhost:20110,localhost:20220,localhost:20330,localhost:20440,localhost:8010" \
49 | -Daeron.cluster.ingress.channel="aeron:udp?term-length=64k" \
50 | -Daeron.cluster.log.channel="aeron:udp?term-length=256k|control-mode=manual|control=localhost:20550" \
51 | -Dio.scalecube.acpoc.instanceId=n0 \
52 | -Dio.scalecube.acpoc.cleanStart=true \
53 | -Dio.scalecube.acpoc.cleanShutdown=true \
54 | ${JVM_OPTS} io.scalecube.acpoc.benchmarks.ClusteredServiceRunner
55 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-examples/scripts/client-interactive.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | cd $(dirname $0)
4 | cd ../
5 |
6 | JAR_FILE=$(ls target |grep jar)
7 |
8 | echo $JAR_FILE
9 |
10 | java \
11 | -cp target/${JAR_FILE}:target/lib/* \
12 | -Daeron.dir=/dev/shm/aeron-client-interactive-0 \
13 | -Daeron.threading.mode=SHARED \
14 | -Daeron.cluster.member.endpoints="0=localhost:20110,1=localhost:20111,2=localhost:20112" \
15 | ${JVM_OPTS} io.scalecube.acpoc.InteractiveClient
16 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-examples/scripts/client.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | cd $(dirname $0)
4 | cd ../
5 |
6 | JAR_FILE=$(ls target |grep jar)
7 |
8 | echo $JAR_FILE
9 |
10 | java \
11 | -cp target/${JAR_FILE}:target/lib/* \
12 | -Daeron.dir=/dev/shm/aeron-client-0 \
13 | -Daeron.threading.mode=SHARED \
14 | -Daeron.cluster.member.endpoints="0=localhost:20110,1=localhost:20111,2=localhost:20112" \
15 | ${JVM_OPTS} io.scalecube.acpoc.ClusterClientRunner
16 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-examples/scripts/d-node-3.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | cd $(dirname $0)
4 | cd ../
5 |
6 | JAR_FILE=$(ls target |grep jar)
7 |
8 | echo $JAR_FILE
9 |
10 | java \
11 | -cp target/${JAR_FILE}:target/lib/* \
12 | -Daeron.dir=/dev/shm/aeron-d3 \
13 | -Dio.scalecube.acpoc.instanceId=d3 \
14 | -Daeron.threading.mode=SHARED \
15 | -Daeron.archive.threading.mode=SHARED \
16 | -Daeron.cluster.member.id="-1" \
17 | -Daeron.cluster.members="" \
18 | -Daeron.archive.control.channel="aeron:udp?term-length=64k|endpoint=localhost:8013" \
19 | -Daeron.archive.control.stream.id="100" \
20 | -Daeron.archive.control.response.channel="aeron:udp?term-length=64k|endpoint=localhost:8023" \
21 | -Daeron.archive.control.response.stream.id="113" \
22 | -Daeron.archive.recording.events.channel="aeron:udp?control-mode=dynamic|control=localhost:8033" \
23 | -Daeron.archive.local.control.channel="aeron:ipc?term-length=64k" \
24 | -Daeron.cluster.member.endpoints="localhost:20113,localhost:20223,localhost:20333,localhost:20443,localhost:8013" \
25 | -Daeron.cluster.members.status.endpoints="localhost:20220" \
26 | -Daeron.cluster.ingress.channel="aeron:udp?term-length=64k" \
27 | -Daeron.cluster.log.channel="aeron:udp?term-length=256k|control-mode=manual|control=localhost:20553" \
28 | -Dio.scalecube.acpoc.cleanStart=true \
29 | -Dio.scalecube.acpoc.cleanShutdown=true \
30 | ${JVM_OPTS} io.scalecube.acpoc.ClusteredServiceRunner
31 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-examples/scripts/d-node-4.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | cd $(dirname $0)
4 | cd ../
5 |
6 | JAR_FILE=$(ls target |grep jar)
7 |
8 | echo $JAR_FILE
9 |
10 | java \
11 | -cp target/${JAR_FILE}:target/lib/* \
12 | -Daeron.dir=/dev/shm/aeron-d4 \
13 | -Dio.scalecube.acpoc.instanceId=d4 \
14 | -Daeron.threading.mode=SHARED \
15 | -Daeron.archive.threading.mode=SHARED \
16 | -Daeron.cluster.member.id="-1" \
17 | -Daeron.cluster.members="" \
18 | -Daeron.archive.control.channel="aeron:udp?term-length=64k|endpoint=localhost:8014" \
19 | -Daeron.archive.control.stream.id="100" \
20 | -Daeron.archive.control.response.channel="aeron:udp?term-length=64k|endpoint=localhost:8024" \
21 | -Daeron.archive.control.response.stream.id="114" \
22 | -Daeron.archive.recording.events.channel="aeron:udp?control-mode=dynamic|control=localhost:8034" \
23 | -Daeron.archive.local.control.channel="aeron:ipc?term-length=64k" \
24 | -Daeron.cluster.member.endpoints="localhost:20114,localhost:20224,localhost:20334,localhost:20444,localhost:8014" \
25 | -Daeron.cluster.members.status.endpoints="localhost:20220" \
26 | -Daeron.cluster.ingress.channel="aeron:udp?term-length=64k" \
27 | -Daeron.cluster.log.channel="aeron:udp?term-length=256k|control-mode=manual|control=localhost:20554" \
28 | -Dio.scalecube.acpoc.cleanStart=true \
29 | -Dio.scalecube.acpoc.cleanShutdown=true \
30 | ${JVM_OPTS} io.scalecube.acpoc.ClusteredServiceRunner
31 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-examples/scripts/d-node-5.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | cd $(dirname $0)
4 | cd ../
5 |
6 | JAR_FILE=$(ls target |grep jar)
7 |
8 | echo $JAR_FILE
9 |
10 | java \
11 | -cp target/${JAR_FILE}:target/lib/* \
12 | -Daeron.dir=/dev/shm/aeron-d5 \
13 | -Daeron.threading.mode=SHARED \
14 | -Daeron.archive.threading.mode=SHARED \
15 | -Daeron.cluster.member.id="-1" \
16 | -Daeron.cluster.members="" \
17 | -Daeron.archive.control.channel="aeron:udp?term-length=64k|endpoint=localhost:8015" \
18 | -Daeron.archive.control.stream.id="100" \
19 | -Daeron.archive.control.response.channel="aeron:udp?term-length=64k|endpoint=localhost:8025" \
20 | -Daeron.archive.control.response.stream.id="115" \
21 | -Daeron.archive.recording.events.channel="aeron:udp?control-mode=dynamic|control=localhost:8035" \
22 | -Daeron.archive.local.control.channel="aeron:ipc?term-length=64k" \
23 | -Daeron.cluster.member.endpoints="localhost:20115,localhost:20225,localhost:20335,localhost:20445,localhost:8015" \
24 | -Daeron.cluster.members.status.endpoints="localhost:20223" \
25 | -Daeron.cluster.ingress.channel="aeron:udp?term-length=64k" \
26 | -Daeron.cluster.log.channel="aeron:udp?term-length=256k|control-mode=manual|control=localhost:20555" \
27 | -Dio.scalecube.acpoc.cleanStart=true \
28 | -Dio.scalecube.acpoc.cleanShutdown=true \
29 | ${JVM_OPTS} io.scalecube.acpoc.ClusteredServiceRunner
30 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-examples/scripts/docker/client-interactive.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | cd $(dirname $0)
4 | cd ../
5 |
6 | JAR_FILE=$(ls target |grep jar)
7 |
8 | echo $JAR_FILE
9 |
10 | java \
11 | -cp target/${JAR_FILE}:target/lib/* \
12 | -Daeron.cluster.member.endpoints="0=node0:20110,1=node1:20111,2=node2:20112" \
13 | -Daeron.cluster.egress.channel="aeron:udp?endpoint=node${NUMBER}:9020" \
14 | ${JVM_OPTS} io.scalecube.acpoc.InteractiveClient
15 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-examples/scripts/docker/keep-alive-client-interactive.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | cd $(dirname $0)
4 | cd ../
5 |
6 | JAR_FILE=$(ls target |grep jar)
7 |
8 | echo $JAR_FILE
9 |
10 | java \
11 | -cp target/${JAR_FILE}:target/lib/* \
12 | -Daeron.cluster.member.endpoints="0=node0:20110,1=node1:20111,2=node2:20112" \
13 | -Daeron.cluster.egress.channel="aeron:udp?endpoint=node${NUMBER}:9020" \
14 | -Daeron.client.liveness.timeout=200000000000 \
15 | -Daeron.publication.unblock.timeout=300000000000 \
16 | ${JVM_OPTS} io.scalecube.acpoc.InteractiveClient
17 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-examples/scripts/docker/node-0.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | cd $(dirname $0)
4 | cd ../
5 |
6 | JAR_FILE=$(ls target |grep jar)
7 |
8 | echo $JAR_FILE
9 |
10 | java \
11 | -cp target/${JAR_FILE}:target/lib/* \
12 | -Dnetworkaddress.cache.ttl=0 \
13 | -Dnetworkaddress.cache.negative.ttl=0 \
14 | -Daeron.archive.control.channel="aeron:udp?term-length=64k|endpoint=node0:8010" \
15 | -Daeron.archive.control.stream.id="100" \
16 | -Daeron.archive.control.response.channel="aeron:udp?term-length=64k|endpoint=node0:8020" \
17 | -Daeron.archive.control.response.stream.id="110" \
18 | -Daeron.archive.recording.events.channel="aeron:udp?control-mode=dynamic|control=node0:8030" \
19 | -Daeron.archive.local.control.channel="aeron:ipc?term-length=64k" \
20 | -Daeron.cluster.member.id="0" \
21 | -Daeron.cluster.members="0,node0:20110,node0:20220,node0:20330,node0:20440,node0:8010
22 | |1,node1:20111,node1:20221,node1:20331,node1:20441,node1:8011
23 | |2,node2:20112,node2:20222,node2:20332,node2:20442,node2:8012" \
24 | -Daeron.cluster.ingress.channel="aeron:udp?term-length=64k" \
25 | -Daeron.cluster.log.channel="aeron:udp?term-length=256k|control-mode=manual|control=node0:20550" \
26 | -Dio.scalecube.acpoc.instanceId=n0 \
27 | -Dio.scalecube.acpoc.cleanStart=false \
28 | -Dio.scalecube.acpoc.cleanShutdown=false \
29 | -Dio.scalecube.acpoc.snapshotPeriodSecs=99999 \
30 | ${JVM_OPTS} io.scalecube.acpoc.ClusteredServiceRunner
31 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-examples/scripts/docker/node-1.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | cd $(dirname $0)
4 | cd ../
5 |
6 | JAR_FILE=$(ls target |grep jar)
7 |
8 | echo $JAR_FILE
9 |
10 | java \
11 | -cp target/${JAR_FILE}:target/lib/* \
12 | -Dnetworkaddress.cache.ttl=0 \
13 | -Dnetworkaddress.cache.negative.ttl=0 \
14 | -Daeron.archive.control.channel="aeron:udp?term-length=64k|endpoint=node1:8011" \
15 | -Daeron.archive.control.stream.id="100" \
16 | -Daeron.archive.control.response.channel="aeron:udp?term-length=64k|endpoint=node1:8021" \
17 | -Daeron.archive.control.response.stream.id="111" \
18 | -Daeron.archive.recording.events.channel="aeron:udp?control-mode=dynamic|control=node1:8031" \
19 | -Daeron.archive.local.control.channel="aeron:ipc?term-length=64k" \
20 | -Daeron.cluster.member.id="1" \
21 | -Daeron.cluster.members="0,node0:20110,node0:20220,node0:20330,node0:20440,node0:8010
22 | |1,node1:20111,node1:20221,node1:20331,node1:20441,node1:8011
23 | |2,node2:20112,node2:20222,node2:20332,node2:20442,node2:8012" \
24 | -Daeron.cluster.ingress.channel="aeron:udp?term-length=64k" \
25 | -Daeron.cluster.log.channel="aeron:udp?term-length=256k|control-mode=manual|control=node1:20551" \
26 | -Dio.scalecube.acpoc.instanceId=n1 \
27 | -Dio.scalecube.acpoc.cleanStart=false \
28 | -Dio.scalecube.acpoc.cleanShutdown=false \
29 | -Dio.scalecube.acpoc.snapshotPeriodSecs=99999 \
30 | ${JVM_OPTS} io.scalecube.acpoc.ClusteredServiceRunner
31 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-examples/scripts/docker/node-2.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | cd $(dirname $0)
4 | cd ../
5 |
6 | JAR_FILE=$(ls target |grep jar)
7 |
8 | echo $JAR_FILE
9 |
10 | java \
11 | -cp target/${JAR_FILE}:target/lib/* \
12 | -Dnetworkaddress.cache.ttl=0 \
13 | -Dnetworkaddress.cache.negative.ttl=0 \
14 | -Daeron.archive.control.channel="aeron:udp?term-length=64k|endpoint=node2:8012" \
15 | -Daeron.archive.control.stream.id="100" \
16 | -Daeron.archive.control.response.channel="aeron:udp?term-length=64k|endpoint=node2:8022" \
17 | -Daeron.archive.control.response.stream.id="112" \
18 | -Daeron.archive.recording.events.channel="aeron:udp?control-mode=dynamic|control=node2:8032" \
19 | -Daeron.archive.local.control.channel="aeron:ipc?term-length=64k" \
20 | -Daeron.cluster.member.id="2" \
21 | -Daeron.cluster.members="0,node0:20110,node0:20220,node0:20330,node0:20440,node0:8010
22 | |1,node1:20111,node1:20221,node1:20331,node1:20441,node1:8011
23 | |2,node2:20112,node2:20222,node2:20332,node2:20442,node2:8012" \
24 | -Daeron.cluster.ingress.channel="aeron:udp?term-length=64k" \
25 | -Daeron.cluster.log.channel="aeron:udp?term-length=256k|control-mode=manual|control=node2:20552" \
26 | -Dio.scalecube.acpoc.instanceId=n2 \
27 | -Dio.scalecube.acpoc.cleanStart=false \
28 | -Dio.scalecube.acpoc.cleanShutdown=false \
29 | -Dio.scalecube.acpoc.snapshotPeriodSecs=99999 \
30 | ${JVM_OPTS} io.scalecube.acpoc.ClusteredServiceRunner
31 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-examples/scripts/node-0-single.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | cd $(dirname $0)
4 | cd ../
5 |
6 | JAR_FILE=$(ls target |grep jar)
7 |
8 | echo $JAR_FILE
9 |
10 | java \
11 | -cp target/${JAR_FILE}:target/lib/* \
12 | -Daeron.archive.control.channel="aeron:udp?term-length=64k|endpoint=localhost:8010" \
13 | -Daeron.archive.control.stream.id="100" \
14 | -Daeron.archive.control.response.channel="aeron:udp?term-length=64k|endpoint=localhost:8020" \
15 | -Daeron.archive.control.response.stream.id="100" \
16 | -Daeron.archive.recording.events.channel="aeron:udp?control-mode=dynamic|control=localhost:8030" \
17 | -Daeron.archive.local.control.channel="aeron:ipc?term-length=64k" \
18 | -Daeron.cluster.member.id="0" \
19 | -Daeron.cluster.members="0,localhost:20110,localhost:20220,localhost:20330,localhost:20440,localhost:8010" \
20 | -Daeron.cluster.ingress.channel="aeron:udp?term-length=64k" \
21 | -Daeron.cluster.log.channel="aeron:udp?term-length=256k|control-mode=manual|control=localhost:20550" \
22 | -Daeron.dir=/dev/shm/aeron-n0-single \
23 | -Daeron.threading.mode=SHARED \
24 | -Daeron.archive.threading.mode=SHARED \
25 | -Dio.scalecube.acpoc.instanceId=n0-single \
26 | -Dio.scalecube.acpoc.cleanStart=false \
27 | -Dio.scalecube.acpoc.cleanShutdown=false \
28 | -Dio.scalecube.acpoc.snapshotPeriodSecs=99999 \
29 | -Daeron.cluster.session.timeout=30000000000 \
30 | -Daeron.cluster.leader.heartbeat.timeout=2000000000 \
31 | ${JVM_OPTS} io.scalecube.acpoc.ClusteredServiceRunner
32 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-examples/scripts/node-0.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | cd $(dirname $0)
4 | cd ../
5 |
6 | JAR_FILE=$(ls target |grep jar)
7 |
8 | echo $JAR_FILE
9 |
10 | java \
11 | -cp target/${JAR_FILE}:target/lib/* \
12 | -Daeron.archive.control.channel="aeron:udp?term-length=64k|endpoint=localhost:8010" \
13 | -Daeron.archive.control.stream.id="100" \
14 | -Daeron.archive.control.response.channel="aeron:udp?term-length=64k|endpoint=localhost:8020" \
15 | -Daeron.archive.control.response.stream.id="100" \
16 | -Daeron.archive.recording.events.channel="aeron:udp?control-mode=dynamic|control=localhost:8030" \
17 | -Daeron.archive.local.control.channel="aeron:ipc?term-length=64k" \
18 | -Daeron.cluster.member.id="0" \
19 | -Daeron.cluster.members="0,localhost:20110,localhost:20220,localhost:20330,localhost:20440,localhost:8010|1,localhost:20111,localhost:20221,localhost:20331,localhost:20441,localhost:8011|2,localhost:20112,localhost:20222,localhost:20332,localhost:20442,localhost:8012" \
20 | -Daeron.cluster.ingress.channel="aeron:udp?term-length=64k" \
21 | -Daeron.cluster.log.channel="aeron:udp?term-length=256k|control-mode=manual|control=localhost:20550" \
22 | -Daeron.dir=/dev/shm/aeron-n0 \
23 | -Daeron.threading.mode=SHARED \
24 | -Daeron.archive.threading.mode=SHARED \
25 | -Dio.scalecube.acpoc.instanceId=n0 \
26 | -Dio.scalecube.acpoc.cleanStart=false \
27 | -Dio.scalecube.acpoc.cleanShutdown=false \
28 | -Dio.scalecube.acpoc.snapshotPeriodSecs=99999 \
29 | -Daeron.cluster.session.timeout=30000000000 \
30 | -Daeron.cluster.leader.heartbeat.timeout=2000000000 \
31 | ${JVM_OPTS} io.scalecube.acpoc.ClusteredServiceRunner
32 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-examples/scripts/node-1.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | cd $(dirname $0)
4 | cd ../
5 |
6 | JAR_FILE=$(ls target |grep jar)
7 |
8 | echo $JAR_FILE
9 |
10 | java \
11 | -cp target/${JAR_FILE}:target/lib/* \
12 | -Daeron.archive.control.channel="aeron:udp?term-length=64k|endpoint=localhost:8011" \
13 | -Daeron.archive.control.stream.id="100" \
14 | -Daeron.archive.control.response.channel="aeron:udp?term-length=64k|endpoint=localhost:8021" \
15 | -Daeron.archive.control.response.stream.id="101" \
16 | -Daeron.archive.recording.events.channel="aeron:udp?control-mode=dynamic|control=localhost:8031" \
17 | -Daeron.archive.local.control.channel="aeron:ipc?term-length=64k" \
18 | -Daeron.cluster.member.id="1" \
19 | -Daeron.cluster.members="0,localhost:20110,localhost:20220,localhost:20330,localhost:20440,localhost:8010|1,localhost:20111,localhost:20221,localhost:20331,localhost:20441,localhost:8011|2,localhost:20112,localhost:20222,localhost:20332,localhost:20442,localhost:8012" \
20 | -Daeron.cluster.ingress.channel="aeron:udp?term-length=64k" \
21 | -Daeron.cluster.log.channel="aeron:udp?term-length=256k|control-mode=manual|control=localhost:20551" \
22 | -Daeron.dir=/dev/shm/aeron-n1 \
23 | -Daeron.threading.mode=SHARED \
24 | -Daeron.archive.threading.mode=SHARED \
25 | -Dio.scalecube.acpoc.instanceId=n1 \
26 | -Dio.scalecube.acpoc.cleanStart=false \
27 | -Dio.scalecube.acpoc.cleanShutdown=false \
28 | -Dio.scalecube.acpoc.snapshotPeriodSecs=99999 \
29 | -Daeron.cluster.session.timeout=30000000000 \
30 | -Daeron.cluster.leader.heartbeat.timeout=2000000000 \
31 | ${JVM_OPTS} io.scalecube.acpoc.ClusteredServiceRunner
32 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-examples/scripts/node-2.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | cd $(dirname $0)
4 | cd ../
5 |
6 | JAR_FILE=$(ls target |grep jar)
7 |
8 | echo $JAR_FILE
9 |
10 | java \
11 | -cp target/${JAR_FILE}:target/lib/* \
12 | -Daeron.archive.control.channel="aeron:udp?term-length=64k|endpoint=localhost:8012" \
13 | -Daeron.archive.control.stream.id="100" \
14 | -Daeron.archive.control.response.channel="aeron:udp?term-length=64k|endpoint=localhost:8022" \
15 | -Daeron.archive.control.response.stream.id="102" \
16 | -Daeron.archive.recording.events.channel="aeron:udp?control-mode=dynamic|control=localhost:8032" \
17 | -Daeron.archive.local.control.channel="aeron:ipc?term-length=64k" \
18 | -Daeron.cluster.member.id="2" \
19 | -Daeron.cluster.members="0,localhost:20110,localhost:20220,localhost:20330,localhost:20440,localhost:8010|1,localhost:20111,localhost:20221,localhost:20331,localhost:20441,localhost:8011|2,localhost:20112,localhost:20222,localhost:20332,localhost:20442,localhost:8012" \
20 | -Daeron.cluster.ingress.channel="aeron:udp?term-length=64k" \
21 | -Daeron.cluster.log.channel="aeron:udp?term-length=256k|control-mode=manual|control=localhost:20552" \
22 | -Daeron.dir=/dev/shm/aeron-n2 \
23 | -Daeron.threading.mode=SHARED \
24 | -Daeron.archive.threading.mode=SHARED \
25 | -Dio.scalecube.acpoc.instanceId=n2 \
26 | -Dio.scalecube.acpoc.cleanStart=false \
27 | -Dio.scalecube.acpoc.cleanShutdown=false \
28 | -Dio.scalecube.acpoc.snapshotPeriodSecs=99999 \
29 | -Daeron.cluster.session.timeout=30000000000 \
30 | -Daeron.cluster.leader.heartbeat.timeout=2000000000 \
31 | ${JVM_OPTS} io.scalecube.acpoc.ClusteredServiceRunner
32 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-examples/src/main/java/io/scalecube/acpoc/ArgsPrinter.java:
--------------------------------------------------------------------------------
1 | package io.scalecube.acpoc;
2 |
3 | public class ArgsPrinter {
4 |
5 | /**
6 | * Main program runner.
7 | *
8 | * @param args arguments
9 | */
10 | public static void main(String[] args) {
11 | int staticNodesCnt = 3;
12 | int dynamicNodesCount = 2;
13 |
14 | String[] endpoints = clusterMembersEndpoints(staticNodesCnt + dynamicNodesCount);
15 |
16 | System.out.println("******* Dynamic nodes *********");
17 | System.out.println("aeron.cluster.members=");
18 |
19 | for (int i = staticNodesCnt; i < staticNodesCnt + dynamicNodesCount; i++) {
20 | System.out.println("DynamicNode[" + i + "]");
21 | System.out.println("aeron.cluster.nodes=\"" + endpoints[i] + "\"");
22 | }
23 | System.out.println("Cluster members status endpoints");
24 | System.out.println(
25 | "aeron.cluster.members.status.endpoints=\"" + clusterMembersStatusEndpoints(3) + "\"");
26 |
27 | System.out.println("******* Static nodes *********");
28 | System.out.println("aeron.cluster.members=\"" + clusterMembersString(staticNodesCnt) + "\"");
29 | }
30 |
31 | private static String[] clusterMembersEndpoints(final int maxMemberCount) {
32 | final String[] clusterMembersEndpoints = new String[maxMemberCount];
33 |
34 | for (int i = 0; i < maxMemberCount; i++) {
35 | clusterMembersEndpoints[i] =
36 | "localhost:2011"
37 | + i
38 | + ','
39 | + "localhost:2022"
40 | + i
41 | + ','
42 | + "localhost:2033"
43 | + i
44 | + ','
45 | + "localhost:2044"
46 | + i
47 | + ','
48 | + "localhost:801"
49 | + i;
50 | }
51 |
52 | return clusterMembersEndpoints;
53 | }
54 |
55 | private static String clusterMembersString(final int memberCount) {
56 | final StringBuilder builder = new StringBuilder();
57 |
58 | for (int i = 0; i < memberCount; i++) {
59 | builder
60 | .append(i)
61 | .append(',')
62 | .append("localhost:2011")
63 | .append(i)
64 | .append(',')
65 | .append("localhost:2022")
66 | .append(i)
67 | .append(',')
68 | .append("localhost:2033")
69 | .append(i)
70 | .append(',')
71 | .append("localhost:2044")
72 | .append(i)
73 | .append(',')
74 | .append("localhost:801")
75 | .append(i)
76 | .append('|');
77 | }
78 |
79 | builder.setLength(builder.length() - 1);
80 |
81 | return builder.toString();
82 | }
83 |
84 | private static String clusterMembersStatusEndpoints(final int staticMemberCount) {
85 | final StringBuilder builder = new StringBuilder();
86 |
87 | for (int i = 0; i < staticMemberCount; i++) {
88 | builder.append("localhost:2022").append(i).append(',');
89 | }
90 |
91 | builder.setLength(builder.length() - 1);
92 |
93 | return builder.toString();
94 | }
95 | }
96 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-examples/src/main/java/io/scalecube/acpoc/ClusterBackupEventsListenerImpl.java:
--------------------------------------------------------------------------------
1 | package io.scalecube.acpoc;
2 |
3 | import io.aeron.cluster.ClusterBackupEventsListener;
4 | import io.aeron.cluster.ClusterMember;
5 | import io.aeron.cluster.RecordingLog;
6 | import io.aeron.cluster.RecordingLog.Snapshot;
7 | import java.util.List;
8 | import org.slf4j.Logger;
9 | import org.slf4j.LoggerFactory;
10 |
11 | final class ClusterBackupEventsListenerImpl implements ClusterBackupEventsListener {
12 |
13 | private static final Logger LOGGER =
14 | LoggerFactory.getLogger(ClusterBackupEventsListenerImpl.class);
15 |
16 | @Override
17 | public void onBackupQuery() {
18 | LOGGER.info("[onBackupQuery]");
19 | }
20 |
21 | @Override
22 | public void onPossibleClusterFailure() {
23 | LOGGER.info("[onPossibleClusterFailure]");
24 | }
25 |
26 | @Override
27 | public void onBackupResponse(
28 | ClusterMember[] clusterMembers,
29 | ClusterMember leaderMember,
30 | List<Snapshot> snapshotsToRetrieve) {
31 | LOGGER.info(
32 | "[onBackupResponse] clusterMembers: {}, leader: {}, snapshotsToRetrieve: {}",
33 | clusterMembers,
34 | leaderMember,
35 | snapshotsToRetrieve);
36 | }
37 |
38 | @Override
39 | public void onUpdatedRecordingLog(RecordingLog recordingLog, List<Snapshot> snapshotsRetrieved) {
40 | LOGGER.info(
41 | "[onUpdatedRecordingLog] recordingLog: {}, snapshotsRetrieved: {}",
42 | recordingLog,
43 | snapshotsRetrieved);
44 | }
45 |
46 | @Override
47 | public void onLiveLogProgress(long recordingId, long recordingPosCounterId, long logPosition) {
48 | LOGGER.info(
49 | "[onLiveLogProgress] recordingId: {}, recordingPosCounterId: {}, logPosition: {}",
50 | recordingId,
51 | recordingPosCounterId,
52 | logPosition);
53 | }
54 | }
55 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-examples/src/main/java/io/scalecube/acpoc/ClusterBackupRunner.java:
--------------------------------------------------------------------------------
1 | package io.scalecube.acpoc;
2 |
3 | import static io.scalecube.acpoc.Configurations.CLEAN_SHUTDOWN;
4 | import static io.scalecube.acpoc.Configurations.CLEAN_START;
5 |
6 | import io.aeron.archive.Archive;
7 | import io.aeron.archive.ArchiveThreadingMode;
8 | import io.aeron.archive.client.AeronArchive;
9 | import io.aeron.cluster.ClusterBackup;
10 | import io.aeron.cluster.ClusterBackupMediaDriver;
11 | import io.aeron.driver.DefaultAllowTerminationValidator;
12 | import io.aeron.driver.MediaDriver;
13 | import io.aeron.driver.MinMulticastFlowControlSupplier;
14 | import io.aeron.driver.ThreadingMode;
15 | import java.io.File;
16 | import java.nio.file.Paths;
17 | import org.agrona.CloseHelper;
18 | import org.agrona.IoUtil;
19 | import org.slf4j.Logger;
20 | import org.slf4j.LoggerFactory;
21 | import reactor.core.publisher.Mono;
22 |
23 | public class ClusterBackupRunner {
24 |
25 | private static final Logger LOGGER = LoggerFactory.getLogger(ClusterBackupRunner.class);
26 |
27 | /**
28 | * Main function runner.
29 | *
30 | * @param args arguments
31 | */
32 | public static void main(String[] args) {
33 | String nodeId = "cluster-backup-" + Utils.instanceId();
34 | String nodeDirName = Paths.get("target", "aeron", "cluster", nodeId).toString();
35 |
36 | if (CLEAN_START) {
37 | IoUtil.delete(new File(nodeDirName), true);
38 | }
39 |
40 | LOGGER.info("Cluster node directory: " + nodeDirName);
41 |
42 | String aeronDirectoryName = Paths.get(nodeDirName, "media").toString();
43 |
44 | MediaDriver.Context mediaDriverContext =
45 | new MediaDriver.Context()
46 | .aeronDirectoryName(aeronDirectoryName)
47 | .dirDeleteOnStart(true)
48 | .errorHandler(ex -> LOGGER.error("Exception occurred at MediaDriver: ", ex))
49 | .multicastFlowControlSupplier(new MinMulticastFlowControlSupplier())
50 | .terminationHook(() -> LOGGER.info("TerminationHook called on MediaDriver "))
51 | .terminationValidator(new DefaultAllowTerminationValidator())
52 | .threadingMode(ThreadingMode.SHARED)
53 | .warnIfDirectoryExists(true);
54 |
55 | AeronArchive.Context aeronArchiveContext =
56 | new AeronArchive.Context().aeronDirectoryName(aeronDirectoryName);
57 |
58 | Archive.Context archiveContext =
59 | new Archive.Context()
60 | .aeronDirectoryName(aeronDirectoryName)
61 | .archiveDir(new File(nodeDirName, "archive"))
62 | .controlChannel(aeronArchiveContext.controlRequestChannel())
63 | .controlStreamId(aeronArchiveContext.controlRequestStreamId())
64 | .errorHandler(ex -> LOGGER.error("Exception occurred at Archive: ", ex))
65 | .localControlStreamId(aeronArchiveContext.controlRequestStreamId())
66 | .maxCatalogEntries(Configurations.MAX_CATALOG_ENTRIES)
67 | .recordingEventsChannel(aeronArchiveContext.recordingEventsChannel())
68 | .threadingMode(ArchiveThreadingMode.SHARED);
69 |
70 | ClusterBackup.Context clusterBackupContext =
71 | new ClusterBackup.Context()
72 | .aeronDirectoryName(aeronDirectoryName)
73 | .archiveContext(aeronArchiveContext.clone())
74 | .clusterDir(new File(nodeDirName, "cluster-backup"))
75 | .eventsListener(new ClusterBackupEventsListenerImpl());
76 |
77 | ClusterBackupMediaDriver clusterBackupMediaDriver =
78 | ClusterBackupMediaDriver.launch(mediaDriverContext, archiveContext, clusterBackupContext);
79 |
80 | Mono<Void> onShutdown =
81 | Utils.onShutdown(
82 | () -> {
83 | CloseHelper.close(clusterBackupMediaDriver);
84 | if (CLEAN_SHUTDOWN) {
85 | IoUtil.delete(new File(nodeDirName), true);
86 | }
87 | return null;
88 | });
89 | onShutdown.block();
90 | }
91 | }
92 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-examples/src/main/java/io/scalecube/acpoc/ClusterClientRunner.java:
--------------------------------------------------------------------------------
1 | package io.scalecube.acpoc;
2 |
3 | import io.aeron.cluster.client.AeronCluster;
4 | import io.aeron.driver.DefaultAllowTerminationValidator;
5 | import io.aeron.driver.MediaDriver;
6 | import java.nio.charset.StandardCharsets;
7 | import java.time.Duration;
8 | import org.agrona.CloseHelper;
9 | import org.agrona.concurrent.UnsafeBuffer;
10 | import org.slf4j.Logger;
11 | import org.slf4j.LoggerFactory;
12 | import reactor.core.Disposable;
13 | import reactor.core.publisher.Flux;
14 | import reactor.core.publisher.Mono;
15 |
16 | /** Runner that starts a cluster client which continuously sends requests to the cluster. */
17 | public class ClusterClientRunner {
18 |
19 | public static final Logger logger = LoggerFactory.getLogger(ClusterClientRunner.class);
20 |
21 | /**
22 | * Main method.
23 | *
24 | * @param args program arguments.
25 | */
26 | public static void main(String[] args) {
27 | MediaDriver clientMediaDriver =
28 | MediaDriver.launch(
29 | new MediaDriver.Context()
30 | .errorHandler(ex -> logger.error("Exception occurred at MediaDriver: ", ex))
31 | .terminationHook(() -> logger.info("TerminationHook called on MediaDriver "))
32 | .terminationValidator(new DefaultAllowTerminationValidator())
33 | .warnIfDirectoryExists(true)
34 | .dirDeleteOnStart(true)
35 | .dirDeleteOnShutdown(true));
36 |
37 | AeronCluster client =
38 | AeronCluster.connect(
39 | new AeronCluster.Context()
40 | .errorHandler(ex -> logger.error("Exception occurred at AeronCluster: ", ex))
41 | .egressListener(new EgressListenerImpl())
42 | .aeronDirectoryName(clientMediaDriver.aeronDirectoryName())
43 | .ingressChannel("aeron:udp"));
44 |
45 | Disposable sender =
46 | Flux.interval(Duration.ofSeconds(1))
47 | .subscribe(
48 | i -> {
49 | String request = "Hello to cluster " + i;
50 |
51 | byte[] bytes = request.getBytes(StandardCharsets.UTF_8);
52 | UnsafeBuffer buffer = new UnsafeBuffer(bytes);
53 | long l = client.offer(buffer, 0, bytes.length);
54 |
55 | logger.info("Client: REQUEST {} send, result={}", i, l);
56 | });
57 |
58 | Disposable receiver =
59 | Flux.interval(Duration.ofMillis(100)) //
60 | .subscribe(i -> client.pollEgress());
61 |
62 | Mono<Void> onShutdown =
63 | Utils.onShutdown(
64 | () -> {
65 | sender.dispose();
66 | receiver.dispose();
67 | CloseHelper.close(client);
68 | CloseHelper.close(clientMediaDriver);
69 | return null;
70 | });
71 | onShutdown.block();
72 | }
73 | }
74 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-examples/src/main/java/io/scalecube/acpoc/ClusteredServiceImpl.java:
--------------------------------------------------------------------------------
1 | package io.scalecube.acpoc;
2 |
3 | import io.aeron.ExclusivePublication;
4 | import io.aeron.Image;
5 | import io.aeron.cluster.ClusterControl;
6 | import io.aeron.cluster.ClusterControl.ToggleState;
7 | import io.aeron.cluster.codecs.CloseReason;
8 | import io.aeron.cluster.service.ClientSession;
9 | import io.aeron.cluster.service.Cluster;
10 | import io.aeron.cluster.service.Cluster.Role;
11 | import io.aeron.cluster.service.ClusteredService;
12 | import io.aeron.logbuffer.FragmentHandler;
13 | import io.aeron.logbuffer.Header;
14 | import java.util.Date;
15 | import java.util.concurrent.atomic.AtomicInteger;
16 | import org.agrona.DirectBuffer;
17 | import org.agrona.concurrent.UnsafeBuffer;
18 | import org.agrona.concurrent.status.AtomicCounter;
19 | import org.agrona.concurrent.status.CountersManager;
20 | import org.slf4j.Logger;
21 | import org.slf4j.LoggerFactory;
22 |
23 | public class ClusteredServiceImpl implements ClusteredService {
24 |
25 | private static final Logger logger = LoggerFactory.getLogger(ClusteredServiceImpl.class);
26 |
27 | public static final String TIMER_COMMAND = "SCHEDULE_TIMER";
28 | public static final String SNAPSHOT_COMMAND = "SNAPSHOT";
29 |
30 | private final CountersManager countersManager;
31 |
32 | private Cluster cluster;
33 |
34 | // State
35 |
36 | private final AtomicInteger serviceCounter = new AtomicInteger();
37 | private long timeIdCounter;
38 |
39 | public ClusteredServiceImpl(CountersManager countersManager) {
40 | this.countersManager = countersManager;
41 | }
42 |
43 | @Override
44 | public void onStart(Cluster cluster, Image snapshotImage) {
45 | this.cluster = cluster;
46 | logger.info(
47 | "onStart => memberId: {}, role: {}, client-sessions: {}",
48 | cluster.memberId(),
49 | cluster.role(),
50 | cluster.clientSessions().size());
51 | if (snapshotImage != null) {
52 | onLoadSnapshot(snapshotImage);
53 | }
54 | }
55 |
56 | @Override
57 | public void onSessionOpen(ClientSession session, long timestampMs) {
58 | logger.info(
59 | "onSessionOpen, timestampMs: {} => memberId: {}, sessionId: {}, "
60 | + "responseChannel: {}, responseStreamId: {}",
61 | timestampMs,
62 | cluster.memberId(),
63 | session.id(),
64 | session.responseChannel(),
65 | session.responseStreamId());
66 | }
67 |
68 | @Override
69 | public void onSessionClose(ClientSession session, long timestampMs, CloseReason closeReason) {
70 | logger.info(
71 | "onSessionClose, timestampMs: {} => memberId: {}, "
72 | + "sessionId: {}, responseChannel: {}, responseStreamId: {}, reason: {}",
73 | timestampMs,
74 | cluster.memberId(),
75 | session.id(),
76 | session.responseChannel(),
77 | session.responseStreamId(),
78 | closeReason);
79 | }
80 |
81 | @Override
82 | public void onSessionMessage(
83 | ClientSession session,
84 | long timestampMs,
85 | DirectBuffer buffer,
86 | int offset,
87 | int length,
88 | Header header) {
89 | byte[] bytes = new byte[length];
90 | buffer.getBytes(offset, bytes);
91 |
92 | String message = new String(bytes);
93 |
94 | logger.info(
95 | "### onSessionMessage, timestampMs: {} => memberId: {}, "
96 | + "sessionId: {}, position: {}, content: '{}'",
97 | new Date(timestampMs),
98 | cluster.memberId(),
99 | session.id(),
100 | header.position(),
101 | message);
102 |
103 | // Updated service state
104 | int value = serviceCounter.incrementAndGet();
105 |
106 | if (message.toUpperCase().startsWith(TIMER_COMMAND)) {
107 | long interval = Long.parseLong(message.substring(TIMER_COMMAND.length()).trim());
108 | scheduleTimer(++timeIdCounter, interval);
109 | }
110 |
111 | if (SNAPSHOT_COMMAND.equalsIgnoreCase(message)) {
112 | AtomicCounter controlToggle = ClusterControl.findControlToggle(countersManager);
113 | toggle(controlToggle, ToggleState.SNAPSHOT);
114 | }
115 |
116 | if (session != null) {
117 | if (cluster.role() == Role.LEADER) {
118 | // Send response back
119 | String response = message + ", ClusteredService.serviceCounter(value=" + value + ")";
120 | UnsafeBuffer buffer1 = new UnsafeBuffer(response.getBytes());
121 | long l = session.offer(buffer1, 0, buffer1.capacity());
122 | if (l > 0) {
123 | logger.info("Service: RESPONSE send result={}, serviceCounter(value={})", l, value);
124 | }
125 | }
126 | }
127 | }
128 |
129 | @Override
130 | public void onTimerEvent(long correlationId, long timestampMs) {
131 | logger.info(
132 | "*** onTimerEvent timestampMs: {} => memberId: {}, correlationId: {}",
133 | new Date(timestampMs),
134 | cluster.memberId(),
135 | correlationId);
136 | }
137 |
138 | @Override
139 | public void onTakeSnapshot(ExclusivePublication snapshotPublication) {
140 | logger.info(
141 | "onTakeSnapshot => publication: memberId: {}, sessionId: {}, channel: {}, "
142 | + "streamId: {}, position: {}",
143 | cluster.memberId(),
144 | snapshotPublication.sessionId(),
145 | snapshotPublication.channel(),
146 | snapshotPublication.streamId(),
147 | snapshotPublication.position());
148 |
149 | UnsafeBuffer buffer = new UnsafeBuffer(new byte[Integer.BYTES + Long.BYTES]);
150 | int value = serviceCounter.get();
151 | buffer.putInt(0, value);
152 | buffer.putLong(Integer.BYTES, timeIdCounter);
153 | long offer = snapshotPublication.offer(buffer);
154 |
155 | logger.info(
156 | "onTakeSnapshot => memberId: {}, serviceCounter(value={}) snapshot taken: {}",
157 | cluster.memberId(),
158 | value,
159 | offer);
160 | }
161 |
162 | private void onLoadSnapshot(Image snapshotImage) {
163 | logger.info(
164 | "onLoadSnapshot => image: memberId: {}, sessionId: {}, channel: {}, "
165 | + "streamId: {}, position: {}",
166 | cluster.memberId(),
167 | snapshotImage.sessionId(),
168 | snapshotImage.subscription().channel(),
169 | snapshotImage.subscription().streamId(),
170 | snapshotImage.position());
171 |
172 | FragmentHandler handler =
173 | (buffer, offset, length, header) -> {
174 | serviceCounter.set(buffer.getInt(offset));
175 | timeIdCounter = buffer.getLong(offset + Integer.BYTES);
176 | };
177 |
178 | while (true) {
179 | int fragments = snapshotImage.poll(handler, 1);
180 |
181 | if (fragments == 1) {
182 | break;
183 | }
184 | cluster.idle();
185 | System.out.print(".");
186 | }
187 |
188 | logger.info(
189 | "onLoadSnapshot => memberId: {}, applied new serviceCounter(value={})",
190 | cluster.memberId(),
191 | serviceCounter.get());
192 | }
193 |
194 | @Override
195 | public void onRoleChange(Cluster.Role newRole) {
196 | logger.info(
197 | "onRoleChange => memberId: {}, new role: {}, timestampMs: {}",
198 | cluster.memberId(),
199 | newRole,
200 | new Date(cluster.time()));
201 | }
202 |
203 | @Override
204 | public void onTerminate(Cluster cluster) {
205 | logger.info(
206 | "onTerminate => memberId: {}, role: {}, client-sessions: {}",
207 | cluster.memberId(),
208 | cluster.role(),
209 | cluster.clientSessions().size());
210 | }
211 |
212 | private void toggle(AtomicCounter controlToggle, ToggleState target) {
213 | ToggleState oldToggleState = ToggleState.get(controlToggle);
214 | boolean result = target.toggle(controlToggle);
215 | ToggleState newToggleState = ToggleState.get(controlToggle);
216 | logger.info(
217 | "ToggleState changed {}: {}->{}",
218 | result ? "successfully" : "unsuccessfully",
219 | oldToggleState,
220 | newToggleState);
221 | }
222 |
223 | private void scheduleTimer(long correlationId, long offset) {
224 | final long deadlineMs = cluster.time() + offset;
225 | boolean scheduleTimer = cluster.scheduleTimer(correlationId, deadlineMs);
226 | if (scheduleTimer) {
227 | logger.info("Timer ({}) scheduled at {}", correlationId, new Date(deadlineMs));
228 | }
229 | }
230 | }
231 |
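Illustrative note (not part of the source file): ClusteredServiceImpl reacts to plain-text payloads, so any connected AeronCluster client (for example InteractiveClient further below) can drive it. A minimal sketch of the recognized payloads, assuming an already-connected client:

// Illustrative sketch only; assumes 'client' is an already-connected
// io.aeron.cluster.client.AeronCluster (see ClusterClientRunner / InteractiveClient).
import io.aeron.cluster.client.AeronCluster;
import java.nio.charset.StandardCharsets;
import org.agrona.concurrent.UnsafeBuffer;

final class ServiceCommandsExample {

  static long send(AeronCluster client, String command) {
    byte[] bytes = command.getBytes(StandardCharsets.UTF_8);
    UnsafeBuffer buffer = new UnsafeBuffer(bytes);
    return client.offer(buffer, 0, bytes.length); // retry/back-off omitted for brevity
  }

  static void examples(AeronCluster client) {
    send(client, "SCHEDULE_TIMER 5000"); // schedules a cluster timer 5000 ms from cluster time
    send(client, "SNAPSHOT");            // toggles the SNAPSHOT control state on the consensus module
    send(client, "hello");               // any other payload is echoed back by the leader with the service counter
  }
}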
--------------------------------------------------------------------------------
/aeron-cluster-poc-examples/src/main/java/io/scalecube/acpoc/ClusteredServiceRunner.java:
--------------------------------------------------------------------------------
1 | package io.scalecube.acpoc;
2 |
3 | import io.aeron.agent.EventLogAgent;
4 | import io.aeron.archive.Archive;
5 | import io.aeron.archive.client.AeronArchive;
6 | import io.aeron.cluster.ClusteredMediaDriver;
7 | import io.aeron.cluster.ConsensusModule;
8 | import io.aeron.cluster.ConsensusModule.Configuration;
9 | import io.aeron.cluster.service.ClusteredService;
10 | import io.aeron.cluster.service.ClusteredServiceContainer;
11 | import io.aeron.driver.DefaultAllowTerminationValidator;
12 | import io.aeron.driver.MediaDriver;
13 | import io.aeron.driver.MinMulticastFlowControlSupplier;
14 | import java.io.File;
15 | import java.nio.file.Paths;
16 | import net.bytebuddy.agent.ByteBuddyAgent;
17 | import org.agrona.CloseHelper;
18 | import org.slf4j.Logger;
19 | import org.slf4j.LoggerFactory;
20 | import reactor.core.publisher.Mono;
21 |
22 | /**
23 | * Main class that starts a single cluster node; most of the cluster configuration is expected
24 | * to be passed via VM args.
25 | */
26 | public class ClusteredServiceRunner {
27 |
28 | private static final Logger logger = LoggerFactory.getLogger(ClusteredServiceRunner.class);
29 |
30 | /**
31 | * Main function runner.
32 | *
33 | * @param args arguments
34 | */
35 | public static void main(String[] args) {
36 | System.setProperty("aeron.event.cluster.log", "all");
37 | System.setProperty("aeron.event.archive.log", "all");
38 | System.setProperty("aeron.event.log", "admin");
39 | EventLogAgent.agentmain("", ByteBuddyAgent.install());
40 |
41 | String clusterMemberId = Integer.toHexString(Configuration.clusterMemberId());
42 | String nodeId = "node-" + clusterMemberId + "-" + Utils.instanceId();
43 | String nodeDirName = Paths.get("target", "aeron", "cluster", nodeId).toString();
44 |
45 | System.out.println("Cluster node directory: " + nodeDirName);
46 |
47 | MediaDriver.Context mediaDriverContext =
48 | new MediaDriver.Context()
49 | .errorHandler(ex -> logger.error("Exception occurred at MediaDriver: ", ex))
50 | .terminationHook(() -> logger.info("TerminationHook called on MediaDriver "))
51 | .terminationValidator(new DefaultAllowTerminationValidator())
52 | .warnIfDirectoryExists(true)
53 | .dirDeleteOnStart(true)
54 | .dirDeleteOnShutdown(true)
55 | .multicastFlowControlSupplier(new MinMulticastFlowControlSupplier());
56 |
57 | AeronArchive.Context aeronArchiveContext =
58 | new AeronArchive.Context().aeronDirectoryName(mediaDriverContext.aeronDirectoryName());
59 |
60 | Archive.Context archiveContext =
61 | new Archive.Context()
62 | .errorHandler(ex -> logger.error("Exception occurred at Archive: ", ex))
63 | .maxCatalogEntries(Configurations.MAX_CATALOG_ENTRIES)
64 | .aeronDirectoryName(mediaDriverContext.aeronDirectoryName())
65 | .archiveDir(new File(nodeDirName, "archive"))
66 | .controlChannel(aeronArchiveContext.controlRequestChannel())
67 | .controlStreamId(aeronArchiveContext.controlRequestStreamId())
68 | .localControlStreamId(aeronArchiveContext.controlRequestStreamId())
69 | .recordingEventsChannel(aeronArchiveContext.recordingEventsChannel());
70 |
71 | ConsensusModule.Context consensusModuleContext =
72 | new ConsensusModule.Context()
73 | .errorHandler(ex -> logger.error("Exception occurred at ConsensusModule: ", ex))
74 | .terminationHook(() -> logger.info("TerminationHook called on ConsensusModule"))
75 | .aeronDirectoryName(mediaDriverContext.aeronDirectoryName())
76 | .clusterDir(new File(nodeDirName, "consensus"))
77 | .archiveContext(aeronArchiveContext.clone());
78 |
79 | ClusteredMediaDriver clusteredMediaDriver =
80 | ClusteredMediaDriver.launch(mediaDriverContext, archiveContext, consensusModuleContext);
81 |
82 | ClusteredService clusteredService =
83 | new ClusteredServiceImpl(clusteredMediaDriver.mediaDriver().context().countersManager());
84 |
85 | ClusteredServiceContainer.Context clusteredServiceCtx =
86 | new ClusteredServiceContainer.Context()
87 | .errorHandler(ex -> logger.error("Exception occurred: " + ex, ex))
88 | .aeronDirectoryName(clusteredMediaDriver.mediaDriver().aeronDirectoryName())
89 | .archiveContext(aeronArchiveContext.clone())
90 | .clusterDir(new File(nodeDirName, "service"))
91 | .clusteredService(clusteredService);
92 |
93 | ClusteredServiceContainer clusteredServiceContainer =
94 | ClusteredServiceContainer.launch(clusteredServiceCtx);
95 |
96 | Mono<Void> onShutdown =
97 | Utils.onShutdown(
98 | () -> {
99 | CloseHelper.close(clusteredServiceContainer);
100 | CloseHelper.close(clusteredMediaDriver);
101 | return null;
102 | });
103 | onShutdown.block();
104 | }
105 | }
106 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-examples/src/main/java/io/scalecube/acpoc/Configurations.java:
--------------------------------------------------------------------------------
1 | package io.scalecube.acpoc;
2 |
3 | public class Configurations {
4 |
5 | public static final long MAX_CATALOG_ENTRIES = 1024;
6 |
7 | public static final String INSTANCE_ID =
8 | System.getProperty("io.scalecube.acpoc.instanceId", null);
9 |
10 | public static final boolean CLEAN_START = Boolean.getBoolean("io.scalecube.acpoc.cleanStart");
11 | public static final boolean CLEAN_SHUTDOWN =
12 | Boolean.getBoolean("io.scalecube.acpoc.cleanShutdown");
13 |
14 | private Configurations() {
15 | // no-op
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-examples/src/main/java/io/scalecube/acpoc/EgressListenerImpl.java:
--------------------------------------------------------------------------------
1 | package io.scalecube.acpoc;
2 |
3 | import io.aeron.cluster.client.EgressListener;
4 | import io.aeron.cluster.codecs.EventCode;
5 | import io.aeron.logbuffer.Header;
6 | import org.agrona.DirectBuffer;
7 | import org.slf4j.Logger;
8 | import org.slf4j.LoggerFactory;
9 |
10 | class EgressListenerImpl implements EgressListener {
11 |
12 | private static final Logger logger = LoggerFactory.getLogger(EgressListenerImpl.class);
13 |
14 | @Override
15 | public void onMessage(
16 | long clusterSessionId,
17 | long timestamp,
18 | DirectBuffer buffer,
19 | int offset,
20 | int length,
21 | Header header) {
22 | logger.info(
23 | "[onMessage]: timestamp: {}; from clusterSession: {}, position: {}, content: '{}'",
24 | timestamp,
25 | clusterSessionId,
26 | header.position(),
27 | buffer.getStringWithoutLengthAscii(offset, length));
28 | }
29 |
30 | @Override
31 | public void sessionEvent(
32 | long correlationId,
33 | long clusterSessionId,
34 | long leadershipTermId,
35 | int leaderMemberId,
36 | EventCode code,
37 | String detail) {
38 | logger.info(
39 | "[onSessionEvent]: correlationId: {}, clusterSessionId: {}, "
40 | + "leadershipTermId: {}, leaderMemberId: {}, eventCode: {}, detail: {}",
41 | correlationId,
42 | clusterSessionId,
43 | leadershipTermId,
44 | leaderMemberId,
45 | code,
46 | detail);
47 | }
48 |
49 | @Override
50 | public void newLeader(
51 | long clusterSessionId, long leadershipTermId, int leaderMemberId, String memberEndpoints) {
52 | logger.info(
53 | "[newLeader]: clusterSessionId: {}, "
54 | + "leadershipTermId: {}, leaderMemberId: {}, memberEndpoints: {}",
55 | clusterSessionId,
56 | leadershipTermId,
57 | leaderMemberId,
58 | memberEndpoints);
59 | }
60 | }
61 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-examples/src/main/java/io/scalecube/acpoc/InteractiveClient.java:
--------------------------------------------------------------------------------
1 | package io.scalecube.acpoc;
2 |
3 | import io.aeron.agent.EventLogAgent;
4 | import io.aeron.cluster.client.AeronCluster;
5 | import io.aeron.driver.DefaultAllowTerminationValidator;
6 | import io.aeron.driver.MediaDriver;
7 | import java.nio.charset.StandardCharsets;
8 | import java.time.Duration;
9 | import java.util.Scanner;
10 | import java.util.concurrent.Callable;
11 | import java.util.concurrent.Executors;
12 | import java.util.function.BiConsumer;
13 | import net.bytebuddy.agent.ByteBuddyAgent;
14 | import org.agrona.CloseHelper;
15 | import org.agrona.concurrent.UnsafeBuffer;
16 | import org.slf4j.Logger;
17 | import org.slf4j.LoggerFactory;
18 | import reactor.core.Disposable;
19 | import reactor.core.publisher.Flux;
20 | import reactor.core.publisher.Mono;
21 |
22 | public class InteractiveClient {
23 |
24 | private static final Logger logger = LoggerFactory.getLogger(InteractiveClient.class);
25 |
26 | private static MediaDriver clientMediaDriver;
27 | private static AeronCluster client;
28 | private static Disposable receiver;
29 |
30 | private static final BiConsumer<String, AeronCluster> stringSender =
31 | (str, client) -> {
32 | byte[] bytes = str.getBytes(StandardCharsets.UTF_8);
33 | UnsafeBuffer buffer = new UnsafeBuffer(bytes);
34 | long l = client.offer(buffer, 0, bytes.length);
35 | logger.info("Client: REQUEST '{}' sent, result={}", str, l);
36 | };
37 |
38 | /**
39 | * Main method.
40 | *
41 | * @param args program arguments.
42 | */
43 | public static void main(String[] args) {
44 | System.setProperty("aeron.event.cluster.log", "all");
45 | System.setProperty("aeron.event.archive.log", "all");
46 | System.setProperty("aeron.event.log", "admin");
47 | EventLogAgent.agentmain("", ByteBuddyAgent.install());
48 |
49 | startClient();
50 |
51 | Executors.newSingleThreadExecutor().submit(inputPollJob());
52 |
53 | receiver =
54 | Flux.interval(Duration.ofMillis(100)) //
55 | .subscribe(i -> client.pollEgress());
56 | Mono<Void> onShutdown = Utils.onShutdown(shutdownHook());
57 | onShutdown.block();
58 | }
59 |
60 | private static Runnable inputPollJob() {
61 | return () -> {
62 | while (true) {
63 | Scanner scanner = new Scanner(System.in);
64 | System.out.println(
65 | "Type request body and press enter to send to Aeron cluster. Q to quit... ");
66 | String payload = scanner.nextLine();
67 | if ("Q".equals(payload)) {
68 | client.close();
69 | break;
70 | }
71 | stringSender.accept(payload, client);
72 | }
73 | System.exit(0);
74 | };
75 | }
76 |
77 | private static Callable<Void> shutdownHook() {
78 | return () -> {
79 | System.out.println("Shutting down");
80 | CloseHelper.close(client);
81 | CloseHelper.close(clientMediaDriver);
82 | receiver.dispose();
83 | return null;
84 | };
85 | }
86 |
87 | private static void startClient() {
88 | System.out.println("Client starting.");
89 | clientMediaDriver =
90 | MediaDriver.launch(
91 | new MediaDriver.Context()
92 | .errorHandler(ex -> logger.error("Exception occurred at MediaDriver: ", ex))
93 | .terminationHook(() -> logger.info("TerminationHook called on MediaDriver "))
94 | .terminationValidator(new DefaultAllowTerminationValidator())
95 | .warnIfDirectoryExists(true)
96 | .dirDeleteOnStart(true)
97 | .dirDeleteOnShutdown(true));
98 | client =
99 | AeronCluster.connect(
100 | new AeronCluster.Context()
101 | .errorHandler(ex -> logger.error("Exception occurred at AeronCluster: ", ex))
102 | .egressListener(new EgressListenerImpl())
103 | .aeronDirectoryName(clientMediaDriver.aeronDirectoryName())
104 | .egressChannel("aeron:udp?endpoint=localhost:10020")
105 | .ingressChannel("aeron:udp?endpoint=localhost:10010"));
106 | System.out.println("Client started.");
107 | logger.debug("client: {}", client.context().clusterMemberEndpoints());
108 | }
109 | }
110 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-examples/src/main/java/io/scalecube/acpoc/Utils.java:
--------------------------------------------------------------------------------
1 | package io.scalecube.acpoc;
2 |
3 | import java.util.Optional;
4 | import java.util.concurrent.Callable;
5 | import org.slf4j.Logger;
6 | import org.slf4j.LoggerFactory;
7 | import reactor.core.publisher.Mono;
8 | import reactor.core.publisher.MonoProcessor;
9 | import sun.misc.Signal;
10 | import sun.misc.SignalHandler;
11 |
12 | public class Utils {
13 |
14 | public static final Logger logger = LoggerFactory.getLogger(Utils.class);
15 |
16 | private Utils() {
17 | // no-op
18 | }
19 |
20 | /** To allow the process to be interrupted, this method is called regularly in 'waiting' loops. */
21 | public static void checkInterruptedStatus() {
22 | if (Thread.currentThread().isInterrupted()) {
23 | fail("unexpected interrupt - test likely to have timed out");
24 | }
25 | }
26 |
27 | /**
28 | * Fail for a reason.
29 | *
30 | * @param reason to fail
31 | */
32 | public static void fail(String reason) {
33 | throw new IllegalStateException(reason);
34 | }
35 |
36 | /**
37 | * Listens to the JVM signals SIGTERM and SIGINT and applies the given shutdown lambda.
38 | *
39 | * @param callable shutdown lambda
40 | * @return mono result
41 | */
42 | public static Mono<Void> onShutdown(Callable<Void> callable) {
43 | MonoProcessor<Void> onShutdown = MonoProcessor.create();
44 |
45 | SignalHandler handler =
46 | signal -> {
47 | try {
48 | callable.call();
49 | } catch (Exception e) {
50 | logger.warn("Exception occurred at onShutdown callback: " + e, e);
51 | } finally {
52 | onShutdown.onComplete();
53 | }
54 | };
55 | Signal.handle(new Signal("INT"), handler);
56 | Signal.handle(new Signal("TERM"), handler);
57 |
58 | return onShutdown;
59 | }
60 |
61 | /**
62 | * Returns instance id.
63 | *
64 | * @return instance id
65 | */
66 | public static String instanceId() {
67 | return Optional.ofNullable(Configurations.INSTANCE_ID)
68 | .orElseGet(() -> "" + System.currentTimeMillis());
69 | }
70 | }
71 |
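Illustrative note (not part of the source file): every runner in this module follows the same pattern around Utils.onShutdown, blocking the main thread on the returned Mono until SIGINT or SIGTERM triggers the cleanup callback. A minimal sketch:

// Illustrative sketch only: the Utils.onShutdown(...) pattern used by the runners above.
import io.scalecube.acpoc.Utils;
import reactor.core.publisher.Mono;

final class ShutdownExample {
  public static void main(String[] args) {
    // ... start media driver / cluster components here ...
    Mono<Void> onShutdown =
        Utils.onShutdown(
            () -> {
              System.out.println("Releasing resources before exit");
              return null;
            });
    onShutdown.block(); // park the main thread until SIGINT or SIGTERM arrives
  }
}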
--------------------------------------------------------------------------------
/aeron-cluster-poc-examples/src/main/java/io/scalecube/acpoc/benchmarks/BenchmarkClusteredService.java:
--------------------------------------------------------------------------------
1 | package io.scalecube.acpoc.benchmarks;
2 |
3 | import io.aeron.ExclusivePublication;
4 | import io.aeron.Image;
5 | import io.aeron.Publication;
6 | import io.aeron.cluster.codecs.CloseReason;
7 | import io.aeron.cluster.service.ClientSession;
8 | import io.aeron.cluster.service.Cluster;
9 | import io.aeron.cluster.service.Cluster.Role;
10 | import io.aeron.cluster.service.ClusteredService;
11 | import io.aeron.logbuffer.Header;
12 | import org.agrona.DirectBuffer;
13 | import org.slf4j.Logger;
14 | import org.slf4j.LoggerFactory;
15 |
16 | public class BenchmarkClusteredService implements ClusteredService {
17 |
18 | private static final Logger logger = LoggerFactory.getLogger(BenchmarkClusteredService.class);
19 |
20 | private Cluster cluster;
21 |
22 | @Override
23 | public void onStart(Cluster cluster, Image snapshotImage) {
24 | this.cluster = cluster;
25 | logger.info(
26 | "onStart => memberId: {}, role: {}, client-sessions: {}",
27 | cluster.memberId(),
28 | cluster.role(),
29 | cluster.clientSessions().size());
30 | if (snapshotImage != null) {
31 | onLoadSnapshot(snapshotImage);
32 | }
33 | }
34 |
35 | @Override
36 | public void onSessionOpen(ClientSession session, long timestampMs) {
37 | logger.info(
38 | "onSessionOpen, timestampMs: {} => memberId: {}, sessionId: {}, "
39 | + "responseChannel: {}, responseStreamId: {}",
40 | timestampMs,
41 | cluster.memberId(),
42 | session.id(),
43 | session.responseChannel(),
44 | session.responseStreamId());
45 | }
46 |
47 | @Override
48 | public void onSessionClose(ClientSession session, long timestampMs, CloseReason closeReason) {
49 | logger.info(
50 | "onSessionClose, timestampMs: {} => memberId: {}, "
51 | + "sessionId: {}, responseChannel: {}, responseStreamId: {}, reason: {}",
52 | timestampMs,
53 | cluster.memberId(),
54 | session.id(),
55 | session.responseChannel(),
56 | session.responseStreamId(),
57 | closeReason);
58 | }
59 |
60 | @Override
61 | public void onSessionMessage(
62 | ClientSession session,
63 | long timestampMs,
64 | DirectBuffer buffer,
65 | int offset,
66 | int length,
67 | Header header) {
68 | if (cluster.role() == Role.LEADER) {
69 | // Send response back
70 | while (true) {
71 | long result = session.offer(buffer, offset, length);
72 | if (result > 0) {
73 | break;
74 | }
75 | checkResultAndIdle(result);
76 | }
77 | }
78 | }
79 |
80 | @Override
81 | public void onTimerEvent(long correlationId, long timestampMs) {
82 | logger.info(
83 | "onTimerEvent, timestampMs: {} => memberId: {}, correlationId: {}",
84 | timestampMs,
85 | cluster.memberId(),
86 | correlationId);
87 | }
88 |
89 | @Override
90 | public void onTakeSnapshot(ExclusivePublication snapshotPublication) {
91 | logger.info(
92 | "onTakeSnapshot => publication: memberId: {}, sessionId: {}, channel: {}, "
93 | + "streamId: {}, position: {}",
94 | cluster.memberId(),
95 | snapshotPublication.sessionId(),
96 | snapshotPublication.channel(),
97 | snapshotPublication.streamId(),
98 | snapshotPublication.position());
99 | }
100 |
101 | private void onLoadSnapshot(Image snapshotImage) {
102 | logger.info(
103 | "onLoadSnapshot => image: memberId: {}, sessionId: {}, channel: {}, "
104 | + "streamId: {}, position: {}",
105 | cluster.memberId(),
106 | snapshotImage.sessionId(),
107 | snapshotImage.subscription().channel(),
108 | snapshotImage.subscription().streamId(),
109 | snapshotImage.position());
110 | }
111 |
112 | @Override
113 | public void onRoleChange(Role newRole) {
114 | logger.info("onRoleChange => memberId: {}, new role: {}", cluster.memberId(), newRole);
115 | }
116 |
117 | @Override
118 | public void onTerminate(Cluster cluster) {
119 | logger.info(
120 | "onTerminate => memberId: {}, role: {}, client-sessions: {}",
121 | cluster.memberId(),
122 | cluster.role(),
123 | cluster.clientSessions().size());
124 | }
125 |
126 | private void checkResultAndIdle(long result) {
127 | if (result == Publication.NOT_CONNECTED
128 | || result == Publication.CLOSED
129 | || result == Publication.MAX_POSITION_EXCEEDED) {
130 | throw new IllegalStateException("unexpected publication state: " + result);
131 | }
132 | if (Thread.currentThread().isInterrupted()) {
133 | throw new IllegalStateException("Unexpected interrupt");
134 | }
135 | cluster.idle();
136 | }
137 | }
138 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-examples/src/main/java/io/scalecube/acpoc/benchmarks/BenchmarkConfigurations.java:
--------------------------------------------------------------------------------
1 | package io.scalecube.acpoc.benchmarks;
2 |
3 | public interface BenchmarkConfigurations {
4 |
5 | int MESSAGE_LENGTH = Integer.getInteger("io.scalecube.acpoc.messageLength", 32);
6 | long REPORT_INTERVAL = Long.getLong("io.scalecube.acpoc.report.interval", 1);
7 | long WARMUP_REPORT_DELAY = Long.getLong("io.scalecube.acpoc.report.delay", REPORT_INTERVAL);
8 | long TRACE_REPORTER_INTERVAL = Long.getLong("io.scalecube.acpoc.trace.report.interval", 60);
9 | String TARGET_FOLDER_FOLDER_LATENCY =
10 | System.getProperty(
11 | "io.scalecube.acpoc.report.traces.folder.latency", "./target/traces/reports/latency/");
12 | String REPORT_NAME =
13 | System.getProperty("io.scalecube.acpoc.report.name", String.valueOf(System.nanoTime()));
14 | long NUMBER_OF_MESSAGES = Long.getLong("io.scalecube.acpoc.messages", 100_000_000);
15 | int REQUESTED = Integer.getInteger("io.scalecube.acpoc.request", 16);
16 | }
17 |
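Illustrative note (not part of the source file): these constants are plain JVM system properties, so they can be overridden with -D flags on the command line or programmatically before the interface is first loaded. A minimal sketch, using the property names from the interface above:

// Illustrative sketch only: overriding the benchmark knobs before BenchmarkConfigurations
// is initialized; afterwards the constants are frozen for the lifetime of the JVM.
public class BenchmarkConfigExample {
  public static void main(String[] args) {
    System.setProperty("io.scalecube.acpoc.messageLength", "256");
    System.setProperty("io.scalecube.acpoc.messages", "1000000");
    System.setProperty("io.scalecube.acpoc.request", "32");

    // Command-line equivalent:
    //   java -Dio.scalecube.acpoc.messageLength=256 ... io.scalecube.acpoc.benchmarks.ClusterClientPing
    System.out.println(io.scalecube.acpoc.benchmarks.BenchmarkConfigurations.MESSAGE_LENGTH); // 256
  }
}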
--------------------------------------------------------------------------------
/aeron-cluster-poc-examples/src/main/java/io/scalecube/acpoc/benchmarks/ClusterClientPing.java:
--------------------------------------------------------------------------------
1 | package io.scalecube.acpoc.benchmarks;
2 |
3 | import io.aeron.cluster.client.AeronCluster;
4 | import io.aeron.driver.MediaDriver;
5 | import io.aeron.driver.MediaDriver.Context;
6 | import io.aeron.logbuffer.Header;
7 | import java.util.concurrent.TimeUnit;
8 | import org.HdrHistogram.Recorder;
9 | import org.agrona.BitUtil;
10 | import org.agrona.BufferUtil;
11 | import org.agrona.DirectBuffer;
12 | import org.agrona.concurrent.IdleStrategy;
13 | import org.agrona.concurrent.UnsafeBuffer;
14 | import org.agrona.concurrent.YieldingIdleStrategy;
15 | import org.agrona.console.ContinueBarrier;
16 | import reactor.core.Disposable;
17 |
18 | /** Runner that starts a cluster client and measures round-trip (ping/pong) latencies against the cluster. */
19 | public class ClusterClientPing {
20 |
21 | private static final int MESSAGE_LENGTH = BenchmarkConfigurations.MESSAGE_LENGTH;
22 | private static final long NUMBER_OF_MESSAGES = BenchmarkConfigurations.NUMBER_OF_MESSAGES;
23 |
24 | private static final int REQUESTED = BenchmarkConfigurations.REQUESTED;
25 |
26 | private static final UnsafeBuffer OFFER_BUFFER =
27 | new UnsafeBuffer(BufferUtil.allocateDirectAligned(MESSAGE_LENGTH, BitUtil.CACHE_LINE_LENGTH));
28 |
29 | private static final Recorder HISTOGRAM = new Recorder(TimeUnit.SECONDS.toNanos(10), 3);
30 | private static final LatencyReporter latencyReporter = new LatencyReporter(HISTOGRAM);
31 | private static final IdleStrategy IDLE_STRATEGY = new YieldingIdleStrategy();
32 |
33 | /**
34 | * Main method.
35 | *
36 | * @param args program arguments.
37 | */
38 | public static void main(String[] args) throws InterruptedException {
39 | try (MediaDriver clientMediaDriver =
40 | MediaDriver.launch(
41 | new Context()
42 | .warnIfDirectoryExists(true)
43 | .dirDeleteOnStart(true)
44 | .dirDeleteOnShutdown(true)
45 | .printConfigurationOnStart(true)
46 | .errorHandler(Throwable::printStackTrace));
47 | AeronCluster client =
48 | AeronCluster.connect(
49 | new AeronCluster.Context()
50 | .egressListener(
51 | (clusterSessionId, timestampMs, buffer, offset, length, header) ->
52 | pongHandler(buffer, offset, length, header))
53 | .aeronDirectoryName(clientMediaDriver.aeronDirectoryName())
54 | .ingressChannel("aeron:udp")
55 | .errorHandler(Throwable::printStackTrace))) {
56 |
57 | Thread.sleep(100);
58 | ContinueBarrier barrier = new ContinueBarrier("Execute again?");
59 |
60 | do {
61 | System.out.println("Pinging " + NUMBER_OF_MESSAGES + " messages");
62 | Disposable reporterDisposable = latencyReporter.start();
63 | roundTripMessages(client);
64 | Thread.sleep(100);
65 | reporterDisposable.dispose();
66 | System.out.println("Histogram of RTT latencies in microseconds.");
67 | } while (barrier.await());
68 | }
69 | }
70 |
71 | private static void roundTripMessages(AeronCluster client) {
72 | HISTOGRAM.reset();
73 |
74 | int produced = 0;
75 | int received = 0;
76 |
77 | for (long i = 0; i < NUMBER_OF_MESSAGES; ) {
78 | int workCount = 0;
79 |
80 | if (produced < REQUESTED) {
81 | OFFER_BUFFER.putLong(0, System.nanoTime());
82 |
83 | final long offeredPosition = client.offer(OFFER_BUFFER, 0, MESSAGE_LENGTH);
84 |
85 | if (offeredPosition > 0) {
86 | i++;
87 | workCount = 1;
88 | produced++;
89 | }
90 | }
91 |
92 | final int poll = client.pollEgress();
93 |
94 | workCount += poll;
95 | received += poll;
96 | produced -= poll;
97 |
98 | IDLE_STRATEGY.idle(workCount);
99 | }
100 |
101 | while (received < NUMBER_OF_MESSAGES) {
102 | final int poll = client.pollEgress();
103 | received += poll;
104 | IDLE_STRATEGY.idle(poll);
105 | }
106 | }
107 |
108 | private static void pongHandler(
109 | final DirectBuffer buffer, final int offset, final int length, final Header header) {
110 | final long pingTimestamp = buffer.getLong(offset);
111 | final long rttNs = System.nanoTime() - pingTimestamp;
112 |
113 | HISTOGRAM.recordValue(rttNs);
114 | }
115 | }
116 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-examples/src/main/java/io/scalecube/acpoc/benchmarks/ClusteredServiceRunner.java:
--------------------------------------------------------------------------------
1 | package io.scalecube.acpoc.benchmarks;
2 |
3 | import io.aeron.archive.Archive;
4 | import io.aeron.archive.client.AeronArchive;
5 | import io.aeron.cluster.ClusteredMediaDriver;
6 | import io.aeron.cluster.ConsensusModule;
7 | import io.aeron.cluster.ConsensusModule.Configuration;
8 | import io.aeron.cluster.service.ClusteredServiceContainer;
9 | import io.aeron.driver.MediaDriver;
10 | import io.aeron.driver.MinMulticastFlowControlSupplier;
11 | import io.scalecube.acpoc.Configurations;
12 | import io.scalecube.acpoc.Utils;
13 | import java.io.File;
14 | import java.nio.file.Paths;
15 | import org.agrona.CloseHelper;
16 | import org.agrona.IoUtil;
17 | import reactor.core.publisher.Mono;
18 |
19 | /**
20 | * Main class that starts a single cluster node; most of the cluster configuration is expected
21 | * to be passed via VM args.
22 | */
23 | public class ClusteredServiceRunner {
24 |
25 | /**
26 | * Main function runner.
27 | *
28 | * @param args arguments
29 | */
30 | public static void main(String[] args) {
31 | String clusterMemberId = Integer.toHexString(Configuration.clusterMemberId());
32 | String nodeId = "node-" + clusterMemberId + "-" + Utils.instanceId();
33 | String nodeDirName = Paths.get(IoUtil.tmpDirName(), "aeron", "cluster", nodeId).toString();
34 |
35 | if (Configurations.CLEAN_START) {
36 | IoUtil.delete(new File(nodeDirName), true);
37 | }
38 |
39 | System.out.println("Cluster node directory: " + nodeDirName);
40 |
41 | MediaDriver.Context mediaDriverContext =
42 | new MediaDriver.Context()
43 | .warnIfDirectoryExists(true)
44 | .dirDeleteOnStart(true)
45 | .dirDeleteOnShutdown(true)
46 | .printConfigurationOnStart(true)
47 | .errorHandler(Throwable::printStackTrace)
48 | .multicastFlowControlSupplier(new MinMulticastFlowControlSupplier());
49 |
50 | AeronArchive.Context aeronArchiveContext =
51 | new AeronArchive.Context().aeronDirectoryName(mediaDriverContext.aeronDirectoryName());
52 |
53 | Archive.Context archiveContext =
54 | new Archive.Context()
55 | .maxCatalogEntries(Configurations.MAX_CATALOG_ENTRIES)
56 | .deleteArchiveOnStart(true)
57 | .aeronDirectoryName(mediaDriverContext.aeronDirectoryName())
58 | .archiveDir(new File(nodeDirName, "archive"))
59 | .controlChannel(aeronArchiveContext.controlRequestChannel())
60 | .controlStreamId(aeronArchiveContext.controlRequestStreamId())
61 | .localControlStreamId(aeronArchiveContext.controlRequestStreamId())
62 | .recordingEventsChannel(aeronArchiveContext.recordingEventsChannel());
63 |
64 | ConsensusModule.Context consensusModuleCtx =
65 | new ConsensusModule.Context()
66 | .errorHandler(Throwable::printStackTrace)
67 | .aeronDirectoryName(mediaDriverContext.aeronDirectoryName())
68 | .clusterDir(new File(nodeDirName, "consensus-module"))
69 | .archiveContext(aeronArchiveContext.clone());
70 |
71 | ClusteredMediaDriver clusteredMediaDriver =
72 | ClusteredMediaDriver.launch(mediaDriverContext, archiveContext, consensusModuleCtx);
73 |
74 | ClusteredServiceContainer.Context clusteredServiceCtx =
75 | new ClusteredServiceContainer.Context()
76 | .errorHandler(Throwable::printStackTrace)
77 | .aeronDirectoryName(clusteredMediaDriver.mediaDriver().aeronDirectoryName())
78 | .archiveContext(aeronArchiveContext.clone())
79 | .clusterDir(new File(nodeDirName, "service"))
80 | .clusteredService(new BenchmarkClusteredService());
81 |
82 | ClusteredServiceContainer clusteredServiceContainer =
83 | ClusteredServiceContainer.launch(clusteredServiceCtx);
84 |
85 | Mono<Void> onShutdown =
86 | Utils.onShutdown(
87 | () -> {
88 | CloseHelper.quietClose(clusteredServiceContainer);
89 | CloseHelper.quietClose(clusteredMediaDriver);
90 | if (Configurations.CLEAN_SHUTDOWN) {
91 | IoUtil.delete(new File(nodeDirName), true);
92 | }
93 | return null;
94 | });
95 | onShutdown.block();
96 | }
97 | }
98 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-examples/src/main/java/io/scalecube/acpoc/benchmarks/LatencyReporter.java:
--------------------------------------------------------------------------------
1 | package io.scalecube.acpoc.benchmarks;
2 |
3 | import io.scalecube.trace.TraceReporter;
4 | import io.scalecube.trace.jsonbin.JsonbinResponse;
5 | import java.time.Duration;
6 | import org.HdrHistogram.Recorder;
7 | import reactor.core.Disposable;
8 | import reactor.core.Disposables;
9 | import reactor.core.publisher.Flux;
10 | import reactor.core.scheduler.Schedulers;
11 |
12 | public class LatencyReporter {
13 |
14 | private static final TraceReporter reporter = new TraceReporter();
15 |
16 | private final Recorder histogram;
17 | private final String name;
18 |
19 | public LatencyReporter(Recorder histogram) {
20 | this(histogram, BenchmarkConfigurations.REPORT_NAME);
21 | }
22 |
23 | public LatencyReporter(Recorder histogram, String name) {
24 | this.histogram = histogram;
25 | this.name = name;
26 | }
27 |
28 | /**
29 | * Starts the reporter.
30 | *
31 | * @return disposable result.
32 | */
33 | public Disposable start() {
34 | if (reporter.isActive()) {
35 | return Disposables.composite(
36 | Flux.interval(
37 | Duration.ofSeconds(BenchmarkConfigurations.WARMUP_REPORT_DELAY),
38 | Duration.ofSeconds(BenchmarkConfigurations.TRACE_REPORTER_INTERVAL))
39 | .publishOn(Schedulers.single())
40 | .flatMap(
41 | i ->
42 | reporter
43 | .sendToJsonbin()
44 | .filter(JsonbinResponse::success)
45 | .flatMap(
46 | res ->
47 | reporter.dumpToFile(
48 | BenchmarkConfigurations.TARGET_FOLDER_FOLDER_LATENCY,
49 | res.name(),
50 | res)))
51 | .subscribe(),
52 | Flux.interval(
53 | Duration.ofSeconds(BenchmarkConfigurations.WARMUP_REPORT_DELAY),
54 | Duration.ofSeconds(BenchmarkConfigurations.REPORT_INTERVAL))
55 | .publishOn(Schedulers.single())
56 | .doOnNext(
57 | i ->
58 | reporter.addY(this.name, histogram.getIntervalHistogram().getMean() / 1000.0))
59 | .subscribe());
60 | }
61 | return Flux.interval(
62 | Duration.ofSeconds(BenchmarkConfigurations.WARMUP_REPORT_DELAY),
63 | Duration.ofSeconds(BenchmarkConfigurations.REPORT_INTERVAL))
64 | .publishOn(Schedulers.single())
65 | .doOnNext(
66 | i -> {
67 | System.out.println("---- PING/PONG HISTO ----");
68 | histogram
69 | .getIntervalHistogram()
70 | .outputPercentileDistribution(System.out, 5, 1000.0, false);
71 | System.out.println("---- PING/PONG HISTO ----");
72 | })
73 | .subscribe();
74 | }
75 | }
76 |
--------------------------------------------------------------------------------
/aeron-cluster-poc-examples/src/main/resources/log4j2.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | %level{length=1} %d{ISO8601} %c{1.} %m [%t]%n
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.7'
2 | services:
3 | node0:
4 | build: .
5 | ports:
6 | - "20110:20110"
7 | - "20120:20120"
8 | - "20130:20130"
9 | - "20140:20140"
10 | - "20150:20150"
11 | - "8010:8010"
12 | volumes:
13 | - aeron-fs0:/app/target/aeron/cluster
14 | environment:
15 | - NUMBER=0
16 | networks:
17 | app_net:
18 | ipv4_address: 10.5.0.31
19 | shm_size: '1gb'
20 |
21 | node1:
22 | build: .
23 | ports:
24 | - "20111:20111"
25 | - "20121:20121"
26 | - "20131:20131"
27 | - "20141:20141"
28 | - "20151:20151"
29 | - "8011:8011"
30 | volumes:
31 | - aeron-fs1:/app/target/aeron/cluster
32 | environment:
33 | - NUMBER=1
34 | networks:
35 | app_net:
36 | ipv4_address: 10.5.0.41
37 | shm_size: '1gb'
38 |
39 | node2:
40 | build: .
41 | ports:
42 | - "20112:20112"
43 | - "20122:20122"
44 | - "20132:20132"
45 | - "20142:20142"
46 | - "20152:20152"
47 | - "8012:8012"
48 | volumes:
49 | - aeron-fs2:/app/target/aeron/cluster
50 | environment:
51 | - NUMBER=2
52 | networks:
53 | app_net:
54 | ipv4_address: 10.5.0.51
55 | shm_size: '1gb'
56 |
57 |
58 | networks:
59 | app_net:
60 | ipam:
61 | driver: default
62 | config:
63 | - subnet: 10.5.0.0/24
64 | # gateway: 10.5.0.1
65 |
66 |
67 | volumes:
68 | aeron-fs0:
69 | aeron-fs1:
70 | aeron-fs2:
71 |
--------------------------------------------------------------------------------
/pom.xml:
--------------------------------------------------------------------------------
1 |
4 | 4.0.0
5 |
6 |
7 | io.scalecube
8 | scalecube-parent-pom
9 | 0.1.0-RC1
10 |
11 |
12 | aeron-cluster-poc-parent
13 |
14 | This project main goal is to get to know aeron-cluster and raft algorithm
15 |
16 |
17 | 0.0.2-SNAPSHOT
18 | aeron-cluster-poc
19 | pom
20 |
21 |
22 | https://github.com/scalecube/aeron-cluster-poc
23 | scm:git:git@github.com:scalecube/aeron-cluster-poc.git
24 | scm:git:git@github.com:scalecube/aeron-cluster-poc.git
25 |
26 | HEAD
27 |
28 |
29 |
30 | 1.25.1
31 | 1.16.1
32 | 1.3.0
33 | 2.10.1
34 | Californium-SR5
35 | 1.7.7
36 | 2.11.0
37 | 3.4.2
38 | 2.1.10
39 | 1.21
40 | 5.0
41 | 1.0.1
42 | 2.8.9
43 | 0.0.7
44 |
45 |
46 |
47 | aeron-cluster-poc-client
48 | aeron-cluster-poc-examples
49 | aeron-cluster-poc-benchmarks
50 |
51 |
52 |
53 |
54 |
55 | io.aeron
56 | aeron-driver
57 | ${aeron.version}
58 |
59 |
60 | io.aeron
61 | aeron-client
62 | ${aeron.version}
63 |
64 |
65 | io.aeron
66 | aeron-samples
67 | ${aeron.version}
68 |
69 |
70 | io.aeron
71 | aeron-cluster
72 | ${aeron.version}
73 |
74 |
75 | io.aeron
76 | aeron-agent
77 | ${aeron.version}
78 |
79 |
80 |
81 | net.bytebuddy
82 | byte-buddy
83 | 1.9.10
84 |
85 |
86 |
87 | net.bytebuddy
88 | byte-buddy-agent
89 | 1.9.10
90 |
91 |
92 |
93 |
94 | org.openjdk.jmh
95 | jmh-core
96 | ${jmh.version}
97 |
98 |
99 | org.openjdk.jmh
100 | jmh-generator-annprocess
101 | ${jmh.version}
102 |
103 |
104 |
105 |
106 | io.scalecube
107 | scalecube-commons
108 | ${scalecube-commons.version}
109 |
110 |
111 | io.scalecube
112 | scalecube-services-discovery
113 | ${scalecube-services.version}
114 |
115 |
116 |
117 | com.opencsv
118 | opencsv
119 | ${opencsv.version}
120 |
121 |
122 |
123 | io.projectreactor
124 | reactor-bom
125 | ${reactor.version}
126 | pom
127 | import
128 |
129 |
130 |
131 |
132 | com.fasterxml.jackson.core
133 | jackson-core
134 | ${jackson.version}
135 |
136 |
137 | com.fasterxml.jackson.core
138 | jackson-annotations
139 | ${jackson.version}
140 |
141 |
142 | com.fasterxml.jackson.core
143 | jackson-databind
144 | ${jackson.version}
145 |
146 |
147 | com.fasterxml.jackson.datatype
148 | jackson-datatype-jsr310
149 | ${jackson.version}
150 |
151 |
152 | com.fasterxml.jackson.datatype
153 | jackson-datatype-jdk8
154 | ${jackson.version}
155 |
156 |
157 |
158 |
159 | org.slf4j
160 | slf4j-api
161 | ${slf4j.version}
162 |
163 |
164 | org.apache.logging.log4j
165 | log4j-slf4j-impl
166 | ${log4j.version}
167 |
168 |
169 | org.apache.logging.log4j
170 | log4j-core
171 | ${log4j.version}
172 |
173 |
174 |
175 | com.lmax
176 | disruptor
177 | ${disruptor.version}
178 |
179 |
180 |
181 | org.hdrhistogram
182 | HdrHistogram
183 | ${hdrHistogram.version}
184 |
185 |
186 | io.scalecube
187 | trace-reporter
188 | ${scalecube.trace-reporter.version}
189 |
190 |
191 |
192 |
193 |
194 |
--------------------------------------------------------------------------------
/profiler/aeron-stat.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | java \
4 | -cp profiler/samples.jar \
5 | -Daeron.dir=$1 \
6 | io.aeron.samples.AeronStat
7 |
--------------------------------------------------------------------------------
/profiler/async-profiler.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/scalecube/aeron-cluster-poc/bdd977775d4dda6f981cf6086157c84026fb3a25/profiler/async-profiler.jar
--------------------------------------------------------------------------------
/profiler/jattach:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/scalecube/aeron-cluster-poc/bdd977775d4dda6f981cf6086157c84026fb3a25/profiler/jattach
--------------------------------------------------------------------------------
/profiler/libasyncProfiler.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/scalecube/aeron-cluster-poc/bdd977775d4dda6f981cf6086157c84026fb3a25/profiler/libasyncProfiler.so
--------------------------------------------------------------------------------
/profiler/profiler.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | usage() {
4 | echo "Usage: $0 [action] [options] <pid>"
5 | echo "Actions:"
6 | echo " start start profiling and return immediately"
7 | echo " resume resume profiling without resetting collected data"
8 | echo " stop stop profiling"
9 | echo " status print profiling status"
10 | echo " list list profiling events supported by the target JVM"
11 | echo " collect collect profile for the specified period of time"
12 | echo " and then stop (default action)"
13 | echo "Options:"
14 | echo " -e event profiling event: cpu|alloc|lock|cache-misses etc."
15 | echo " -d duration run profiling for <duration> seconds"
16 | echo " -f filename dump output to <filename>"
17 | echo " -i interval sampling interval in nanoseconds"
18 | echo " -j jstackdepth maximum Java stack depth"
19 | echo " -b bufsize frame buffer size"
20 | echo " -t profile different threads separately"
21 | echo " -s simple class names instead of FQN"
22 | echo " -g print method signatures"
23 | echo " -a annotate Java method names"
24 | echo " -o fmt output format: summary|traces|flat|collapsed|svg|tree|jfr"
25 | echo " -v, --version display version string"
26 | echo ""
27 | echo " --title string SVG title"
28 | echo " --width px SVG width"
29 | echo " --height px SVG frame height"
30 | echo " --minwidth px skip frames smaller than px"
31 | echo " --reverse generate stack-reversed FlameGraph / Call tree"
32 | echo ""
33 | echo " --all-kernel only include kernel-mode events"
34 | echo " --all-user only include user-mode events"
35 | echo ""
36 | echo "<pid> is a numeric process ID of the target JVM"
37 | echo " or 'jps' keyword to find running JVM automatically"
38 | echo ""
39 | echo "Example: $0 -d 30 -f profile.svg 3456"
40 | echo " $0 start -i 999000 jps"
41 | echo " $0 stop -o summary,flat jps"
42 | exit 1
43 | }
44 |
45 | mirror_output() {
46 | # Mirror output from temporary file to local terminal
47 | if [[ $USE_TMP ]]; then
48 | if [[ -f $FILE ]]; then
49 | cat "$FILE"
50 | rm "$FILE"
51 | fi
52 | fi
53 | }
54 |
55 | check_if_terminated() {
56 | if ! kill -0 $PID 2> /dev/null; then
57 | mirror_output
58 | exit 0
59 | fi
60 | }
61 |
62 | jattach() {
63 | "$JATTACH" $PID load "$PROFILER" true "$1" > /dev/null
64 | RET=$?
65 |
66 | # Check if jattach failed
67 | if [ $RET -ne 0 ]; then
68 | if [ $RET -eq 255 ]; then
69 | echo "Failed to inject profiler into $PID"
70 | if [ "$UNAME_S" == "Darwin" ]; then
71 | otool -L "$PROFILER"
72 | else
73 | ldd "$PROFILER"
74 | fi
75 | fi
76 | exit $RET
77 | fi
78 |
79 | mirror_output
80 | }
81 |
82 | function abspath() {
83 | if [ "$UNAME_S" == "Darwin" ]; then
84 | perl -MCwd -e 'print Cwd::abs_path shift' "$1"
85 | else
86 | readlink -f "$1"
87 | fi
88 | }
89 |
90 |
91 | OPTIND=1
92 | UNAME_S=$(uname -s)
93 | SCRIPT_DIR=$(dirname "$(abspath "$0")")
94 | JATTACH=$SCRIPT_DIR/jattach
95 | PROFILER=$SCRIPT_DIR/libasyncProfiler.so
96 | ACTION="collect"
97 | EVENT="cpu"
98 | DURATION="60"
99 | FILE=""
100 | USE_TMP="true"
101 | INTERVAL=""
102 | JSTACKDEPTH=""
103 | FRAMEBUF=""
104 | THREADS=""
105 | RING=""
106 | OUTPUT=""
107 | FORMAT=""
108 |
109 | while [[ $# -gt 0 ]]; do
110 | case $1 in
111 | -h|"-?")
112 | usage
113 | ;;
114 | start|resume|stop|status|list|collect)
115 | ACTION="$1"
116 | ;;
117 | -v|--version)
118 | ACTION="version"
119 | ;;
120 | -e)
121 | EVENT="$2"
122 | shift
123 | ;;
124 | -d)
125 | DURATION="$2"
126 | shift
127 | ;;
128 | -f)
129 | FILE="$2"
130 | unset USE_TMP
131 | shift
132 | ;;
133 | -i)
134 | INTERVAL=",interval=$2"
135 | shift
136 | ;;
137 | -j)
138 | JSTACKDEPTH=",jstackdepth=$2"
139 | shift
140 | ;;
141 | -b)
142 | FRAMEBUF=",framebuf=$2"
143 | shift
144 | ;;
145 | -t)
146 | THREADS=",threads"
147 | ;;
148 | -s)
149 | FORMAT="$FORMAT,simple"
150 | ;;
151 | -g)
152 | FORMAT="$FORMAT,sig"
153 | ;;
154 | -a)
155 | FORMAT="$FORMAT,ann"
156 | ;;
157 | -o)
158 | OUTPUT="$2"
159 | shift
160 | ;;
161 | --title)
162 | # escape XML special characters and comma
163 | TITLE=${2//&/&amp;}
164 | TITLE=${TITLE//</&lt;}
165 | TITLE=${TITLE//>/&gt;}
166 | TITLE=${TITLE//,/&#44;}
167 | FORMAT="$FORMAT,title=$TITLE"
168 | shift
169 | ;;
170 | --width|--height|--minwidth)
171 | FORMAT="$FORMAT,${1:2}=$2"
172 | shift
173 | ;;
174 | --reverse)
175 | FORMAT="$FORMAT,reverse"
176 | ;;
177 | --all-kernel)
178 | RING=",allkernel"
179 | ;;
180 | --all-user)
181 | RING=",alluser"
182 | ;;
183 | [0-9]*)
184 | PID="$1"
185 | ;;
186 | jps)
187 | # A shortcut for getting PID of a running Java application
188 | # -XX:+PerfDisableSharedMem prevents jps from appearing in its own list
189 | PID=$(pgrep -n java || jps -q -J-XX:+PerfDisableSharedMem)
190 | ;;
191 | *)
192 | echo "Unrecognized option: $1"
193 | usage
194 | ;;
195 | esac
196 | shift
197 | done
198 |
199 | if [[ "$PID" == "" && "$ACTION" != "version" ]]; then
200 | usage
201 | fi
202 |
203 | # If no -f argument is given, use temporary file to transfer output to caller terminal.
204 | # Let the target process create the file in case this script is run by superuser.
205 | if [[ $USE_TMP ]]; then
206 | FILE=/tmp/async-profiler.$$.$PID
207 | elif [[ $FILE != /* ]]; then
208 | # Output file is written by the target process. Make the path absolute to avoid confusion.
209 | FILE=$PWD/$FILE
210 | fi
211 |
212 | case $ACTION in
213 | start|resume)
214 | jattach "$ACTION,event=$EVENT,file=$FILE$INTERVAL$JSTACKDEPTH$FRAMEBUF$THREADS$RING,$OUTPUT$FORMAT"
215 | ;;
216 | stop)
217 | jattach "stop,file=$FILE,$OUTPUT$FORMAT"
218 | ;;
219 | status)
220 | jattach "status,file=$FILE"
221 | ;;
222 | list)
223 | jattach "list,file=$FILE"
224 | ;;
225 | collect)
226 | jattach "start,event=$EVENT,file=$FILE$INTERVAL$JSTACKDEPTH$FRAMEBUF$THREADS$RING,$OUTPUT$FORMAT"
227 | while (( DURATION-- > 0 )); do
228 | check_if_terminated
229 | sleep 1
230 | done
231 | jattach "stop,file=$FILE,$OUTPUT$FORMAT"
232 | ;;
233 | version)
234 | if [[ "$PID" == "" ]]; then
235 | java "-agentpath:$PROFILER=version" -version 2> /dev/null
236 | else
237 | jattach "version,file=$FILE"
238 | fi
239 | ;;
240 | esac
241 |
--------------------------------------------------------------------------------
/profiler/samples.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/scalecube/aeron-cluster-poc/bdd977775d4dda6f981cf6086157c84026fb3a25/profiler/samples.jar
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | requests==2.20.1
2 |
--------------------------------------------------------------------------------
/src/main/scripts/cd/before-deploy.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | DIRNAME=$(dirname $0)
4 | BEFORE_DEPLOY_EXEC_FILES=$(find $DIRNAME -name 'before-deploy-*.sh')
5 |
6 | echo Running $0
7 | echo *-*-*-*-*-*-*-*-*-*-*-*-*-*
8 |
9 | decryptsecrets() {
10 | echo decrypting secrets
11 | echo *-*-*-*-*-*-*-*-*-*-*-*
12 | mkdir -p ~/tmp
13 | openssl aes-256-cbc -K $encrypted_SOME_key -iv $encrypted_SOME_iv -in $TRAVIS_BUILD_DIR/src/main/scripts/cd/secrets.tar.enc -out ~/tmp/secrets.tar -d
14 | md5sum ~/tmp/secrets.tar
15 | tar -xvf ~/tmp/secrets.tar -C ~/.ssh
16 | shred -z -u ~/tmp/secrets.tar
17 | }
18 |
19 | importpgp() {
20 | echo importing pgp secret
21 | echo *-*-*-*-*-*-*-*-*-*-*-*
22 | eval $(gpg-agent --daemon --batch)
23 | gpg --batch --passphrase $GPG_PASSPHRASE --import ~/.ssh/codesigning.asc
24 | shred -z -u ~/.ssh/codesigning.asc
25 | }
26 |
27 | setupssh() {
28 | echo importing ssh secret
29 | echo *-*-*-*-*-*-*-*-*-*-*-*
30 | chmod 400 ~/.ssh/id_rsa
31 | touch ~/.ssh/config
32 |
33 | echo "Host github.com" >> $HOME/.ssh/config
34 | echo " IdentityFile $HOME/.ssh/id_rsa" >> $HOME/.ssh/config
35 | echo " StrictHostKeyChecking no" >> $HOME/.ssh/config
36 |
37 | eval "$(ssh-agent -s)"
38 | ssh-add ~/.ssh/id_rsa
39 | ssh -T git@github.com || true
40 | }
41 |
42 | setupgit() {
43 | echo setting git up
44 | echo *-*-*-*-*-*-*-*-*-*-*-*
45 | git remote set-url origin git@github.com:$TRAVIS_REPO_SLUG.git
46 | git config --global user.email "io.scalecube.ci@gmail.com"
47 | git config --global user.name "io-scalecube-ci"
48 | git checkout -B $TRAVIS_BRANCH || true
49 | }
50 |
51 | deployment() {
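# Note: inside `[ ... ]` the -a operator binds more tightly than -o, so the condition below
# reads as: (not a pull request AND branch is master) OR branch is develop OR a tag is set.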
52 | if [ "$TRAVIS_PULL_REQUEST" = 'false' -a "$TRAVIS_BRANCH" = 'master' -o "$TRAVIS_BRANCH" = 'develop' -o -n "$TRAVIS_TAG" ]; then
53 | echo deployment
54 | echo *-*-*-*-*-*-*-*-*-*-*-*
55 | decryptsecrets
56 | importpgp
57 | setupssh
58 | setupgit
59 | fi
60 | }
61 |
62 | deployment
63 |
64 | # extends before-deploy.sh
65 | for script_file in $BEFORE_DEPLOY_EXEC_FILES; do
66 | . $script_file
67 | done
68 |
--------------------------------------------------------------------------------
/src/main/scripts/cd/deploy.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 |
3 | DIRNAME=$(dirname $0)
4 | DEPLOY_EXEC_FILES=$(find $DIRNAME -name 'deploy-*.sh')
5 |
6 | echo Running $0
7 | echo *-*-*-*-*-*-*-*-*-*-*-*-*-*
8 |
9 | mvn -P release deploy -Darguments=-DskipTests -B -V -s travis-settings.xml
10 | pip install --user -r requirements.txt
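# external_build.sh below is a Python script (see its shebang); the requests dependency installed above is for it.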
11 | $(dirname $0)/external_build.sh
12 |
13 | # extends deploy.sh
14 | for script_file in $DEPLOY_EXEC_FILES; do
15 | . $script_file
16 | done
17 |
18 |
--------------------------------------------------------------------------------
/src/main/scripts/cd/external_build.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import requests
4 | import urlparse
5 | import json
6 | import time
7 | import sys
8 | import os
9 |
10 | class TravisBuilds:
11 |     """
12 |     Triggers builds through the Travis CI API and polls their status.
13 |     """
14 |     travis_api_url = 'https://api.travis-ci.org/'
15 |     build_id = None
16 |
17 |     def __init__(self, repo_name, auth_token):
18 |         self.headers = {'Content-Type': 'application/json',
19 |                         'Accept': 'application/json',
20 |                         'Travis-API-Version': '3',
21 |                         'Authorization': 'token {}'.format(auth_token)
22 |                         }
23 |         self.repo_name = repo_name
24 |
25 |     def start_build(self):
26 |         data = {"request": {
27 |             "branch": "master"
28 |         }}
29 |         url = urlparse.urljoin(self.travis_api_url,
30 |                                'repo/{}/requests'.format(self.repo_name))
31 |         response = requests.post(url=url, data=json.dumps(data), headers=self.headers)
32 |         if response.status_code == 202:
33 |             self.build_id = self.get_build_id(response.json()["request"]["id"])
34 |             print self.build_id
35 |             return True
36 |
37 |     def get_build_id(self, request_id):
38 |         time.sleep(10)
39 |         url = urlparse.urljoin(self.travis_api_url,
40 |                                'repo/{}/request/{}'.format(self.repo_name, request_id))
41 |         response = requests.get(url=url, headers=self.headers)
42 |         return response.json()["builds"][0]['id']
43 |
44 |     def wait_for_build_result(self):
45 |         attempts = 0
46 |         tests_minutes = int(os.getenv('TESTS_MINUTES'))
47 |         while attempts < tests_minutes:
48 |             url = urlparse.urljoin(self.travis_api_url, 'build/{}'.format(self.build_id))
49 |             response = requests.get(url=url, headers=self.headers)
50 |             if response.json()['state'] == "passed":
51 |                 return True
52 |             else:
53 |                 print "External build has been running for {} minutes".format(attempts)
54 |                 time.sleep(60)
55 |                 attempts += 1
56 |         return False
57 |
58 |
59 | if __name__ == '__main__':
60 |     external_build = os.getenv('TRIGGER_EXTERNAL_CI', '')
61 |     if external_build:
62 |         travis = TravisBuilds(external_build, os.getenv('TRAVIS_AUTH_TOKEN'))
63 |         build = travis.start_build()
64 |         result = travis.wait_for_build_result()
65 |         if result:
66 |             sys.exit(0)
67 |         sys.exit(1)
68 |     sys.exit(0)
69 |
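# Environment expected by this script, for illustration only (values are hypothetical):
#   TRIGGER_EXTERNAL_CI=owner%2Frepo   # Travis API v3 takes a URL-encoded repository slug (or a numeric id)
#   TRAVIS_AUTH_TOKEN=<api token>      # sent in the Authorization header
#   TESTS_MINUTES=30                   # upper bound, in minutes, to wait for the external build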
--------------------------------------------------------------------------------
/src/main/scripts/cd/release.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 |
3 | DIRNAME=$(dirname $0)
4 | RELEASE_EXEC_FILES=$(find $DIRNAME -name 'release-*.sh')
5 |
6 | echo Running $0
7 | echo *-*-*-*-*-*-*-*-*-*-*-*-*-*
8 |
9 | . $DIRNAME/before-deploy.sh
10 |
11 | commit_to_develop() {
12 | git fetch
13 | git branch -r
14 | git checkout -B develop
15 | git rebase $TRAVIS_BRANCH
16 | git commit --amend -m "++++ Prepare for next development iteration build: $TRAVIS_BUILD_NUMBER ++++"
17 | git push origin develop
18 | }
19 |
20 | check_next_version() {
21 | export NEXT_VERSION=$(echo $TRAVIS_COMMIT_MESSAGE | grep -E -o '[0-9]+\.[0-9]+\.[0-9]+-SNAPSHOT')
22 | if [ -n "$NEXT_VERSION" ] ; then
23 | export MVN_NEXT_VERSION=-DdevelopmentVersion=$NEXT_VERSION
24 | fi
25 | }
26 |
27 | check_tag_for_rc() {
28 | export VERSION=$(mvn help:evaluate -Dexpression=project.version -q -DforceStdout)
29 | if [ -n "$TRAVIS_TAG" ] ; then
30 | RC_VER=$(echo $TRAVIS_TAG | grep -E -o 'RC-?[0-9]+')
31 | RC_PREPARE=$(echo $TRAVIS_TAG | grep -o -i 'prepare')
32 | if [ -n "$RC_VER" -a -n "$RC_PREPARE" ] ; then
33 | export NEW_RC_VERSION=$(echo $VERSION | sed "s/SNAPSHOT/$RC_VER/g")
34 | echo Release candidate: $NEW_RC_VERSION
35 | echo *-*-*-*-*-*-*-*-*-*-*-*
36 | decryptsecrets
37 | importpgp
38 | setupssh
39 | setupgit
40 | export MVN_RELEASE_VERSION=-DreleaseVersion=$NEW_RC_VERSION
41 | if [ -n "$MVN_NEXT_VERSION" ] ; then
42 | export MVN_NEXT_VERSION=-DdevelopmentVersion=$VERSION;
43 | fi
44 | fi
45 | fi
46 | }
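# For illustration (hypothetical values): with project version 0.1.0-SNAPSHOT and
# TRAVIS_TAG "0.1.0-RC1-prepare", check_tag_for_rc exports NEW_RC_VERSION=0.1.0-RC1
# and MVN_RELEASE_VERSION=-DreleaseVersion=0.1.0-RC1.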
47 |
48 | check_next_version
49 | check_tag_for_rc
50 |
51 | mvn -P release -Darguments=-DskipTests release:prepare release:perform $MVN_RELEASE_VERSION $MVN_NEXT_VERSION -DautoVersionSubmodules=true -DscmCommentPrefix="$TRAVIS_COMMIT_MESSAGE [skip ci] " -B -V -s travis-settings.xml || exit 126
52 |
53 | mvn -B -q clean
54 |
55 | if [ -z "$NEW_RC_VERSION" ]; then
56 | commit_to_develop
57 | fi
58 |
59 | # extends release.sh
60 | for script_file in $RELEASE_EXEC_FILES; do
61 | . $script_file
62 | done
63 |
--------------------------------------------------------------------------------
/src/main/scripts/cd/secrets.tar.enc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/scalecube/aeron-cluster-poc/bdd977775d4dda6f981cf6086157c84026fb3a25/src/main/scripts/cd/secrets.tar.enc
--------------------------------------------------------------------------------
/src/main/scripts/ci/after-success.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | DIRNAME=$(dirname $0)
4 | AFTER_SUCCESS_EXEC_FILES=$(find $DIRNAME -name 'after-success-*.sh')
5 |
6 | echo Running $0
7 | echo *-*-*-*-*-*-*-*-*-*-*-*-*-*
8 |
9 | if [ -z "$CODACY_PROJECT_TOKEN" ]; then
10 | echo [WARNING] Please go to https://app.codacy.com/app/$TRAVIS_REPO_SLUG/settings/coverage and add CODACY_PROJECT_TOKEN to travis settings
11 | else
12 | find -name jacoco.xml | xargs -i java -jar ~/codacy-coverage-reporter-assembly.jar report -l Java --partial -r {}
13 | java -jar ~/codacy-coverage-reporter-assembly.jar final
14 | fi;
15 |
16 | # extends after-success.sh
17 | for script_file in $AFTER_SUCCESS_EXEC_FILES; do
18 | . $script_file
19 | done
20 |
21 |
--------------------------------------------------------------------------------
/src/main/scripts/ci/before-install.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | DIRNAME=$(dirname $0)
4 | BEFORE_INSTALL_EXEC_FILES=$(find $DIRNAME -name 'before-install-*.sh')
5 |
6 | echo Running $0
7 | echo *-*-*-*-*-*-*-*-*-*-*-*-*-*
8 |
9 | echo logging to docker image repository:
10 | echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin
11 |
12 | # get latest version of codacy reporter from sonatype
13 | latest=$(curl "https://oss.sonatype.org/service/local/repositories/releases/content/com/codacy/codacy-coverage-reporter/maven-metadata.xml" | xpath -e "/metadata/versioning/release/text()")
14 |
15 | echo Downloading latest version $latest of codacy reporter from sonatype
16 | # download latest assembly jar
17 | mvn -B -q dependency:get dependency:copy \
18 | -DoutputDirectory=$HOME \
19 | -DoutputAbsoluteArtifactFilename=true \
20 | -Dmdep.stripVersion=true \
21 | -DrepoUrl=https://oss.sonatype.org/service/local/repositories/releases/content/ \
22 | -Dartifact=com.codacy:codacy-coverage-reporter:$latest:jar:assembly
23 |
24 | echo local file md5sum:
25 | md5sum ~/codacy-coverage-reporter-assembly.jar
26 | echo remote file md5sum:
27 | curl "https://oss.sonatype.org/service/local/repositories/releases/content/com/codacy/codacy-coverage-reporter/$latest/codacy-coverage-reporter-$latest-assembly.jar.md5"
28 |
29 | # extends before-install.sh
30 | for script_file in $BEFORE_INSTALL_EXEC_FILES; do
31 | . $script_file
32 | done
33 |
34 |
--------------------------------------------------------------------------------
/travis-settings.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
3 |           xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
4 |           xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0
5 |                               http://maven.apache.org/xsd/settings-1.0.0.xsd">
6 |   <profiles>
7 |     <profile>
8 |       <id>skipstyle</id>
9 |       <properties>
10 |         <checkstyle.skip>true</checkstyle.skip> <!-- property name assumed -->
11 |       </properties>
12 |     </profile>
13 |     <profile>
14 |       <id>skippmd</id>
15 |       <properties>
16 |         <pmd.skip>true</pmd.skip> <!-- property name assumed -->
17 |       </properties>
18 |     </profile>
19 |     <profile>
20 |       <id>bintray</id>
21 |       <repositories>
22 |         <repository>
23 |           <id>central</id>
24 |           <name>bintray</name>
25 |           <url>https://jcenter.bintray.com</url>
26 |           <snapshots>
27 |             <enabled>false</enabled>
28 |           </snapshots>
29 |         </repository>
30 |       </repositories>
31 |     </profile>
32 |     <profile>
33 |       <id>ossrh</id>
34 |       <properties>
35 |         <gpg.keyname>${env.GPG_KEY}</gpg.keyname> <!-- property name assumed -->
36 |       </properties>
37 |     </profile>
38 |   </profiles>
39 |   <servers>
40 |     <server>
41 |       <id>ossrh</id>
42 |       <username>${env.SONATYPE_USERNAME}</username>
43 |       <password>${env.SONATYPE_PASSWORD}</password>
44 |     </server>
45 |     <server>
46 |       <id>docker.io</id>
47 |       <username>${env.DOCKER_USERNAME}</username>
48 |       <password>${env.DOCKER_PASSWORD}</password>
49 |     </server>
50 |   </servers>
51 |   <activeProfiles>
52 |     <activeProfile>bintray</activeProfile>
53 |     <activeProfile>ossrh</activeProfile>
54 |   </activeProfiles>
55 | </settings>
56 |
--------------------------------------------------------------------------------