├── .github └── workflows │ ├── master.yml │ └── pull_request.yml ├── .gitignore ├── .hog.project ├── .lgtm.yml ├── LICENSE ├── README.md ├── ROADMAP.md ├── assurance ├── .gitignore ├── build.gradle └── src │ ├── main │ └── java │ │ └── com │ │ └── obsidiandynamics │ │ └── meteor │ │ ├── InstancePool.java │ │ └── TestProvider.java │ └── test │ └── java │ └── com │ └── obsidiandynamics │ └── meteor │ ├── AddAllAsyncOrderTest.java │ ├── AddAsyncOrderTest.java │ ├── InstancePoolTest.java │ ├── MapBandwidthSim.java │ ├── RingbufferBandwidthSim.java │ ├── RingbufferFailoverSim.java │ └── TestProviderTest.java ├── build.gradle ├── core ├── .gitignore ├── build.gradle └── src │ ├── main │ └── java │ │ └── com │ │ └── obsidiandynamics │ │ └── meteor │ │ ├── DefaultPublisher.java │ │ ├── DefaultReceiver.java │ │ ├── DefaultSubscriber.java │ │ ├── FuturePublishCallback.java │ │ ├── InitialOffsetScheme.java │ │ ├── InvalidInitialOffsetSchemeException.java │ │ ├── ListRecordBatch.java │ │ ├── LogAwareExceptionHandler.java │ │ ├── Namespace.java │ │ ├── OffsetLoadException.java │ │ ├── PublishCallback.java │ │ ├── Publisher.java │ │ ├── PublisherConfig.java │ │ ├── Receiver.java │ │ ├── Record.java │ │ ├── RecordBatch.java │ │ ├── StreamConfig.java │ │ ├── StreamHelper.java │ │ ├── Subscriber.java │ │ └── SubscriberConfig.java │ └── test │ └── java │ └── com │ └── obsidiandynamics │ └── meteor │ ├── AbstractPubSubTest.java │ ├── FuturePublishCallbackTest.java │ ├── LogAwareExceptionHandlerTest.java │ ├── PubSubOneWayTest.java │ ├── PubSubRoundTripTest.java │ ├── PubSubTest.java │ ├── PublisherConfigTest.java │ ├── PublisherTest.java │ ├── RecordBatchTest.java │ ├── RecordTest.java │ ├── SimpleLongMessage.java │ ├── StreamConfigTest.java │ ├── StreamHelperTest.java │ ├── SubscriberConfigTest.java │ ├── SubscriberGroupedTest.java │ ├── SubscriberUngroupedTest.java │ └── sample │ ├── AsyncPubSubSample.java │ └── SyncPubSubSample.java ├── elect ├── .gitignore ├── build.gradle └── src │ ├── main │ └── java │ │ └── com │ │ └── obsidiandynamics │ │ └── meteor │ │ ├── Election.java │ │ ├── ElectionConfig.java │ │ ├── Lease.java │ │ ├── LeaseView.java │ │ ├── LeaseViewImpl.java │ │ ├── NotTenantException.java │ │ ├── Registry.java │ │ └── ScavengeWatcher.java │ └── test │ └── java │ └── com │ └── obsidiandynamics │ └── meteor │ ├── ElectionConfigTest.java │ ├── ElectionTest.java │ ├── LeaseTest.java │ ├── LeaseViewTest.java │ └── RegistryTest.java ├── gradle.properties ├── gradle └── wrapper │ ├── gradle-wrapper.jar │ └── gradle-wrapper.properties ├── gradlew ├── gradlew.bat ├── settings.gradle └── src ├── main └── java │ └── com │ └── obsidiandynamics │ └── meteor │ ├── GridProvider.java │ ├── HazelcastProvider.java │ └── util │ ├── NamespaceEnum.java │ ├── RetryableMap.java │ └── RetryableRingbuffer.java └── test ├── java └── com │ └── obsidiandynamics │ └── meteor │ ├── GridProviderTest.java │ ├── HazelcastSample.java │ ├── HeapRingbufferStore.java │ ├── NopRingbufferStore.java │ ├── NopRingbufferStoreTest.java │ └── util │ ├── Bandwidth.java │ └── NamespaceEnumTest.java └── resources ├── log4j.properties └── zlg.properties /.github/workflows/master.yml: -------------------------------------------------------------------------------- 1 | name: Gradle build 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | 7 | jobs: 8 | build: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/checkout@v3 12 | - uses: actions/setup-java@v3 13 | with: 14 | distribution: 'zulu' 15 | java-version: '11' 16 | - run: 
./gradlew build 17 | - run: curl -Os https://uploader.codecov.io/latest/linux/codecov 18 | - run: chmod +x codecov 19 | - run: ./codecov -------------------------------------------------------------------------------- /.github/workflows/pull_request.yml: -------------------------------------------------------------------------------- 1 | name: Check pull request 2 | 3 | on: 4 | pull_request: 5 | branches: [ master ] 6 | 7 | jobs: 8 | build: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/checkout@v3 12 | - uses: actions/setup-java@v3 13 | with: 14 | distribution: 'zulu' 15 | java-version: '11' 16 | - run: ./gradlew build -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .gradle 2 | /build/ 3 | /bin/ 4 | /.settings/ 5 | .DS_Store 6 | .classpath 7 | .project 8 | .idea 9 | -------------------------------------------------------------------------------- /.hog.project: -------------------------------------------------------------------------------- 1 | commands: 2 | build: ./gradlew build --info --stacktrace --no-daemon 3 | publish: ./gradlew -x test publishToSonatype closeAndReleaseSonatypeStagingRepository --no-daemon 4 | modules: 5 | - path: . 6 | dependencies: 7 | - name: fulcrum 8 | groupId: com.obsidiandynamics.fulcrum 9 | artifactId: fulcrum-func 10 | - name: yconf 11 | groupId: com.obsidiandynamics.yconf 12 | artifactId: yconf-core 13 | - name: zerolog 14 | groupId: com.obsidiandynamics.zerolog 15 | artifactId: zerolog-core 16 | -------------------------------------------------------------------------------- /.lgtm.yml: -------------------------------------------------------------------------------- 1 | extraction: 2 | java: 3 | index: 4 | java_version: "16" 5 | queries: 6 | - include: "*" 7 | - exclude: java/uncaught-number-format-exception 8 | - exclude: java/notify-instead-of-notify-all 9 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2018, Obsidian Dynamics 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | * Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | * Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | * Neither the name of the copyright holder nor the names of its 17 | contributors may be used to endorse or promote products derived from 18 | this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | -------------------------------------------------------------------------------- /ROADMAP.md: -------------------------------------------------------------------------------- 1 | Roadmap 2 | === 3 | 4 | * Load balancing of subscribers: **HIGH PRIORITY** 5 | - Currently the biggest limitation of Meteor, particularly when comparing to Kafka and Kinesis. 6 | - Currently there's no notion of parallelism within a stream or across multiple streams. Under the current model, messages would have to be mapped to multiple streams by publishers; however, due to the absence of load balancing, subscribers may congregate in a relatively few number of nodes (due to the first-come-first-serve stream lease assignment), leaving the cluster underutilised. 7 | * Record versioning and backward compatibility with rolling upgrades: **HIGH PRIORITY** 8 | - Any changes to the record structure will break older clients when doing a rolling update. This means that the only way of upgrading a grid is to either bring it offline, or to terminate subscribers (which requires bespoke code on the application end). 9 | - Add a version field to the head of a batch. A publisher always writes to the ring buffer in the latest (from its perspective) version. 10 | - Forward compatibility: if a subscriber sees an unsupported message, it will halt processing. Assumingly at some point in the near future the subscriber's JVM is restarted and a new version of Meteor is loaded. 11 | - We could even go as far as automatically unsubscribing the subscriber when it sees an unsupported version, causing a rebalancing of subscriber assignments. The lease might bounce around among old subscribers until eventually an upgraded subscriber is elected. (Over the upgrade window, the number of old subscribers should diminish rapidly.) 12 | - Backward compatibility: if a subscriber observes a message of an older version, it will apply a series of transforms to stage-wise upgrade the message to the current schema on the fly. As we can't rewrite the messages _in situ_, the Meteor codebase must retain all schemas and migration rules up to the current version. We could use separate project modules to store version-specific schemas and transforms; the apps can include only those dependencies they need. 13 | - If persistence is available, then upgrades could be made on data _in situ_ with write-back, thus avoiding the need to retain all schemas and transforms in the codebase. If the subscriber pull a ring buffer cell with a version that is less than N - 1, then it can fetch from the data store instead. The write-back upgrade could be done with older clients still connected to the grid; they would need to halt processing if they encounter a newer schema. 14 | - Alternatively, we could apply further versioning at the topic level. An upgrade would pump messages from one topic to the next, transforming the messages _en route_. 
This could be quite complicated in the presence of older publishers, who will continue to publish to the old stream, unless we atomically cut over all publishers (old and new) to the new stream, and support schema N - 1 in the new stream. Also, this approach would interfere with message offsets (although this could be corrected via `RingbufferStore`). We might have to bite the bullet and go with stage-wise upgrades. 15 | - Versioning only applies to the records' on-wire representation; not to their payload schema. Payload versioning is the application's concern. 16 | - Thought: we might make backward compatibility exclusions/dispensations for versions 0.x.x, as schema evolution will be particularly liberal in the beginning and the understanding is that the 0.x.x library hasn't reached a milestone that permits its use in systems with unbounded data retention requirements. 17 | * Micro-batching and LZ4 data compression: 18 | - The built-in batching offered by Hazelcast is limited to 1,000 records and isn't tunable; there is also no opportunity to pass the batch through a compression filter or perform any other pre-processing. 19 | - The proposal is to create an independent micro-batching layer with customisable stream filters; LZ4 support with configurable (possibly even self-tunable) block sizes should be out of the box. 20 | * Keys and key-based sharding 21 | * Client-level message serialization support: 22 | - Currently the API expects you to work directly with byte arrays, which is arbitrarily flexible but assumes experienced coders. 23 | - Manual byte-pushing minimises opportunities for pipelining within the client API. (The application becomes responsible for pipelining.) 24 | - Support for message serialization will be baked into the client APIs. 25 | - OOTB support for Jackson, Gson, Kryo and `java.io.Serializable`, as well as custom serializers. 26 | - Serialization will apply to both keys and values; different serializers may be used. 27 | * Pipelining of client Hazelcast API calls separately from message (de)serialization (using separate threads for I/O and serialization): 28 | - The approach is similar to how [Jackdaw](https://github.com/obsidiandynamics/jackdaw) pipelines Kafka I/O and serialization; only the pipelines will be integrated into the Meteor client API (because we can) and thus made completely transparent to the application. 29 | * JMX metrics 30 | * Auto-confirming of subscriber offsets. Currently this is a manual call to `Subscriber.confirm()`. 31 | * Metadata server: 32 | - Currently, all publishers and subscribers to a stream must agree on all of the stream's parameters — capacity, number of sync/async replicas, storage implementation, etc. There is no way to discover this information. The present design, however restrictive, ensures that _any_ cohort can auto-create the stream if one doesn't exist. (In other words, streams are always created lazily, upon first use.) In practice, this is acceptable for long-lived streams and where the stream configuration is static and can be agreed upon and disseminated out-of-band. 33 | - Ideally, publishers and subscribers should refer to a stream solely by its name, without concerning themselves with its underlying configuration. Create a stream metadata service that holds a serialized `StreamConfig` (e.g. JSON with YConf mappings) for a given stream name. (A distributed hash map should do.) 34 | - The act of looking up the stream's metadata should be separate from the act of connecting to the stream for pub-sub. 
The lookup operation is done via a separate `MetadataService` API and may take an optional `Supplier`, in case the stream doesn't exist. 35 | - There would ideally be one application responsible for 'mastering' the stream; that application would house the stream config and pass it as the default value. Typically, that application would be one of the publishers. Other applications would perform the lookup without knowledge of the default value; if metadata is missing then the application would either back off or fail (or more pragmatically, fail after some number of back-offs). Perhaps the lookup API could take a timeout value, backing off and retrying behind the scenes. 36 | - Being a distributed hash map, the metadata map might itself be created lazily. For this reason, _all_ cohorts must agree on the metadata map configuration. Sensible defaults should be provided by Meteor, with the option to override. 37 | - Metadata persistence: this wouldn't be an issue for transient (non-persisted topics); however, persisted topics might survive their own metadata if the grid is reformed. The only problem is that there isn't a sensible default persistence configuration for the metadata hash map. The options are to either agree on a global configuration which is dispersed out-of-band, or to apply the `Supplier` pattern and make one 'pilot' process responsible for metadata 'bootstrapping'. If all metadata replicas are lost, the other procs would have to wait for the pilot proc to join the grid. The same pilot proc could also be used to 'master' the streams — acting as a central repository of configuration, which it immediately transfers to the grid. For as long as the grid is intact, the pilot proc is dormant. 38 | - The pilot is a simple process attached to the grid that can be remotely configured using a Hazelcast topic. 39 | * Parallel persistence engine: 40 | - Traditional challenge with persistence of ordered messages is that the writing a message blocks all other writers, waiting in a write queue. However, it's a simple model involving one large (albeit blocking) write per message. Reading a record is also done in one operation. 41 | - Proposed approach: publisher persists batch and obtains a unique (DB-assigned) store ID (slow operation, but done in parallel across publishers) before putting the compressed batch and the store ID on the ring buffer. `RingbufferStore` completes the loop by associating the store ID with the message offset (fast operation that is blocking within the shard leader), indexed by ring buffer offset, before acknowledging the write. 42 | - By the time the batch is observed by subscribers, the batch would have been persisted and linked back to the ring buffer offset. 43 | - If the ring buffer cell has lapsed, `RingbufferStore` looks up the store ID for the given ring buffer offset. Then the store ID is resolved to the batch data. (Two discrete operations are required for the read, which may be issued as one composite operation depending on the persistence stack and query language semantics.) 44 | - Persistence must also apply to subscriber offsets. Offsets may be persisted lazily; there's no need to fsync the offset before returning. 45 | * Background (semi-)compaction: 46 | - Persisted messages are obsoleted in the background based on key, and are thereby excluded from the batch, leaving a hole which may in theory be squashed (as long as the intra-batch message numbering is preserved). 
If the last message in a batch is obsoleted, then the batch is fed as a special _void batch_ to the subscriber, with a pointer to the next non-void batch. This way, if there is a large void in the message log (several contiguous void batches), the subscriber can rapidly skip over those ring buffer cells. 47 | - This might be called semi-compaction as it works at a batch level; it doesn't shuffle messages between batches or try to splice buddying batches to avoid external fragmentation. The algorithm reaches peak efficiency when entire batches can be reclaimed and the subscribers can begin to fast-forward their offsets, skipping over the ring buffer cells. Even if there are still lots of old non-void batches left due to sparsely distributed relevant/un-compacted messages, we can still realise performance gains by allowing subscribers to silently skip over the compacted messages. 48 | - We're ultimately limited by a ring buffer's rigid structure which isn't naturally prone to compaction (as opposed to chained log nodes, which can easily be buddied and spliced). At best, we can reduce a ring buffer to a skip list. -------------------------------------------------------------------------------- /assurance/.gitignore: -------------------------------------------------------------------------------- 1 | /bin/ 2 | /build/ 3 | /.settings/ 4 | .classpath 5 | .project 6 | -------------------------------------------------------------------------------- /assurance/build.gradle: -------------------------------------------------------------------------------- 1 | def packageName = "meteor-assurance" 2 | def packageDescription = "Mocking components and test utilities for Meteor" 3 | 4 | dependencies { 5 | compileOnly "com.hazelcast:hazelcast:${hazelcastVersion}:tests" 6 | api project(":") 7 | } 8 | 9 | publishing { 10 | publications { 11 | mavenJava(MavenPublication) { 12 | artifactId = packageName 13 | from components.java 14 | pom { 15 | name = packageName 16 | description = packageDescription 17 | url = 'https://github.com/obsidiandynamics/meteor' 18 | licenses { 19 | license { 20 | name = 'BSD 3-Clause License' 21 | url = 'https://opensource.org/licenses/BSD-3-Clause' 22 | } 23 | } 24 | developers { 25 | developer { 26 | id = 'ekoutanov' 27 | name = 'Emil Koutanov' 28 | email = 'ekoutanov@gmail.com' 29 | } 30 | } 31 | scm { 32 | connection = 'scm:git:git://github.com/obsidiandynamics/meteor.git' 33 | developerConnection = 'scm:git:ssh://github.com/obsidiandynamics/meteor.git' 34 | url = 'https://github.com/obsidiandynamics/meteor' 35 | } 36 | } 37 | } 38 | } 39 | } -------------------------------------------------------------------------------- /assurance/src/main/java/com/obsidiandynamics/meteor/InstancePool.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import java.util.concurrent.atomic.*; 4 | import java.util.function.*; 5 | 6 | import com.hazelcast.core.*; 7 | import com.obsidiandynamics.threads.*; 8 | 9 | /** 10 | * Pools Hazelcast instances to avoid repeated instantiation.
11 | * 12 | * Instances are returned from the pool in a round-robin fashion. If more invocations 13 | * to {@link InstancePool#get()} are made than there are instances in the pool, some 14 | * instances will be shared across callers.
15 | * 16 | * This class is thread-safe. 17 | */ 18 | public final class InstancePool { 19 | private final Supplier instanceSupplier; 20 | 21 | private final HazelcastInstance[] instances; 22 | 23 | private final Object instancesLock = new Object(); 24 | 25 | private final AtomicInteger position = new AtomicInteger(); 26 | 27 | public InstancePool(int size, Supplier instanceSupplier) { 28 | this.instanceSupplier = instanceSupplier; 29 | instances = new HazelcastInstance[size]; 30 | } 31 | 32 | public int size() { 33 | return instances.length; 34 | } 35 | 36 | public HazelcastInstance get() { 37 | return get(position.getAndIncrement() % size()); 38 | } 39 | 40 | private HazelcastInstance get(int index) { 41 | synchronized (instancesLock) { 42 | if (instances[index] == null) { 43 | return instances[index] = instanceSupplier.get(); 44 | } else { 45 | return instances[index]; 46 | } 47 | } 48 | } 49 | 50 | public void prestartAll() { 51 | prestart(size()); 52 | } 53 | 54 | public void prestart(int numInstances) { 55 | Parallel.blocking(numInstances, i -> get(i % size())).run(); 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /assurance/src/main/java/com/obsidiandynamics/meteor/TestProvider.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import com.hazelcast.config.*; 4 | import com.hazelcast.core.*; 5 | import com.hazelcast.test.*; 6 | 7 | public final class TestProvider implements HazelcastProvider { 8 | private final TestHazelcastInstanceFactory factory; 9 | 10 | public TestProvider() { 11 | this(new TestHazelcastInstanceFactory()); 12 | } 13 | 14 | public TestProvider(TestHazelcastInstanceFactory factory) { 15 | this.factory = factory; 16 | } 17 | 18 | @Override 19 | public HazelcastInstance createInstance(Config config) { 20 | return factory.newHazelcastInstance(config); 21 | } 22 | 23 | @Override 24 | public void shutdownAll() { 25 | factory.shutdownAll(); 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /assurance/src/test/java/com/obsidiandynamics/meteor/AddAllAsyncOrderTest.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import static org.junit.Assert.*; 4 | import static org.junit.Assume.*; 5 | 6 | import java.util.*; 7 | import java.util.concurrent.*; 8 | import java.util.stream.*; 9 | 10 | import org.junit.*; 11 | 12 | import com.hazelcast.config.*; 13 | import com.hazelcast.core.*; 14 | import com.hazelcast.ringbuffer.*; 15 | import com.hazelcast.test.*; 16 | 17 | public final class AddAllAsyncOrderTest { 18 | @Test 19 | public void test() throws InterruptedException, ExecutionException { 20 | assumeTrue(false); 21 | 22 | final int numInstances = 4; // how many instances to utilise 23 | final int iterations = 10; // number of test cycles 24 | final int itemsPerIteration = 100; // how many items will be added to the buffer 25 | 26 | for (int iteration = 0; iteration < iterations; iteration++) { 27 | System.out.println("iteration=" + iteration); 28 | // configure and prestart a bunch of instances 29 | final List instances = new ArrayList<>(numInstances); 30 | final Config config = new Config() 31 | .setProperty("hazelcast.logging.type", "none") 32 | .addRingBufferConfig(new RingbufferConfig().setName("default").setBackupCount(3).setCapacity(itemsPerIteration)); 33 | final TestHazelcastInstanceFactory factory = 
new TestHazelcastInstanceFactory(); 34 | IntStream.range(0, numInstances).parallel().forEach(i -> instances.add(factory.newHazelcastInstance(config))); 35 | 36 | // get a ringbuffer from one of the instances 37 | final HazelcastInstance instance = instances.get(iteration % numInstances); 38 | final Ringbuffer ring = instance.getRingbuffer("buffer-" + iteration); 39 | 40 | // send all the items as a batch and await for sequence allocation 41 | final List items = IntStream.range(0, itemsPerIteration).boxed().collect(Collectors.toList()); 42 | final ICompletableFuture writeFuture = ring.addAllAsync(items, OverflowPolicy.FAIL); 43 | final long lastSequence = writeFuture.get(); 44 | assertEquals(itemsPerIteration - 1, lastSequence); 45 | 46 | // assert correct order by reading from the buffer 47 | final ICompletableFuture> readFuture = ring.readManyAsync(0, itemsPerIteration, 1000, null); 48 | final ReadResultSet readResultSet = readFuture.get(); 49 | assertEquals(itemsPerIteration, readResultSet.size()); 50 | for (int itemIndex = 0; itemIndex < itemsPerIteration; itemIndex++) { 51 | final int readItem = readResultSet.get(itemIndex); 52 | assertEquals(itemIndex, readItem); 53 | } 54 | 55 | // clean up 56 | instances.forEach(inst -> inst.getLifecycleService().terminate()); 57 | } 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /assurance/src/test/java/com/obsidiandynamics/meteor/AddAsyncOrderTest.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import static org.junit.Assert.*; 4 | import static org.junit.Assume.*; 5 | 6 | import java.util.*; 7 | import java.util.concurrent.*; 8 | import java.util.concurrent.atomic.*; 9 | import java.util.stream.*; 10 | 11 | import org.junit.*; 12 | 13 | import com.hazelcast.config.*; 14 | import com.hazelcast.core.*; 15 | import com.hazelcast.ringbuffer.*; 16 | import com.hazelcast.test.*; 17 | 18 | public final class AddAsyncOrderTest { 19 | @Test 20 | public void test() throws InterruptedException, ExecutionException { 21 | assumeTrue(false); 22 | 23 | final int numInstances = 4; // only fails with multiple instances 24 | final int iterations = 10; // sometimes takes a few iterations to fail 25 | final int itemsPerIteration = 100; // how many items will be added to the buffer 26 | 27 | for (int iteration = 0; iteration < iterations; iteration++) { 28 | System.out.println("iteration=" + iteration); 29 | // configure and prestart a bunch of instances 30 | final List instances = new ArrayList<>(numInstances); 31 | final Config config = new Config() 32 | .setProperty("hazelcast.logging.type", "none") 33 | .addRingBufferConfig(new RingbufferConfig().setName("default").setBackupCount(3).setCapacity(itemsPerIteration)); 34 | final TestHazelcastInstanceFactory factory = new TestHazelcastInstanceFactory(); 35 | IntStream.range(0, numInstances).parallel().forEach(i -> instances.add(factory.newHazelcastInstance(config))); 36 | 37 | // get a ringbuffer from one of the instances 38 | final HazelcastInstance instance = instances.get(iteration % numInstances); 39 | final Ringbuffer ring = instance.getRingbuffer("buffer-" + iteration); 40 | 41 | // send all the items and await all callbacks with a countdown latch 42 | final CountDownLatch latch = new CountDownLatch(itemsPerIteration); 43 | final AtomicReference error = new AtomicReference<>(); 44 | final int _iteration = iteration; 45 | for (int item = 0; item < itemsPerIteration; item++) 
{ 46 | // send each item one by one and compare its queued number with the allocated sequence number 47 | final ICompletableFuture writeFuture = ring.addAsync(item, OverflowPolicy.FAIL); 48 | final int _item = item; 49 | writeFuture.andThen(new ExecutionCallback() { 50 | @Override 51 | public void onResponse(Long sequence) { 52 | // the callback may be called out of order, which is perfectly fine; but the sequence numbers 53 | // must match the order in which the items were enqueued 54 | try { 55 | assertEquals(_item, (long) sequence); 56 | } catch (AssertionError e) { 57 | // if we detect a problem, save the AssertionError as the unit test is running in a different thread 58 | System.err.println("SEQUENCE OUT OF ORDER: item=" + _item + ", sequence=" + sequence + ", iteration=" + _iteration); 59 | error.set(e); 60 | } finally { 61 | latch.countDown(); 62 | } 63 | } 64 | 65 | @Override 66 | public void onFailure(Throwable t) { 67 | t.printStackTrace(); 68 | } 69 | }); 70 | } 71 | 72 | // wait for all callbacks 73 | latch.await(); 74 | 75 | // assert correct order by reading from the buffer 76 | final ICompletableFuture> readFuture = ring.readManyAsync(0, itemsPerIteration, 1000, null); 77 | final ReadResultSet readResultSet = readFuture.get(); 78 | assertEquals(itemsPerIteration, readResultSet.size()); 79 | for (int itemIndex = 0; itemIndex < itemsPerIteration; itemIndex++) { 80 | final int readItem = readResultSet.get(itemIndex); 81 | assertEquals(itemIndex, readItem); 82 | } 83 | 84 | // clean up 85 | instances.forEach(inst -> inst.getLifecycleService().terminate()); 86 | 87 | // check if any assertion errors were observed during the run 88 | if (error.get() != null) { 89 | throw error.get(); 90 | } 91 | } 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /assurance/src/test/java/com/obsidiandynamics/meteor/InstancePoolTest.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import static org.junit.Assert.*; 4 | import static org.mockito.Mockito.*; 5 | 6 | import java.util.*; 7 | import java.util.function.*; 8 | 9 | import org.junit.*; 10 | 11 | import com.hazelcast.core.*; 12 | import com.obsidiandynamics.func.*; 13 | 14 | public final class InstancePoolTest { 15 | @Test 16 | public void testSize() { 17 | final int size = 4; 18 | final InstancePool pool = new InstancePool(size, () -> null); 19 | assertEquals(size, pool.size()); 20 | } 21 | 22 | @Test 23 | public void testGet() { 24 | final int size = 4; 25 | final Supplier supplier = Classes.cast(mock(Supplier.class)); 26 | when(supplier.get()).thenAnswer(invocation -> mock(HazelcastInstance.class)); 27 | final InstancePool pool = new InstancePool(size, supplier); 28 | 29 | // get more instances than what's in the pool 30 | final List instances = new ArrayList<>(size * 2); 31 | for (int i = 0; i < size * 2; i++) { 32 | final HazelcastInstance instance = pool.get(); 33 | assertNotNull(instance); 34 | instances.add(instance); 35 | } 36 | 37 | verify(supplier, times(4)).get(); 38 | 39 | // verify the number of unique instances 40 | final Set set = new HashSet<>(instances); 41 | assertEquals(size, set.size()); 42 | } 43 | 44 | @Test 45 | public void testPrestart() { 46 | final int size = 4; 47 | final Supplier supplier = Classes.cast(mock(Supplier.class)); 48 | when(supplier.get()).thenAnswer(invocation -> mock(HazelcastInstance.class)); 49 | final InstancePool pool = new InstancePool(size, supplier); 50 | 51 
| pool.prestartAll(); 52 | verify(supplier, times(4)).get(); 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /assurance/src/test/java/com/obsidiandynamics/meteor/MapBandwidthSim.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import java.util.*; 4 | import java.util.Map.*; 5 | import java.util.function.*; 6 | import java.util.stream.*; 7 | 8 | import com.hazelcast.config.*; 9 | import com.hazelcast.core.*; 10 | import com.obsidiandynamics.worker.*; 11 | import com.obsidiandynamics.zerolog.*; 12 | 13 | public class MapBandwidthSim { 14 | private static final Zlg zlg = Zlg.forDeclaringClass().get(); 15 | 16 | private MapBandwidthSim() {} 17 | 18 | private static class TestWriter { 19 | private final int writeIntervalMillis; 20 | private final byte[] bytes; 21 | private final IMap map; 22 | private final int writes; 23 | private final int keys; 24 | private int written; 25 | 26 | TestWriter(Supplier instanceMaker, int writeIntervalMillis, int writes, int keys, int bytes) { 27 | this.writeIntervalMillis = writeIntervalMillis; 28 | this.writes = writes; 29 | this.keys = keys; 30 | this.bytes = new byte[bytes]; 31 | map = instanceMaker.get().getMap("map"); 32 | IntStream.range(0, keys).forEach(i -> map.put(i, this.bytes)); 33 | 34 | WorkerThread.builder() 35 | .withOptions(new WorkerOptions().daemon().withName(TestWriter.class)) 36 | .onCycle(this::writeCycle) 37 | .buildAndStart(); 38 | } 39 | 40 | private void writeCycle(WorkerThread t) throws InterruptedException { 41 | final Integer key = written % keys; 42 | map.replace(key, bytes, bytes); 43 | written++; 44 | zlg.i("Written %,d", z -> z.arg(written)); 45 | 46 | if (written == writes) { 47 | zlg.i("Writer: terminating"); 48 | t.terminate(); 49 | } else { 50 | Thread.sleep(writeIntervalMillis); 51 | } 52 | } 53 | } 54 | 55 | private static class TestReader { 56 | private final int readIntervalMillis; 57 | private final IMap map; 58 | 59 | TestReader(Supplier instanceMaker, int readIntervalMillis) { 60 | this.readIntervalMillis = readIntervalMillis; 61 | map = instanceMaker.get().getMap("map"); 62 | 63 | WorkerThread.builder() 64 | .withOptions(new WorkerOptions().daemon().withName(TestReader.class)) 65 | .onCycle(this::readCycle) 66 | .buildAndStart(); 67 | } 68 | 69 | private void readCycle(WorkerThread t) throws InterruptedException { 70 | final Set> entrySet = map.entrySet(); 71 | zlg.i("Read %,d entries", z -> z.arg(entrySet::size)); 72 | Thread.sleep(readIntervalMillis); 73 | } 74 | } 75 | 76 | public static void main(String[] args) { 77 | final int writeIntervalMillis = 100; 78 | final int writes = 1_000; 79 | final int keys = 3; 80 | final int bytes = 10; 81 | final int readIntervalMillis = 100; 82 | 83 | final Config config = new Config() 84 | .setProperty("hazelcast.logging.type", "none") 85 | .setProperty("hazelcast.shutdownhook.enabled", "false") 86 | .setProperty("hazelcast.graceful.shutdown.max.wait", String.valueOf(5)) 87 | .setProperty("hazelcast.wait.seconds.before.join", String.valueOf(0)) 88 | .setProperty("hazelcast.max.wait.seconds.before.join", String.valueOf(0)) 89 | .setNetworkConfig(new NetworkConfig() 90 | .setJoin(new JoinConfig() 91 | .setMulticastConfig(new MulticastConfig() 92 | .setEnabled(true) 93 | .setMulticastTimeoutSeconds(1)) 94 | .setTcpIpConfig(new TcpIpConfig() 95 | .setEnabled(false)))) 96 | .addMapConfig(new MapConfig() 97 | .setName("default") 98 | 
.setBackupCount(1) 99 | .setAsyncBackupCount(0)); 100 | 101 | final InstancePool instancePool = new InstancePool(4, () -> GridProvider.getInstance().createInstance(config)); 102 | zlg.i("Prestarting instances..."); 103 | instancePool.prestartAll(); 104 | zlg.i("Instances prestarted"); 105 | 106 | new MapBandwidthSim() {{ 107 | new TestWriter(instancePool::get, writeIntervalMillis, writes, keys, bytes); 108 | new TestReader(instancePool::get, readIntervalMillis); 109 | new TestReader(instancePool::get, readIntervalMillis); 110 | new TestReader(instancePool::get, readIntervalMillis); 111 | }}; 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /assurance/src/test/java/com/obsidiandynamics/meteor/RingbufferBandwidthSim.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import java.util.*; 4 | import java.util.concurrent.*; 5 | import java.util.function.*; 6 | 7 | import com.hazelcast.config.*; 8 | import com.hazelcast.core.*; 9 | import com.hazelcast.ringbuffer.*; 10 | import com.obsidiandynamics.worker.*; 11 | import com.obsidiandynamics.zerolog.*; 12 | 13 | public class RingbufferBandwidthSim { 14 | private static final Zlg zlg = Zlg.forDeclaringClass().get(); 15 | 16 | private final int messages; 17 | 18 | private RingbufferBandwidthSim(int messages) { 19 | this.messages = messages; 20 | } 21 | 22 | private class TestPublisher { 23 | private final int pubIntervalMillis; 24 | private final byte[] bytes; 25 | private final Ringbuffer buffer; 26 | private int published; 27 | 28 | TestPublisher(Supplier instanceMaker, int pubIntervalMillis, int bytes) { 29 | this.pubIntervalMillis = pubIntervalMillis; 30 | this.bytes = new byte[bytes]; 31 | buffer = instanceMaker.get().getRingbuffer("buffer"); 32 | 33 | WorkerThread.builder() 34 | .withOptions(new WorkerOptions().daemon().withName(TestPublisher.class)) 35 | .onCycle(this::publishCycle) 36 | .buildAndStart(); 37 | } 38 | 39 | private void publishCycle(WorkerThread t) throws InterruptedException { 40 | buffer.addAsync(bytes, OverflowPolicy.OVERWRITE); 41 | published++; 42 | zlg.i("Published %,d", z -> z.arg(published)); 43 | 44 | if (published == messages) { 45 | zlg.i("Publisher: terminating"); 46 | t.terminate(); 47 | } else { 48 | Thread.sleep(pubIntervalMillis); 49 | } 50 | } 51 | } 52 | 53 | private class TestSubscriber { 54 | private final int pollTimeoutMillis; 55 | private final Ringbuffer buffer; 56 | private int received; 57 | private long nextSequence; 58 | 59 | TestSubscriber(Supplier instanceMaker, int pollTimeoutMillis) { 60 | this.pollTimeoutMillis = pollTimeoutMillis; 61 | buffer = instanceMaker.get().getRingbuffer("buffer"); 62 | 63 | WorkerThread.builder() 64 | .withOptions(new WorkerOptions().daemon().withName(TestSubscriber.class)) 65 | .onCycle(this::receiveCycle) 66 | .buildAndStart(); 67 | } 68 | 69 | private void receiveCycle(WorkerThread t) throws InterruptedException { 70 | final ICompletableFuture> f = buffer.readManyAsync(nextSequence, 1, 1000, Objects::nonNull); 71 | try { 72 | final ReadResultSet results = f.get(pollTimeoutMillis, TimeUnit.MILLISECONDS); 73 | nextSequence = results.getSequence(results.size() - 1) + 1; 74 | received += results.size(); 75 | zlg.i("Received %,d records (total %,d)", z -> z.arg(results::size).arg(received)); 76 | } catch (ExecutionException e) { 77 | e.printStackTrace(); 78 | } catch (TimeoutException e) { 79 | zlg.w("Timed out"); 80 | } 81 | 82 | if 
(received == messages) { 83 | zlg.i("Subscriber: terminating"); 84 | t.terminate(); 85 | } 86 | } 87 | } 88 | 89 | public static void main(String[] args) { 90 | final int messages = 1_000; 91 | final int pubIntervalMillis = 100; 92 | final int bytes = 10; 93 | final int pollTimeoutMillis = 1_000; 94 | 95 | final Config config = new Config() 96 | .setProperty("hazelcast.logging.type", "none") 97 | .setProperty("hazelcast.shutdownhook.enabled", "false") 98 | .setProperty("hazelcast.graceful.shutdown.max.wait", String.valueOf(5)) 99 | .setProperty("hazelcast.wait.seconds.before.join", String.valueOf(0)) 100 | .setProperty("hazelcast.max.wait.seconds.before.join", String.valueOf(0)) 101 | .setNetworkConfig(new NetworkConfig() 102 | .setJoin(new JoinConfig() 103 | .setMulticastConfig(new MulticastConfig() 104 | .setEnabled(true) 105 | .setMulticastTimeoutSeconds(1)) 106 | .setTcpIpConfig(new TcpIpConfig() 107 | .setEnabled(false)))) 108 | .addRingBufferConfig(new RingbufferConfig() 109 | .setName("default") 110 | .setBackupCount(0) 111 | .setAsyncBackupCount(0)); 112 | 113 | final InstancePool instancePool = new InstancePool(3, () -> GridProvider.getInstance().createInstance(config)); 114 | zlg.i("Prestarting instances..."); 115 | instancePool.prestartAll(); 116 | zlg.i("Instances prestarted"); 117 | 118 | new RingbufferBandwidthSim(messages) {{ 119 | new TestPublisher(instancePool::get, pubIntervalMillis, bytes); 120 | new TestSubscriber(instancePool::get, pollTimeoutMillis); 121 | new TestSubscriber(instancePool::get, pollTimeoutMillis); 122 | }}; 123 | } 124 | } 125 | -------------------------------------------------------------------------------- /assurance/src/test/java/com/obsidiandynamics/meteor/RingbufferFailoverSim.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import java.util.*; 4 | import java.util.concurrent.*; 5 | import java.util.concurrent.atomic.*; 6 | import java.util.function.*; 7 | 8 | import com.hazelcast.config.*; 9 | import com.hazelcast.core.*; 10 | import com.hazelcast.ringbuffer.*; 11 | import com.obsidiandynamics.worker.*; 12 | import com.obsidiandynamics.zerolog.*; 13 | 14 | /** 15 | * Uses a {@link NopRingbufferStore} to simulate a ringbuffer failover with loss of data. 16 | * Specifically, data is migrated from one member to another, but because 17 | * {@link RingbufferStore#getLargestSequence()} returns {@code -1}, the read from the ringbuffer 18 | * fails with a {@link IllegalArgumentException}. (The data is technically there, but the head and 19 | * tail sequences have been lost.)
20 | * 21 | * Depending on how the ringbuffer gets mapped to a partition owner, this simulation may take a 22 | * few cycles before data loss is observed. 23 | */ 24 | public class RingbufferFailoverSim { 25 | private static final Zlg zlg = Zlg.forDeclaringClass().get(); 26 | 27 | private static final String BUFFER_NAME = "buffer0"; 28 | 29 | private final int messages; 30 | 31 | private RingbufferFailoverSim(int messages) { 32 | this.messages = messages; 33 | } 34 | 35 | private class TestPublisher implements Joinable { 36 | private final int pubIntervalMillis; 37 | private final byte[] bytes; 38 | private final Ringbuffer buffer; 39 | private final WorkerThread thread; 40 | private int published; 41 | 42 | TestPublisher(Supplier instanceMaker, int pubIntervalMillis, int bytes) { 43 | this.pubIntervalMillis = pubIntervalMillis; 44 | this.bytes = new byte[bytes]; 45 | final HazelcastInstance instance = instanceMaker.get(); 46 | buffer = instance.getRingbuffer(BUFFER_NAME); 47 | zlg.i("serviceName=%s, partitionKey=%s", z -> z.arg(buffer::getServiceName).arg(buffer::getPartitionKey)); 48 | final Partition partition = instance.getPartitionService().getPartition(BUFFER_NAME); 49 | zlg.i("partitionId=%s, owner=%s", z -> z.arg(partition::getPartitionId).arg(partition::getOwner)); 50 | instance.getPartitionService().addMigrationListener(new MigrationListener() { 51 | @Override 52 | public void migrationStarted(MigrationEvent migrationEvent) { 53 | zlg.i("Migration started %s", z -> z.arg(migrationEvent)); 54 | } 55 | 56 | @Override 57 | public void migrationCompleted(MigrationEvent migrationEvent) { 58 | zlg.i("Migration compeleted %s", z -> z.arg(migrationEvent)); 59 | } 60 | 61 | @Override 62 | public void migrationFailed(MigrationEvent migrationEvent) { 63 | zlg.i("Migration failed %s", z -> z.arg(migrationEvent)); 64 | } 65 | }); 66 | 67 | thread = WorkerThread.builder() 68 | .withOptions(new WorkerOptions().daemon().withName(TestPublisher.class)) 69 | .onCycle(this::publishCycle) 70 | .buildAndStart(); 71 | } 72 | 73 | private void publishCycle(WorkerThread t) throws InterruptedException { 74 | buffer.addAsync(bytes, OverflowPolicy.OVERWRITE); 75 | published++; 76 | zlg.i("Published %s", z -> z.arg(published)); 77 | 78 | if (published == messages) { 79 | zlg.i("Publisher: terminating"); 80 | t.terminate(); 81 | } else { 82 | Thread.sleep(pubIntervalMillis); 83 | } 84 | } 85 | 86 | @Override 87 | public boolean join(long timeoutMillis) throws InterruptedException { 88 | return thread.join(timeoutMillis); 89 | } 90 | } 91 | 92 | private class TestSubscriber implements Joinable { 93 | private final int pollTimeoutMillis; 94 | private final Ringbuffer buffer; 95 | private final boolean terminateOnComplete; 96 | private final WorkerThread thread; 97 | private int received; 98 | private long nextSequence; 99 | 100 | TestSubscriber(Supplier instanceMaker, int pollTimeoutMillis, boolean terminateOnComplete) { 101 | this.pollTimeoutMillis = pollTimeoutMillis; 102 | this.terminateOnComplete = terminateOnComplete; 103 | buffer = instanceMaker.get().getRingbuffer(BUFFER_NAME); 104 | 105 | thread = WorkerThread.builder() 106 | .withOptions(new WorkerOptions().daemon().withName(TestSubscriber.class)) 107 | .onCycle(this::receiveCycle) 108 | .buildAndStart(); 109 | } 110 | 111 | private void receiveCycle(WorkerThread t) throws InterruptedException { 112 | final ICompletableFuture> f = buffer.readManyAsync(nextSequence, 1, 1000, Objects::nonNull); 113 | try { 114 | final ReadResultSet results = 
f.get(pollTimeoutMillis, TimeUnit.MILLISECONDS); 115 | nextSequence = results.getSequence(results.size() - 1) + 1; 116 | received += results.size(); 117 | zlg.i("Received %,d records (total %,d) next is %,d", z -> z.arg(results::size).arg(received).arg(nextSequence)); 118 | } catch (ExecutionException e) { 119 | e.printStackTrace(); 120 | } catch (TimeoutException e) { 121 | zlg.w("Timed out"); 122 | } 123 | 124 | if (received % messages == 0 && received > 0) { 125 | zlg.i("Subscriber: received one complete set"); 126 | if (terminateOnComplete) { 127 | t.terminate(); 128 | } 129 | } 130 | } 131 | 132 | @Override 133 | public boolean join(long timeoutMillis) throws InterruptedException { 134 | return thread.join(timeoutMillis); 135 | } 136 | } 137 | 138 | public static void main(String[] args) { 139 | final int messages = 10; 140 | final int pubIntervalMillis = 10; 141 | final int bytes = 10; 142 | final int pollTimeoutMillis = 100; 143 | final int cycles = 100; 144 | 145 | final Config config = new Config() 146 | .setProperty("hazelcast.logging.type", "none") 147 | .setProperty("hazelcast.shutdownhook.enabled", "false") 148 | .setProperty("hazelcast.graceful.shutdown.max.wait", String.valueOf(5)) 149 | .setProperty("hazelcast.wait.seconds.before.join", String.valueOf(0)) 150 | .setProperty("hazelcast.max.wait.seconds.before.join", String.valueOf(0)) 151 | .setNetworkConfig(new NetworkConfig() 152 | .setJoin(new JoinConfig() 153 | .setMulticastConfig(new MulticastConfig() 154 | .setEnabled(true) 155 | .setMulticastTimeoutSeconds(1)) 156 | .setTcpIpConfig(new TcpIpConfig() 157 | .setEnabled(false)))) 158 | .addRingBufferConfig(new RingbufferConfig() 159 | .setName(BUFFER_NAME) 160 | .setBackupCount(1) 161 | .setAsyncBackupCount(0) 162 | .setRingbufferStoreConfig(new RingbufferStoreConfig() 163 | .setEnabled(true) 164 | .setFactoryClassName(NopRingbufferStore.Factory.class.getName()))); 165 | 166 | final Supplier instanceSupplier = () -> GridProvider.getInstance().createInstance(config); 167 | 168 | zlg.i("Creating publisher instance..."); 169 | final AtomicReference instance = new AtomicReference<>(instanceSupplier.get()); 170 | instance.get().getRingbuffer(BUFFER_NAME); 171 | 172 | final InstancePool instancePool = new InstancePool(2, instanceSupplier); 173 | zlg.i("Prestarting subscriber instances..."); 174 | instancePool.prestartAll(); 175 | zlg.i("Instances prestarted"); 176 | 177 | new RingbufferFailoverSim(messages) {{ 178 | new TestSubscriber(instancePool::get, pollTimeoutMillis, false); 179 | new TestSubscriber(instancePool::get, pollTimeoutMillis, false); 180 | 181 | for (int i = 0; i < cycles; i++) { 182 | if (instance.get() == null) { 183 | zlg.i("Creating publisher instance..."); 184 | instance.set(instanceSupplier.get()); 185 | } 186 | 187 | zlg.i("Publisher instance created"); 188 | final TestPublisher pub = new TestPublisher(instance::get, pubIntervalMillis, bytes); 189 | final TestSubscriber sub = new TestSubscriber(instance::get, pollTimeoutMillis, true); 190 | Joiner.of(pub, sub).joinSilently(); 191 | instance.getAndSet(null).shutdown(); 192 | } 193 | }}; 194 | } 195 | } 196 | -------------------------------------------------------------------------------- /assurance/src/test/java/com/obsidiandynamics/meteor/TestProviderTest.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import static org.junit.Assert.*; 4 | 5 | import org.junit.*; 6 | 7 | import com.hazelcast.config.*; 8 | 9 | public 
final class TestProviderTest { 10 | @Test 11 | public void test() { 12 | final Config config = new Config() 13 | .setProperty("hazelcast.shutdownhook.enabled", "false") 14 | .setProperty("hazelcast.logging.type", "none"); 15 | 16 | final TestProvider testProvider = new TestProvider(); 17 | assertNotNull(testProvider.createInstance(config)); 18 | testProvider.shutdownAll(); 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /build.gradle: -------------------------------------------------------------------------------- 1 | plugins { 2 | id "com.github.ben-manes.versions" version "0.22.0" 3 | id 'io.github.gradle-nexus.publish-plugin' version '1.1.0' 4 | } 5 | 6 | nexusPublishing { 7 | repositories { 8 | sonatype { 9 | nexusUrl.set(uri("https://s01.oss.sonatype.org/service/local/")) 10 | snapshotRepositoryUrl.set(uri("https://s01.oss.sonatype.org/content/repositories/snapshots/")) 11 | username = findProperty('sonatype.username') 12 | password = findProperty('sonatype.password') 13 | } 14 | } 15 | } 16 | 17 | def baseJvmArgs = [ 18 | "--add-opens=java.base/java.io=ALL-UNNAMED", 19 | "--add-opens=java.base/java.lang=ALL-UNNAMED", 20 | "--add-opens=java.base/java.lang.invoke=ALL-UNNAMED", 21 | "--add-opens=java.base/java.lang.reflect=ALL-UNNAMED", 22 | "--add-opens=java.base/java.nio=ALL-UNNAMED", 23 | "--add-opens=java.base/java.util=ALL-UNNAMED", 24 | "--add-opens=java.base/java.util.concurrent.atomic=ALL-UNNAMED", 25 | "--add-opens=java.base/sun.nio.ch=ALL-UNNAMED", 26 | "-XX:-MaxFDLimit" 27 | ] 28 | project.ext.set("baseJvmArgs", baseJvmArgs) 29 | 30 | allprojects { 31 | apply plugin: "java-library" 32 | apply plugin: "jacoco" 33 | apply plugin: 'maven-publish' 34 | apply plugin: 'signing' 35 | 36 | group = "com.obsidiandynamics.meteor" 37 | version = "0.28.0-SNAPSHOT" 38 | 39 | sourceCompatibility = 1.8 40 | targetCompatibility = 1.8 41 | 42 | //TODO remove when Javadoc errors have been resolved 43 | javadoc.options.addStringOption('Xdoclint:none', '-quiet') 44 | 45 | repositories { 46 | mavenCentral() 47 | maven { 48 | url "https://oss.sonatype.org/content/repositories/snapshots" 49 | } 50 | } 51 | 52 | java { 53 | withJavadocJar() 54 | withSourcesJar() 55 | } 56 | 57 | signing { 58 | sign publishing.publications 59 | } 60 | 61 | ext { 62 | commonsLangVersion = "3.9" 63 | equalsverifierVersion = "3.10" 64 | fulcrumVersion = "0.39.0" 65 | hdrhistogramVersion = "2.1.11" 66 | junitVersion = "4.12" 67 | hazelcastVersion = "3.12.2" 68 | log4jVersion = "1.2.17" 69 | mockitoVersion = "3.0.0" 70 | slf4jVersion = "1.7.25" 71 | yconfVersion = "0.23.0" 72 | zerologVersion = "0.33.0" 73 | } 74 | 75 | dependencies { 76 | compileOnly "com.hazelcast:hazelcast:${hazelcastVersion}" 77 | 78 | testImplementation "com.hazelcast:hazelcast:${hazelcastVersion}" 79 | testImplementation "com.hazelcast:hazelcast:${hazelcastVersion}:tests" 80 | testImplementation "com.obsidiandynamics.fulcrum:fulcrum-assert:${fulcrumVersion}" 81 | testImplementation "com.obsidiandynamics.fulcrum:fulcrum-await:${fulcrumVersion}" 82 | testImplementation "com.obsidiandynamics.fulcrum:fulcrum-junit:${fulcrumVersion}" 83 | testImplementation "com.obsidiandynamics.yconf:yconf-snakeyaml:${yconfVersion}" 84 | testImplementation "junit:junit:${junitVersion}" 85 | testImplementation "org.mockito:mockito-core:${mockitoVersion}" 86 | testImplementation "nl.jqno.equalsverifier:equalsverifier:${equalsverifierVersion}" 87 | 88 | testImplementation 
"com.obsidiandynamics.zerolog:zerolog-slf4j17:${zerologVersion}" 89 | testImplementation "org.slf4j:slf4j-api:${slf4jVersion}" 90 | testImplementation "org.slf4j:slf4j-log4j12:${slf4jVersion}" 91 | testImplementation "log4j:log4j:${log4jVersion}" 92 | } 93 | 94 | test { 95 | exclude "**/*IT.class" 96 | jvmArgs += baseJvmArgs 97 | } 98 | 99 | jacoco { 100 | toolVersion = "0.8.8" 101 | } 102 | 103 | jacocoTestReport { 104 | additionalSourceDirs.from = files(sourceSets.main.allSource.srcDirs) 105 | sourceDirectories.from = files(sourceSets.main.allSource.srcDirs) 106 | classDirectories.from = files(sourceSets.main.output) 107 | reports { 108 | html.required = true 109 | xml.required = true 110 | csv.required = false 111 | } 112 | } 113 | } 114 | 115 | subprojects { 116 | dependencies { 117 | testImplementation project(":").sourceSets.test.output 118 | } 119 | } 120 | 121 | task jacocoRootReport(type: JacocoReport) { 122 | mustRunAfter = allprojects.test + allprojects.javadoc 123 | additionalSourceDirs.from = files(allprojects.sourceSets.main.allSource.srcDirs) 124 | sourceDirectories.from = files(allprojects.sourceSets.main.allSource.srcDirs) 125 | classDirectories.from = files(allprojects.sourceSets.main.output) 126 | executionData.setFrom project.fileTree(dir: '.', include: ['**/build/jacoco/test.exec']) 127 | reports { 128 | html.required = true 129 | xml.required = true 130 | csv.required = false 131 | } 132 | onlyIf = { 133 | true 134 | } 135 | afterEvaluate { 136 | classDirectories.from = files(classDirectories.files.collect { 137 | fileTree(dir: it, exclude: ["sample/**", "**/*Uncovered*"]) 138 | }) 139 | } 140 | } 141 | 142 | def packageName = "meteor-common" 143 | def packageDescription = "Common classes and utilities" 144 | 145 | dependencies { 146 | api "com.obsidiandynamics.fulcrum:fulcrum-retry:${fulcrumVersion}" 147 | api "com.obsidiandynamics.fulcrum:fulcrum-worker:${fulcrumVersion}" 148 | api "com.obsidiandynamics.yconf:yconf-core:${yconfVersion}" 149 | api "com.obsidiandynamics.zerolog:zerolog-core:${zerologVersion}" 150 | api "com.obsidiandynamics.zerolog:zerolog-bridge-hazelcast:${zerologVersion}" 151 | } 152 | 153 | jar { 154 | finalizedBy jacocoRootReport 155 | } 156 | 157 | publishing { 158 | publications { 159 | mavenJava(MavenPublication) { 160 | artifactId = packageName 161 | from components.java 162 | pom { 163 | name = packageName 164 | description = packageDescription 165 | url = 'https://github.com/obsidiandynamics/meteor' 166 | licenses { 167 | license { 168 | name = 'BSD 3-Clause License' 169 | url = 'https://opensource.org/licenses/BSD-3-Clause' 170 | } 171 | } 172 | developers { 173 | developer { 174 | id = 'ekoutanov' 175 | name = 'Emil Koutanov' 176 | email = 'ekoutanov@gmail.com' 177 | } 178 | } 179 | scm { 180 | connection = 'scm:git:git://github.com/obsidiandynamics/meteor.git' 181 | developerConnection = 'scm:git:ssh://github.com/obsidiandynamics/meteor.git' 182 | url = 'https://github.com/obsidiandynamics/meteor' 183 | } 184 | } 185 | } 186 | } 187 | } 188 | -------------------------------------------------------------------------------- /core/.gitignore: -------------------------------------------------------------------------------- 1 | /bin/ 2 | /build/ 3 | /.settings/ 4 | .classpath 5 | .project 6 | -------------------------------------------------------------------------------- /core/build.gradle: -------------------------------------------------------------------------------- 1 | def packageName = "meteor-core" 2 | def packageDescription = 
"Real-time message streaming over Hazelcast IMDG" 3 | 4 | dependencies { 5 | api project(":meteor-elect") 6 | api "com.obsidiandynamics.fulcrum:fulcrum-nodequeue:${fulcrumVersion}" 7 | 8 | testImplementation project(":") 9 | testImplementation project(":meteor-assurance") 10 | testImplementation "com.obsidiandynamics.fulcrum:fulcrum-testmark:${fulcrumVersion}" 11 | testImplementation "org.hdrhistogram:HdrHistogram:${hdrhistogramVersion}" 12 | 13 | testImplementation "com.obsidiandynamics.fulcrum:fulcrum-launcher:${fulcrumVersion}" 14 | } 15 | 16 | task launch(dependsOn: [classes, testClasses]) { 17 | doLast { 18 | def launcherClasses = new ArrayList() 19 | launcherClasses += "com.obsidiandynamics.meteor.PubSubOneWayTest" 20 | launcherClasses += "com.obsidiandynamics.meteor.PubSubRoundTripTest" 21 | 22 | def perfJvmArgs = "-XX:-MaxFDLimit -XX:+TieredCompilation -XX:+UseNUMA -XX:+UseCondCardMark " + 23 | "-XX:+UseBiasedLocking -Xms2G -Xmx2G -Xss1M -XX:+UseG1GC -XX:MaxGCPauseMillis=200 " + 24 | "-XX:InitiatingHeapOccupancyPercent=0 -Djava.net.preferIPv4Stack=true " + 25 | "--add-modules java.se --add-exports java.base/jdk.internal.ref=ALL-UNNAMED " + 26 | "--add-opens java.base/java.lang=ALL-UNNAMED --add-opens java.base/java.nio=ALL-UNNAMED " + 27 | "--add-opens java.base/sun.nio.ch=ALL-UNNAMED --add-opens java.management/sun.management=ALL-UNNAMED " + 28 | "--add-opens jdk.management/com.sun.management.internal=ALL-UNNAMED " + 29 | "-Dlauncher.package.compress.level=3" 30 | javaexec { 31 | systemProperties = System.properties 32 | classpath = sourceSets.main.compileClasspath + sourceSets.main.runtimeClasspath + sourceSets.test.compileClasspath + sourceSets.test.runtimeClasspath 33 | main = "com.obsidiandynamics.launcher.Launcher" 34 | args = launcherClasses 35 | jvmArgs = Arrays.asList(perfJvmArgs.split(" ")) + baseJvmArgs 36 | standardInput = System.in 37 | } 38 | } 39 | } 40 | 41 | publishing { 42 | publications { 43 | mavenJava(MavenPublication) { 44 | artifactId = packageName 45 | from components.java 46 | pom { 47 | name = packageName 48 | description = packageDescription 49 | url = 'https://github.com/obsidiandynamics/meteor' 50 | licenses { 51 | license { 52 | name = 'BSD 3-Clause License' 53 | url = 'https://opensource.org/licenses/BSD-3-Clause' 54 | } 55 | } 56 | developers { 57 | developer { 58 | id = 'ekoutanov' 59 | name = 'Emil Koutanov' 60 | email = 'ekoutanov@gmail.com' 61 | } 62 | } 63 | scm { 64 | connection = 'scm:git:git://github.com/obsidiandynamics/meteor.git' 65 | developerConnection = 'scm:git:ssh://github.com/obsidiandynamics/meteor.git' 66 | url = 'https://github.com/obsidiandynamics/meteor' 67 | } 68 | } 69 | } 70 | } 71 | } -------------------------------------------------------------------------------- /core/src/main/java/com/obsidiandynamics/meteor/DefaultPublisher.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import static com.obsidiandynamics.retry.Retry.*; 4 | 5 | import java.util.*; 6 | import java.util.concurrent.*; 7 | 8 | import com.hazelcast.core.*; 9 | import com.hazelcast.ringbuffer.*; 10 | import com.obsidiandynamics.meteor.util.*; 11 | import com.obsidiandynamics.nodequeue.*; 12 | import com.obsidiandynamics.retry.*; 13 | import com.obsidiandynamics.worker.*; 14 | import com.obsidiandynamics.zerolog.util.*; 15 | 16 | final class DefaultPublisher implements Publisher, Joinable { 17 | private static final int PUBLISH_MAX_YIELDS = 100; 18 | private static final int 
PUBLISH_BACKOFF_MILLIS = 1; 19 | private static final int MAX_BATCH_SIZE = 1_000; 20 | 21 | private static class AsyncRecord { 22 | final Record record; 23 | final PublishCallback callback; 24 | 25 | AsyncRecord(Record record, PublishCallback callback) { 26 | this.record = record; 27 | this.callback = callback; 28 | } 29 | } 30 | 31 | private final HazelcastInstance instance; 32 | 33 | private final PublisherConfig config; 34 | 35 | private final WorkerThread publishThread; 36 | 37 | private final NodeQueue queue = new NodeQueue<>(); 38 | 39 | private final QueueConsumer queueConsumer = queue.consumer(); 40 | 41 | private final RetryableRingbuffer buffer; 42 | 43 | private int yields; 44 | 45 | DefaultPublisher(HazelcastInstance instance, PublisherConfig config) { 46 | this.instance = instance; 47 | this.config = config; 48 | final StreamConfig streamConfig = config.getStreamConfig(); 49 | 50 | final Retry retry = new Retry() 51 | .withExceptionMatcher(isA(HazelcastException.class)) 52 | .withAttempts(Integer.MAX_VALUE) 53 | .withBackoff(100) 54 | .withFaultHandler(config.getZlg()::w) 55 | .withErrorHandler(config.getZlg()::e); 56 | buffer = new RetryableRingbuffer<>(retry, StreamHelper.getRingbuffer(instance, streamConfig)); 57 | 58 | publishThread = WorkerThread.builder() 59 | .withOptions(new WorkerOptions().daemon().withName(Publisher.class, streamConfig.getName(), "publisher")) 60 | .onCycle(this::publisherCycle) 61 | .onUncaughtException(new ZlgWorkerExceptionHandler(config.getZlg())) 62 | .buildAndStart(); 63 | } 64 | 65 | @Override 66 | public PublisherConfig getConfig() { 67 | return config; 68 | } 69 | 70 | HazelcastInstance getInstance() { 71 | return instance; 72 | } 73 | 74 | private void publisherCycle(WorkerThread t) throws InterruptedException { 75 | List recs = null; 76 | 77 | for (;;) { 78 | final AsyncRecord rec = queueConsumer.poll(); 79 | if (rec != null) { 80 | if (recs == null) { 81 | recs = new ArrayList<>(); 82 | yields = 0; 83 | } 84 | recs.add(rec); 85 | if (recs.size() == MAX_BATCH_SIZE) { 86 | sendNow(recs); 87 | return; 88 | } 89 | } else { 90 | if (recs != null) { 91 | sendNow(recs); 92 | } else if (yields++ < PUBLISH_MAX_YIELDS) { 93 | Thread.yield(); 94 | } else { 95 | //noinspection BusyWait 96 | Thread.sleep(PUBLISH_BACKOFF_MILLIS); 97 | } 98 | return; 99 | } 100 | } 101 | } 102 | 103 | private void sendNow(List recs) throws InterruptedException { 104 | final List items = new ArrayList<>(recs.size()); 105 | final int size = recs.size(); 106 | for (int i = 0; i < size; i++) { 107 | items.add(recs.get(i).record.getData()); 108 | } 109 | 110 | final ICompletableFuture f = buffer.addAllAsync(items, OverflowPolicy.OVERWRITE); 111 | try { 112 | final long lastSequence = f.get(); 113 | final long firstSequence = lastSequence - size + 1; 114 | 115 | for (int i = 0; i < size; i++) { 116 | final AsyncRecord rec = recs.get(i); 117 | final long offset = firstSequence + i; 118 | rec.record.setOffset(offset); 119 | rec.callback.onComplete(offset, null); 120 | } 121 | } catch (ExecutionException e) { 122 | for (AsyncRecord rec : recs) { 123 | rec.callback.onComplete(Record.UNASSIGNED_OFFSET, e.getCause()); 124 | } 125 | } 126 | } 127 | 128 | @Override 129 | public long publishDirect(Record record) { 130 | final long sequence = buffer.add(record.getData()); 131 | record.setOffset(sequence); 132 | return sequence; 133 | } 134 | 135 | @Override 136 | public void publishAsync(Record record, PublishCallback callback) { 137 | queue.add(new AsyncRecord(record, callback)); 138 
| } 139 | 140 | @Override 141 | public Joinable terminate() { 142 | publishThread.terminate(); 143 | return this; 144 | } 145 | 146 | @Override 147 | public boolean join(long timeoutMillis) throws InterruptedException { 148 | return publishThread.join(timeoutMillis); 149 | } 150 | } 151 | -------------------------------------------------------------------------------- /core/src/main/java/com/obsidiandynamics/meteor/DefaultReceiver.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import com.obsidiandynamics.worker.*; 4 | import com.obsidiandynamics.zerolog.util.*; 5 | 6 | public final class DefaultReceiver implements Receiver { 7 | private final Subscriber subscriber; 8 | 9 | private final RecordHandler recordHandler; 10 | 11 | private final int pollTimeoutMillis; 12 | 13 | private final WorkerThread pollerThread; 14 | 15 | DefaultReceiver(Subscriber subscriber, RecordHandler recordHandler, int pollTimeoutMillis) { 16 | this.subscriber = subscriber; 17 | this.recordHandler = recordHandler; 18 | this.pollTimeoutMillis = pollTimeoutMillis; 19 | pollerThread = WorkerThread.builder() 20 | .withOptions(new WorkerOptions() 21 | .daemon() 22 | .withName(Receiver.class, subscriber.getConfig().getStreamConfig().getName(), "poller")) 23 | .onCycle(this::pollerCycle) 24 | .onUncaughtException(new ZlgWorkerExceptionHandler(subscriber.getConfig().getZlg())) 25 | .buildAndStart(); 26 | } 27 | 28 | private void pollerCycle(WorkerThread thread) throws InterruptedException { 29 | final RecordBatch batch = subscriber.poll(pollTimeoutMillis); 30 | for (Record record : batch) { 31 | recordHandler.onRecord(record); 32 | } 33 | } 34 | 35 | @Override 36 | public Joinable terminate() { 37 | pollerThread.terminate(); 38 | return this; 39 | } 40 | 41 | @Override 42 | public boolean join(long timeoutMillis) throws InterruptedException { 43 | return pollerThread.join(timeoutMillis); 44 | } 45 | 46 | WorkerState getThreadState() { 47 | return pollerThread.getState(); 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /core/src/main/java/com/obsidiandynamics/meteor/DefaultSubscriber.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import static com.obsidiandynamics.retry.Retry.*; 4 | 5 | import java.util.*; 6 | import java.util.concurrent.*; 7 | 8 | import com.hazelcast.core.*; 9 | import com.hazelcast.ringbuffer.*; 10 | import com.obsidiandynamics.func.*; 11 | import com.obsidiandynamics.meteor.Receiver.*; 12 | import com.obsidiandynamics.meteor.util.*; 13 | import com.obsidiandynamics.retry.*; 14 | import com.obsidiandynamics.worker.*; 15 | import com.obsidiandynamics.worker.Terminator; 16 | import com.obsidiandynamics.zerolog.util.*; 17 | 18 | public final class DefaultSubscriber implements Subscriber, Joinable { 19 | /** Cycle backoff for the keeper thread. */ 20 | private static final int KEEPER_BACKOFF_MILLIS = 1; 21 | 22 | /** The maximum amount of time an unassigned subscriber is allowed to sleep before re-checking the 23 | * assignment status. 
*/ 24 | private static final int MAX_UNASSIGNED_SLEEP_MILLIS = 10; 25 | 26 | private final HazelcastInstance instance; 27 | 28 | private final SubscriberConfig config; 29 | 30 | private final RetryableRingbuffer buffer; 31 | 32 | private final RetryableMap offsets; 33 | 34 | private final Election election; 35 | 36 | private final UUID leaseCandidate; 37 | 38 | private final WorkerThread keeperThread; 39 | 40 | private final int readBatchSize; 41 | 42 | private volatile long nextReadOffset; 43 | 44 | private volatile long lastReadOffset; 45 | 46 | private volatile long scheduledConfirmOffset = Record.UNASSIGNED_OFFSET; 47 | 48 | private long lastConfirmedOffset = scheduledConfirmOffset; 49 | 50 | private volatile long scheduledExtendTimestamp = 0; 51 | 52 | private long lastExtendTimestamp = scheduledExtendTimestamp; 53 | 54 | private boolean active = true; 55 | 56 | private final Object activeLock = new Object(); 57 | 58 | private volatile Receiver receiver; 59 | 60 | DefaultSubscriber(HazelcastInstance instance, SubscriberConfig config) { 61 | this.instance = instance; 62 | this.config = config; 63 | 64 | final StreamConfig streamConfig = config.getStreamConfig(); 65 | final Retry retry = new Retry() 66 | .withExceptionMatcher(isA(HazelcastException.class)) 67 | .withAttempts(Integer.MAX_VALUE) 68 | .withBackoff(100) 69 | .withFaultHandler(config.getZlg()::w) 70 | .withErrorHandler(config.getZlg()::e); 71 | buffer = new RetryableRingbuffer<>(retry, StreamHelper.getRingbuffer(instance, streamConfig)); 72 | readBatchSize = Math.min(1_000, streamConfig.getHeapCapacity()); 73 | 74 | if (config.hasGroup()) { 75 | // checks for IllegalArgumentException; no initial assignment is made until poll() is called 76 | getInitialOffset(true); 77 | nextReadOffset = Record.UNASSIGNED_OFFSET; 78 | 79 | offsets = new RetryableMap<>(retry, StreamHelper.getOffsetsMap(instance, streamConfig, config.getMapStoreConfig())); 80 | 81 | final IMap leases = StreamHelper.getLeaseMap(instance, streamConfig, config.getMapStoreConfig()); 82 | leaseCandidate = UUID.randomUUID(); 83 | election = new Election(config.getElectionConfig(), leases); 84 | election.getRegistry().enrol(config.getGroup(), leaseCandidate); 85 | election.start(); 86 | 87 | keeperThread = WorkerThread.builder() 88 | .withOptions(new WorkerOptions().daemon().withName(Subscriber.class, streamConfig.getName(), "keeper")) 89 | .onCycle(this::keeperCycle) 90 | .onUncaughtException(new ZlgWorkerExceptionHandler(config.getZlg())) 91 | .buildAndStart(); 92 | } else { 93 | if (config.getInitialOffsetScheme() == InitialOffsetScheme.NONE) { 94 | throw new InvalidInitialOffsetSchemeException("Cannot use initial offset scheme " + InitialOffsetScheme.NONE + 95 | " in an ungrouped context"); 96 | } 97 | // performs initial offset assignment 98 | nextReadOffset = getInitialOffset(false); 99 | offsets = null; 100 | election = null; 101 | leaseCandidate = null; 102 | keeperThread = null; 103 | } 104 | lastReadOffset = nextReadOffset - 1; 105 | } 106 | 107 | @Override 108 | public SubscriberConfig getConfig() { 109 | return config; 110 | } 111 | 112 | HazelcastInstance getInstance() { 113 | return instance; 114 | } 115 | 116 | Election getElection() { 117 | return election; 118 | } 119 | 120 | private String getServiceInfo(DistributedObject obj) { 121 | final Partition partition = instance.getPartitionService().getPartition(obj.getPartitionKey()); 122 | return String.format("serviceName=%s, partitionId=%d, owner=%s", 123 | obj.getServiceName(), 
partition.getPartitionId(), partition.getOwner()); 124 | } 125 | 126 | private static long computeWait(long wakeTime, long maxSleepMillis) { 127 | return Math.min(Math.max(0, wakeTime - System.currentTimeMillis()), maxSleepMillis); 128 | } 129 | 130 | @Override 131 | public RecordBatch poll(long timeoutMillis) throws InterruptedException { 132 | final boolean isGroupSubscriber = leaseCandidate != null; 133 | 134 | final long wake = System.currentTimeMillis() + timeoutMillis; 135 | for (;;) { 136 | final boolean isCurrentTenant = isGroupSubscriber && isCurrentTenant(); 137 | 138 | if (! isGroupSubscriber || isCurrentTenant) { 139 | if (nextReadOffset == Record.UNASSIGNED_OFFSET) { 140 | nextReadOffset = loadConfirmedOffset() + 1; 141 | lastReadOffset = nextReadOffset - 1; 142 | } 143 | 144 | final ICompletableFuture> f = buffer.readManyAsync(nextReadOffset, 1, readBatchSize, StreamHelper::isNotNull); 145 | 146 | final long waitMillis = computeWait(wake, Long.MAX_VALUE); 147 | try { 148 | final ReadResultSet resultSet = f.get(waitMillis, TimeUnit.MILLISECONDS); 149 | lastReadOffset = resultSet.getSequence(resultSet.size() - 1); 150 | nextReadOffset = lastReadOffset + 1; 151 | return readBatch(resultSet); 152 | } catch (ExecutionException e) { 153 | if (e.getCause() instanceof StaleSequenceException) { 154 | // if a stale sequence exception is encountered, fast-forward the sequence to the last known head 155 | // sequence and add a further 'safety margin' so that the next read doesn't hit another stale offset 156 | final double safetyMarginFrac = config.getStaleReadSafetyMargin(); 157 | final long headSeq = ((StaleSequenceException) e.getCause()).getHeadSeq(); 158 | final long safetyMargin = (long) (config.getStreamConfig().getHeapCapacity() * safetyMarginFrac); 159 | final long ffNextReadOffset = headSeq + safetyMargin; 160 | config.getZlg().w("Sequence %,d was stale (head already at %,d), fast-forwarding to %,d", 161 | z -> z.arg(nextReadOffset).arg(headSeq).arg(ffNextReadOffset)); 162 | nextReadOffset = ffNextReadOffset; 163 | } else { 164 | final String serviceInfo = getServiceInfo(buffer.getRingbuffer()); 165 | final String m = String.format("Error reading at offset %,d from stream %s [%s]", 166 | nextReadOffset, config.getStreamConfig().getName(), serviceInfo); 167 | config.getExceptionHandler().onException(m, e.getCause()); 168 | f.cancel(true); 169 | //noinspection BusyWait 170 | Thread.sleep(waitMillis); 171 | return RecordBatch.empty(); 172 | } 173 | } catch (TimeoutException e) { 174 | f.cancel(true); 175 | return RecordBatch.empty(); 176 | } finally { 177 | if (isCurrentTenant) { 178 | scheduledExtendTimestamp = System.currentTimeMillis(); 179 | } 180 | } 181 | } else { 182 | nextReadOffset = Record.UNASSIGNED_OFFSET; 183 | final long sleepMillis = computeWait(wake, MAX_UNASSIGNED_SLEEP_MILLIS); 184 | if (sleepMillis > 0) { 185 | //noinspection BusyWait 186 | Thread.sleep(sleepMillis); 187 | } else { 188 | return RecordBatch.empty(); 189 | } 190 | } 191 | } 192 | } 193 | 194 | private long loadConfirmedOffset() { 195 | final Long confirmedOffset = offsets.get(config.getGroup()); 196 | if (confirmedOffset != null) { 197 | return confirmedOffset; 198 | } else { 199 | return getInitialOffset(true) - 1; 200 | } 201 | } 202 | 203 | private long getInitialOffset(boolean useGroups) { 204 | // resolve AUTO to the appropriate scheme (EARLIEST/LATEST/NONE) depending on group mode 205 | final InitialOffsetScheme concreteInitialOffsetScheme = 206 | 
config.getInitialOffsetScheme().resolveConcreteScheme(useGroups); 207 | if (concreteInitialOffsetScheme == InitialOffsetScheme.EARLIEST) { 208 | return 0; 209 | } else if (concreteInitialOffsetScheme == InitialOffsetScheme.LATEST) { 210 | return buffer.tailSequence() + 1; 211 | } else { 212 | throw new OffsetLoadException("No persisted offset"); 213 | } 214 | } 215 | 216 | private static RecordBatch readBatch(ReadResultSet resultSet) { 217 | final List records = new ArrayList<>(resultSet.size()); 218 | long offset = resultSet.getSequence(0); 219 | for (byte[] result : resultSet) { 220 | records.add(new Record(result, offset++)); 221 | } 222 | return new ListRecordBatch(records); 223 | } 224 | 225 | private void ensureGroupMode() { 226 | if (leaseCandidate == null) { 227 | throw new IllegalStateException("Cannot call this operation in an ungrouped context"); 228 | } 229 | } 230 | 231 | private void ensureGroupFreeMode() { 232 | if (leaseCandidate != null) { 233 | throw new IllegalStateException("Cannot call this operation in a grouped context"); 234 | } 235 | } 236 | 237 | @Override 238 | public void confirm() { 239 | ensureGroupMode(); 240 | 241 | if (lastReadOffset >= StreamHelper.SMALLEST_OFFSET) { 242 | confirm(lastReadOffset); 243 | } 244 | } 245 | 246 | @Override 247 | public void confirm(long offset) { 248 | ensureGroupMode(); 249 | 250 | if (offset < StreamHelper.SMALLEST_OFFSET || offset > lastReadOffset) { 251 | throw new IllegalArgumentException(String.format("Illegal offset %d; last read %d", offset, lastReadOffset)); 252 | } 253 | 254 | scheduledConfirmOffset = offset; 255 | } 256 | 257 | @Override 258 | public void seek(long offset) { 259 | ensureGroupFreeMode(); 260 | 261 | if (offset < StreamHelper.SMALLEST_OFFSET) throw new IllegalArgumentException("Invalid seek offset " + offset); 262 | nextReadOffset = offset; 263 | } 264 | 265 | private void keeperCycle(WorkerThread t) throws InterruptedException { 266 | final long scheduledConfirmOffset = this.scheduledConfirmOffset; 267 | final long scheduledExtendTimestamp = this.scheduledExtendTimestamp; 268 | 269 | boolean performedWork = false; 270 | synchronized (activeLock) { 271 | if (active) { 272 | if (scheduledConfirmOffset != lastConfirmedOffset) { 273 | performedWork = true; 274 | putOffset(scheduledConfirmOffset); 275 | lastConfirmedOffset = scheduledConfirmOffset; 276 | } 277 | 278 | if (scheduledExtendTimestamp != lastExtendTimestamp) { 279 | final long timeSinceLastExtend = System.currentTimeMillis() - lastExtendTimestamp; 280 | if (timeSinceLastExtend >= config.getMinLeaseExtendInterval()) { 281 | performedWork = true; 282 | extendLease(); 283 | lastExtendTimestamp = scheduledExtendTimestamp; 284 | } 285 | } 286 | } else { 287 | // avoid confirming offsets or extending the lease if this subscriber has been deactivated, 288 | // but update the timestamps to thwart future attempts 289 | lastConfirmedOffset = scheduledConfirmOffset; 290 | lastExtendTimestamp = scheduledExtendTimestamp; 291 | } 292 | } 293 | 294 | if (! 
performedWork) { 295 | Thread.sleep(KEEPER_BACKOFF_MILLIS); 296 | } 297 | } 298 | 299 | private void putOffset(long offset) { 300 | if (isCurrentTenant()) { 301 | doWithExceptionHandler(() -> offsets.put(config.getGroup(), offset), 302 | config.getExceptionHandler(), 303 | "Failed to update offset"); 304 | } else { 305 | final String m = String.format("Failed confirming offset %s for stream %s: %s is not the current tenant for group %s", 306 | offset, config.getStreamConfig().getName(), leaseCandidate, config.getGroup()); 307 | config.getExceptionHandler().onException(m, null); 308 | } 309 | } 310 | 311 | private void extendLease() { 312 | doWithExceptionHandler(() -> election.extend(config.getGroup(), leaseCandidate), 313 | config.getExceptionHandler(), 314 | "Failed to extend lease"); 315 | } 316 | 317 | private boolean isCurrentTenant() { 318 | return election.getLeaseView().isCurrentTenant(config.getGroup(), leaseCandidate); 319 | } 320 | 321 | @Override 322 | public boolean isAssigned() { 323 | return leaseCandidate == null || isCurrentTenant(); 324 | } 325 | 326 | @Override 327 | public void deactivate() { 328 | deactivate(config.getExceptionHandler()); 329 | } 330 | 331 | private void deactivate(ExceptionHandler errorHandler) { 332 | ensureGroupMode(); 333 | 334 | synchronized (activeLock) { 335 | election.getRegistry().unenrol(config.getGroup(), leaseCandidate); 336 | if (isCurrentTenant()) { 337 | doWithExceptionHandler(() -> election.yield(config.getGroup(), leaseCandidate), 338 | errorHandler, 339 | "Failed to yield lease"); 340 | } 341 | active = false; 342 | } 343 | } 344 | 345 | private static void doWithExceptionHandler(CheckedRunnable r, ExceptionHandler errorHandler, String message) { 346 | try { 347 | r.run(); 348 | } catch (Throwable e) { 349 | errorHandler.onException(message, e); 350 | } 351 | } 352 | 353 | @Override 354 | public void reactivate() { 355 | ensureGroupMode(); 356 | 357 | synchronized (activeLock) { 358 | election.getRegistry().enrol(config.getGroup(), leaseCandidate); 359 | active = true; 360 | } 361 | } 362 | 363 | @Override 364 | public Receiver attachReceiver(RecordHandler recordHandler, int pollTimeoutMillis) { 365 | if (receiver != null) { 366 | throw new IllegalStateException("A receiver has already been attached"); 367 | } 368 | 369 | return receiver = new DefaultReceiver(this, recordHandler, pollTimeoutMillis); 370 | } 371 | 372 | @Override 373 | public Joinable terminate() { 374 | if (leaseCandidate != null) { 375 | deactivate(ExceptionHandler.nop()); 376 | } 377 | 378 | Terminator.blank() 379 | .add(Optional.ofNullable(receiver)) 380 | .add(Optional.ofNullable(keeperThread)) 381 | .add(Optional.ofNullable(election)) 382 | .terminate(); 383 | return this; 384 | } 385 | 386 | @Override 387 | public boolean join(long timeoutMillis) throws InterruptedException { 388 | return Joiner.blank() 389 | .add(Optional.ofNullable(receiver)) 390 | .add(Optional.ofNullable(keeperThread)) 391 | .add(Optional.ofNullable(election)) 392 | .join(timeoutMillis); 393 | } 394 | } 395 | -------------------------------------------------------------------------------- /core/src/main/java/com/obsidiandynamics/meteor/FuturePublishCallback.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import java.util.concurrent.*; 4 | 5 | public final class FuturePublishCallback extends CompletableFuture implements PublishCallback { 6 | @Override 7 | public void onComplete(long offset, Throwable 
error) { 8 | if (error != null) { 9 | completeExceptionally(error); 10 | } else { 11 | complete(offset); 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /core/src/main/java/com/obsidiandynamics/meteor/InitialOffsetScheme.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | public enum InitialOffsetScheme { 4 | EARLIEST, 5 | LATEST, 6 | AUTO, 7 | NONE; 8 | 9 | InitialOffsetScheme resolveConcreteScheme(boolean useGroups) { 10 | if (this == AUTO) { 11 | return useGroups ? EARLIEST : LATEST; 12 | } else { 13 | return this; 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /core/src/main/java/com/obsidiandynamics/meteor/InvalidInitialOffsetSchemeException.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | public final class InvalidInitialOffsetSchemeException extends RuntimeException { 4 | private static final long serialVersionUID = 1L; 5 | 6 | InvalidInitialOffsetSchemeException(String m) { super(m); } 7 | } 8 | -------------------------------------------------------------------------------- /core/src/main/java/com/obsidiandynamics/meteor/ListRecordBatch.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import java.util.*; 4 | 5 | final class ListRecordBatch implements RecordBatch { 6 | private final List records; 7 | 8 | ListRecordBatch(List records) { 9 | this.records = records; 10 | } 11 | 12 | @Override 13 | public int size() { 14 | return records.size(); 15 | } 16 | 17 | @Override 18 | public Iterator iterator() { 19 | return records.iterator(); 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /core/src/main/java/com/obsidiandynamics/meteor/LogAwareExceptionHandler.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import java.util.function.*; 4 | 5 | import com.obsidiandynamics.func.*; 6 | import com.obsidiandynamics.zerolog.*; 7 | 8 | final class LogAwareExceptionHandler implements ExceptionHandler { 9 | private final Supplier zlgSupplier; 10 | 11 | LogAwareExceptionHandler(Supplier zlgSupplier) { 12 | this.zlgSupplier = zlgSupplier; 13 | } 14 | 15 | @Override 16 | public void onException(String summary, Throwable error) { 17 | zlgSupplier.get().w(summary, error); 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /core/src/main/java/com/obsidiandynamics/meteor/Namespace.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import com.obsidiandynamics.meteor.util.*; 4 | 5 | public enum Namespace implements NamespaceEnum { 6 | METEOR_META, 7 | METEOR_STREAM 8 | } 9 | -------------------------------------------------------------------------------- /core/src/main/java/com/obsidiandynamics/meteor/OffsetLoadException.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | public final class OffsetLoadException extends RuntimeException { 4 | private static final long serialVersionUID = 1L; 5 | 6 | OffsetLoadException(String m) { super(m); } 7 | } 8 | 
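The DefaultPublisher and DefaultSubscriber implementations above are wired together through the Publisher, Subscriber, Record and config types that follow. Below is a minimal usage sketch assembled from those interfaces — the stream name, group name, payload and class name are illustrative only, it is not one of the bundled samples, and instance creation simply mirrors the GridProvider/Config pattern used by the test harness.

package com.obsidiandynamics.meteor;

import java.nio.charset.*;

import com.hazelcast.config.*;
import com.hazelcast.core.*;

/** Illustrative end-to-end sketch of the core pub/sub API; names and values are hypothetical. */
public final class PubSubUsageSketch {
  public static void main(String[] args) throws InterruptedException {
    // create an embedded Hazelcast member, as the test harness does
    final HazelcastInstance instance = GridProvider.getInstance()
        .createInstance(new Config().setProperty("hazelcast.logging.type", "none"));

    // the stream is backed by a distributed ring buffer of the given heap capacity
    final StreamConfig streamConfig = new StreamConfig()
        .withName("example.stream")
        .withHeapCapacity(10_000);

    // publish one record asynchronously; the callback receives the assigned offset (or an error)
    final Publisher publisher = Publisher.createDefault(instance,
        new PublisherConfig().withStreamConfig(streamConfig));
    publisher.publishAsync(new Record("hello".getBytes(StandardCharsets.UTF_8)),
                           (offset, error) -> System.out.println(
                               error == null ? "published at offset " + offset : "publish failed: " + error));

    // a grouped subscriber contends for a lease and confirms offsets against its group
    final Subscriber subscriber = Subscriber.createDefault(instance,
        new SubscriberConfig()
            .withGroup("example-group")
            .withInitialOffsetScheme(InitialOffsetScheme.EARLIEST)
            .withStreamConfig(streamConfig));

    // the first polls may come back empty while the group lease is being acquired
    RecordBatch batch;
    do {
      batch = subscriber.poll(1_000);
    } while (batch.isEmpty());

    for (Record record : batch) {
      System.out.println("received: " + new String(record.getData(), StandardCharsets.UTF_8));
    }
    subscriber.confirm();   // confirm up to the last read offset for this group

    publisher.terminate();
    subscriber.terminate();
    instance.getLifecycleService().terminate();
  }
}

Note that publishAsync() only enqueues the record: the publisher's background thread drains the queue, batches records into a single addAllAsync() call on the ring buffer, and invokes each callback once the resulting sequence range is known — so offsets become visible to callbacks slightly after the publish call returns.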
-------------------------------------------------------------------------------- /core/src/main/java/com/obsidiandynamics/meteor/PublishCallback.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | @FunctionalInterface 4 | public interface PublishCallback { 5 | static PublishCallback nop() { return (__offset, __error) -> {}; } 6 | 7 | void onComplete(long offset, Throwable error); 8 | } 9 | -------------------------------------------------------------------------------- /core/src/main/java/com/obsidiandynamics/meteor/Publisher.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import com.hazelcast.core.*; 4 | import com.obsidiandynamics.worker.*; 5 | 6 | public interface Publisher extends Terminable { 7 | PublisherConfig getConfig(); 8 | 9 | long publishDirect(Record record); 10 | 11 | void publishAsync(Record record, PublishCallback callback); 12 | 13 | default FuturePublishCallback publishAsync(Record record) { 14 | final FuturePublishCallback futureCallback = new FuturePublishCallback(); 15 | publishAsync(record, futureCallback); 16 | return futureCallback; 17 | } 18 | 19 | static Publisher createDefault(HazelcastInstance instance, PublisherConfig config) { 20 | return new DefaultPublisher(instance, config); 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /core/src/main/java/com/obsidiandynamics/meteor/PublisherConfig.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import com.obsidiandynamics.yconf.*; 4 | import com.obsidiandynamics.zerolog.*; 5 | 6 | @Y 7 | public final class PublisherConfig { 8 | @YInject 9 | private Zlg zlg = Zlg.forDeclaringClass().get(); 10 | 11 | @YInject 12 | private StreamConfig streamConfig = new StreamConfig(); 13 | 14 | Zlg getZlg() { 15 | return zlg; 16 | } 17 | 18 | public PublisherConfig withZlg(Zlg zlg) { 19 | this.zlg = zlg; 20 | return this; 21 | } 22 | 23 | StreamConfig getStreamConfig() { 24 | return streamConfig; 25 | } 26 | 27 | public PublisherConfig withStreamConfig(StreamConfig streamConfig) { 28 | this.streamConfig = streamConfig; 29 | return this; 30 | } 31 | 32 | @Override 33 | public String toString() { 34 | return PublisherConfig.class.getSimpleName() + " [streamConfig=" + streamConfig + "]"; 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /core/src/main/java/com/obsidiandynamics/meteor/Receiver.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import com.obsidiandynamics.worker.*; 4 | 5 | public interface Receiver extends Terminable, Joinable { 6 | @FunctionalInterface 7 | interface RecordHandler { 8 | void onRecord(Record record); 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /core/src/main/java/com/obsidiandynamics/meteor/Record.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | public final class Record { 4 | public static final long UNASSIGNED_OFFSET = -1; 5 | 6 | private long offset = UNASSIGNED_OFFSET; 7 | 8 | private final byte[] data; 9 | 10 | public Record(byte[] data) { 11 | this.data = data; 12 | } 13 | 14 | Record(byte[] data, long offset) { 15 | this.data 
= data; 16 | this.offset = offset; 17 | } 18 | 19 | public long getOffset() { 20 | return offset; 21 | } 22 | 23 | public void setOffset(long offset) { 24 | this.offset = offset; 25 | } 26 | 27 | public byte[] getData() { 28 | return data; 29 | } 30 | 31 | @Override 32 | public String toString() { 33 | return Record.class.getSimpleName() + " [offset=" + offset + ", data.length=" + data.length + "]"; 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /core/src/main/java/com/obsidiandynamics/meteor/RecordBatch.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import java.util.*; 4 | 5 | public interface RecordBatch extends Iterable { 6 | RecordBatch empty = new RecordBatch() { 7 | @Override 8 | public int size() { 9 | return 0; 10 | } 11 | 12 | @Override 13 | public Iterator iterator() { 14 | return Collections.emptyIterator(); 15 | } 16 | }; 17 | 18 | static RecordBatch empty() { return empty; } 19 | 20 | int size(); 21 | 22 | @Override 23 | Iterator iterator(); 24 | 25 | default boolean isEmpty() { 26 | return size() == 0; 27 | } 28 | 29 | default List toList() { 30 | final List list = new ArrayList<>(size()); 31 | readInto(list); 32 | return list; 33 | } 34 | 35 | default void readInto(Collection sink) { 36 | iterator().forEachRemaining(sink::add); 37 | } 38 | 39 | default Record first() { 40 | final List records = toList(); 41 | if (records.isEmpty()) throw new NoSuchElementException(); 42 | return records.get(0); 43 | } 44 | 45 | default Record last() { 46 | final List records = toList(); 47 | if (records.isEmpty()) throw new NoSuchElementException(); 48 | return records.get(records.size() - 1); 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /core/src/main/java/com/obsidiandynamics/meteor/StreamConfig.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import com.hazelcast.config.*; 4 | import com.obsidiandynamics.yconf.*; 5 | 6 | @Y 7 | public final class StreamConfig { 8 | @YInject 9 | private String name; 10 | 11 | @YInject 12 | private int heapCapacity = RingbufferConfig.DEFAULT_CAPACITY; 13 | 14 | @YInject 15 | private int syncReplicas = RingbufferConfig.DEFAULT_SYNC_BACKUP_COUNT; 16 | 17 | @YInject 18 | private int asyncReplicas = RingbufferConfig.DEFAULT_ASYNC_BACKUP_COUNT; 19 | 20 | @YInject 21 | private RingbufferStoreConfig ringbufferStoreConfig = new RingbufferStoreConfig().setEnabled(false); 22 | 23 | String getName() { 24 | return name; 25 | } 26 | 27 | public StreamConfig withName(String name) { 28 | this.name = name; 29 | return this; 30 | } 31 | 32 | int getHeapCapacity() { 33 | return heapCapacity; 34 | } 35 | 36 | public StreamConfig withHeapCapacity(int heapCapacity) { 37 | this.heapCapacity = heapCapacity; 38 | return this; 39 | } 40 | 41 | int getSyncReplicas() { 42 | return syncReplicas; 43 | } 44 | 45 | public StreamConfig withSyncReplicas(int syncReplicas) { 46 | this.syncReplicas = syncReplicas; 47 | return this; 48 | } 49 | 50 | int getAsyncReplicas() { 51 | return asyncReplicas; 52 | } 53 | 54 | public StreamConfig withAsyncReplicas(int asyncReplicas) { 55 | this.asyncReplicas = asyncReplicas; 56 | return this; 57 | } 58 | 59 | RingbufferStoreConfig getRingbufferStoreConfig() { 60 | return ringbufferStoreConfig; 61 | } 62 | 63 | public StreamConfig 
withRingbufferStoreConfig(RingbufferStoreConfig ringbufferStoreConfig) { 64 | this.ringbufferStoreConfig = ringbufferStoreConfig; 65 | return this; 66 | } 67 | 68 | @Override 69 | public String toString() { 70 | return StreamConfig.class.getSimpleName() + " [name=" + name + ", heapCapacity=" + heapCapacity 71 | + ", syncReplicas=" + syncReplicas + ", asyncReplicas=" + asyncReplicas 72 | + ", ringbufferStoreConfig=" + ringbufferStoreConfig + "]"; 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /core/src/main/java/com/obsidiandynamics/meteor/StreamHelper.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import com.hazelcast.config.*; 4 | import com.hazelcast.core.*; 5 | import com.hazelcast.ringbuffer.*; 6 | 7 | final class StreamHelper { 8 | static final long SMALLEST_OFFSET = 0; 9 | 10 | static boolean isNotNull(byte[] bytes) { 11 | return bytes != null; 12 | } 13 | 14 | private StreamHelper() {} 15 | 16 | static Ringbuffer getRingbuffer(HazelcastInstance instance, StreamConfig streamConfig) { 17 | final String streamFQName = Namespace.METEOR_STREAM.qualify(streamConfig.getName()); 18 | final RingbufferConfig ringbufferConfig = new RingbufferConfig(streamFQName) 19 | .setBackupCount(streamConfig.getSyncReplicas()) 20 | .setAsyncBackupCount(streamConfig.getAsyncReplicas()) 21 | .setCapacity(streamConfig.getHeapCapacity()) 22 | .setRingbufferStoreConfig(streamConfig.getRingbufferStoreConfig()); 23 | instance.getConfig().addRingBufferConfig(ringbufferConfig); 24 | return instance.getRingbuffer(streamFQName); 25 | } 26 | 27 | static IMap getLeaseMap(HazelcastInstance instance, StreamConfig streamConfig, 28 | MapStoreConfig mapStoreConfig) { 29 | return getMap(instance, Namespace.METEOR_META.qualify("lease." + streamConfig.getName()), 30 | streamConfig, mapStoreConfig); 31 | } 32 | 33 | static IMap getOffsetsMap(HazelcastInstance instance, StreamConfig streamConfig, 34 | MapStoreConfig mapStoreConfig) { 35 | return getMap(instance, Namespace.METEOR_META.qualify("offsets." 
+ streamConfig.getName()), 36 | streamConfig, mapStoreConfig); 37 | } 38 | 39 | static IMap getMap(HazelcastInstance instance, String mapFQName, 40 | StreamConfig streamConfig, MapStoreConfig mapStoreConfig) { 41 | final MapConfig mapConfig = new MapConfig(mapFQName) 42 | .setBackupCount(streamConfig.getSyncReplicas()) 43 | .setAsyncBackupCount(streamConfig.getAsyncReplicas()) 44 | .setMapStoreConfig(mapStoreConfig); 45 | instance.getConfig().addMapConfig(mapConfig); 46 | return instance.getMap(mapFQName); 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /core/src/main/java/com/obsidiandynamics/meteor/Subscriber.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import com.hazelcast.core.*; 4 | import com.obsidiandynamics.meteor.Receiver.*; 5 | import com.obsidiandynamics.worker.*; 6 | 7 | public interface Subscriber extends Terminable { 8 | RecordBatch poll(long timeoutMillis) throws InterruptedException; 9 | 10 | SubscriberConfig getConfig(); 11 | 12 | void confirm(long offset); 13 | 14 | void confirm(); 15 | 16 | void seek(long offset); 17 | 18 | boolean isAssigned(); 19 | 20 | void deactivate(); 21 | 22 | void reactivate(); 23 | 24 | Receiver attachReceiver(RecordHandler recordHandler, int pollTimeoutMillis); 25 | 26 | static Subscriber createDefault(HazelcastInstance instance, SubscriberConfig config) { 27 | return new DefaultSubscriber(instance, config); 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /core/src/main/java/com/obsidiandynamics/meteor/SubscriberConfig.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import com.hazelcast.config.*; 4 | import com.obsidiandynamics.func.*; 5 | import com.obsidiandynamics.yconf.*; 6 | import com.obsidiandynamics.zerolog.*; 7 | 8 | @Y 9 | public final class SubscriberConfig { 10 | @YInject 11 | private Zlg zlg = Zlg.forDeclaringClass().get(); 12 | 13 | @YInject 14 | private ExceptionHandler exceptionHandler = new LogAwareExceptionHandler(this::getZlg); 15 | 16 | @YInject 17 | private StreamConfig streamConfig = new StreamConfig(); 18 | 19 | @YInject 20 | private String group = null; 21 | 22 | @YInject 23 | private InitialOffsetScheme initialOffsetScheme = InitialOffsetScheme.AUTO; 24 | 25 | @YInject 26 | private ElectionConfig electionConfig = new ElectionConfig(); 27 | 28 | @YInject 29 | private double staleReadSafetyMargin = 0.1; 30 | 31 | @YInject 32 | private int minLeaseExtendIntervalMillis = 1_000; 33 | 34 | @YInject 35 | private MapStoreConfig mapStoreConfig = new MapStoreConfig().setEnabled(false); 36 | 37 | Zlg getZlg() { 38 | return zlg; 39 | } 40 | 41 | public SubscriberConfig withZlg(Zlg zlg) { 42 | this.zlg = zlg; 43 | return this; 44 | } 45 | 46 | ExceptionHandler getExceptionHandler() { 47 | return exceptionHandler; 48 | } 49 | 50 | public SubscriberConfig withExceptionHandler(ExceptionHandler exceptionHandler) { 51 | this.exceptionHandler = exceptionHandler; 52 | return this; 53 | } 54 | 55 | StreamConfig getStreamConfig() { 56 | return streamConfig; 57 | } 58 | 59 | public SubscriberConfig withStreamConfig(StreamConfig streamConfig) { 60 | this.streamConfig = streamConfig; 61 | return this; 62 | } 63 | 64 | boolean hasGroup() { 65 | return group != null; 66 | } 67 | 68 | String getGroup() { 69 | return group; 70 | } 71 | 72 | public SubscriberConfig 
withGroup(String group) { 73 | this.group = group; 74 | return this; 75 | } 76 | 77 | InitialOffsetScheme getInitialOffsetScheme() { 78 | return initialOffsetScheme; 79 | } 80 | 81 | public SubscriberConfig withInitialOffsetScheme(InitialOffsetScheme initialOffsetScheme) { 82 | this.initialOffsetScheme = initialOffsetScheme; 83 | return this; 84 | } 85 | 86 | ElectionConfig getElectionConfig() { 87 | return electionConfig; 88 | } 89 | 90 | public SubscriberConfig withElectionConfig(ElectionConfig electionConfig) { 91 | this.electionConfig = electionConfig; 92 | return this; 93 | } 94 | 95 | double getStaleReadSafetyMargin() { 96 | return staleReadSafetyMargin; 97 | } 98 | 99 | public SubscriberConfig withStaleReadSafetyMargin(double staleReadSafetyMargin) { 100 | this.staleReadSafetyMargin = staleReadSafetyMargin; 101 | return this; 102 | } 103 | 104 | int getMinLeaseExtendInterval() { 105 | return minLeaseExtendIntervalMillis; 106 | } 107 | 108 | public SubscriberConfig withMinLeaseExtendInterval(int minLeaseExtendIntervalMillis) { 109 | this.minLeaseExtendIntervalMillis = minLeaseExtendIntervalMillis; 110 | return this; 111 | } 112 | 113 | MapStoreConfig getMapStoreConfig() { 114 | return mapStoreConfig; 115 | } 116 | 117 | public SubscriberConfig withMapStoreConfig(MapStoreConfig mapStoreConfig) { 118 | this.mapStoreConfig = mapStoreConfig; 119 | return this; 120 | } 121 | 122 | @Override 123 | public String toString() { 124 | return SubscriberConfig.class.getSimpleName() + " [exceptionHandler=" + exceptionHandler 125 | + ", streamConfig=" + streamConfig 126 | + ", group=" + group + ", initialOffsetScheme=" + initialOffsetScheme 127 | + ", electionConfig=" + electionConfig + ", staleReadSafetyMargin=" + staleReadSafetyMargin 128 | + ", minLeaseExtendInterval=" + minLeaseExtendIntervalMillis 129 | + ", mapStoreConfig=" + mapStoreConfig + "]"; 130 | } 131 | } 132 | -------------------------------------------------------------------------------- /core/src/test/java/com/obsidiandynamics/meteor/AbstractPubSubTest.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import static org.mockito.ArgumentMatchers.*; 4 | import static org.mockito.Mockito.*; 5 | 6 | import java.util.*; 7 | 8 | import org.junit.*; 9 | import org.junit.runners.*; 10 | 11 | import com.hazelcast.config.*; 12 | import com.hazelcast.core.*; 13 | import com.obsidiandynamics.await.*; 14 | import com.obsidiandynamics.func.*; 15 | import com.obsidiandynamics.worker.*; 16 | import com.obsidiandynamics.worker.Terminator; 17 | import com.obsidiandynamics.zerolog.*; 18 | 19 | @FixMethodOrder(MethodSorters.NAME_ASCENDING) 20 | public abstract class AbstractPubSubTest { 21 | /* 22 | * Simulates a slow system by creating auxiliary spinning threads, thereby thrashing the scheduler. Zero means no auxiliary load. 
23 | */ 24 | private static final int MIN_AUX_LOAD_THREADS = 0; 25 | private static final int MAX_AUX_LOAD_THREADS = 0; 26 | private final List auxLoadThreads = new ArrayList<>(); 27 | 28 | protected HazelcastProvider defaultProvider; 29 | 30 | protected final Set instances = new HashSet<>(); 31 | 32 | protected final Set terminables = new HashSet<>(); 33 | 34 | protected final Timesert wait = Timesert.wait(10_000); 35 | 36 | @Before 37 | public final void beforeBase() { 38 | final int auxThreads = (int) (Math.random() * (MAX_AUX_LOAD_THREADS - MIN_AUX_LOAD_THREADS + 1)) + MIN_AUX_LOAD_THREADS; 39 | for (int i = 0; i < auxThreads; i++) { 40 | auxLoadThreads.add(WorkerThread.builder().onCycle(t -> {}).buildAndStart()); 41 | } 42 | 43 | defaultProvider = new TestProvider(); 44 | } 45 | 46 | @After 47 | public final void afterBase() { 48 | Terminator.blank() 49 | .add(terminables) 50 | .add(auxLoadThreads) 51 | .terminate() 52 | .joinSilently(); 53 | terminables.clear(); 54 | auxLoadThreads.clear(); 55 | instances.forEach(h -> h.getLifecycleService().terminate()); 56 | instances.clear(); 57 | } 58 | 59 | protected final HazelcastInstance newGridInstance() { 60 | return newInstance(GridProvider.getInstance()); 61 | } 62 | 63 | protected final HazelcastInstance newInstance() { 64 | return newInstance(defaultProvider); 65 | } 66 | 67 | protected final HazelcastInstance newInstance(HazelcastProvider provider) { 68 | final Config config = new Config() 69 | .setProperty("hazelcast.logging.type", "none"); 70 | return register(provider.createInstance(config), instances); 71 | } 72 | 73 | protected final DefaultPublisher configurePublisher(PublisherConfig config) { 74 | return configurePublisher(newInstance(), config); 75 | } 76 | 77 | protected final DefaultPublisher configurePublisher(HazelcastInstance instance, PublisherConfig config) { 78 | return (DefaultPublisher) register(Publisher.createDefault(instance, config), terminables); 79 | } 80 | 81 | protected final DefaultSubscriber configureSubscriber(SubscriberConfig config) { 82 | return configureSubscriber(newInstance(), config); 83 | } 84 | 85 | protected final DefaultSubscriber configureSubscriber(HazelcastInstance instance, SubscriberConfig config) { 86 | return (DefaultSubscriber) register(Subscriber.createDefault(instance, config), terminables); 87 | } 88 | 89 | protected static T register(T item, Collection container) { 90 | container.add(item); 91 | return item; 92 | } 93 | 94 | protected static String randomGroup() { 95 | final UUID random = UUID.randomUUID(); 96 | return "group-" + Long.toHexString(random.getMostSignificantBits() ^ random.getLeastSignificantBits()); 97 | } 98 | 99 | protected static ExceptionHandler mockExceptionHandler() { 100 | return mockExceptionHandler(Zlg.nop()); 101 | } 102 | 103 | protected static ExceptionHandler mockExceptionHandler(Zlg zlg) { 104 | final ExceptionHandler mock = mock(ExceptionHandler.class); 105 | doAnswer(invocation -> { 106 | final String summary = invocation.getArgument(0); 107 | final Throwable error = invocation.getArgument(1); 108 | zlg.w(summary, error); 109 | return null; 110 | }).when(mock).onException(any(), any()); 111 | return mock; 112 | } 113 | 114 | protected static void verifyNoError(ExceptionHandler... 
mockExceptionHandlers) { 115 | Arrays.stream(mockExceptionHandlers).forEach(AbstractPubSubTest::verifyNoError); 116 | } 117 | 118 | protected static void verifyNoError(ExceptionHandler mockExceptionHandler) { 119 | verify(mockExceptionHandler, never()).onException(any(), any()); 120 | } 121 | } -------------------------------------------------------------------------------- /core/src/test/java/com/obsidiandynamics/meteor/FuturePublishCallbackTest.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import static org.junit.Assert.*; 4 | 5 | import java.util.concurrent.*; 6 | 7 | import org.junit.*; 8 | 9 | public final class FuturePublishCallbackTest { 10 | @Test 11 | public void testWithSuccess() throws InterruptedException, ExecutionException { 12 | final FuturePublishCallback f = new FuturePublishCallback(); 13 | f.onComplete(10, null); 14 | assertEquals(10, (long) f.get()); 15 | } 16 | 17 | @Test(expected=ExecutionException.class) 18 | public void testWithError() throws InterruptedException, ExecutionException { 19 | final FuturePublishCallback f = new FuturePublishCallback(); 20 | f.onComplete(Record.UNASSIGNED_OFFSET, new Exception("simulated error")); 21 | f.get(); 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /core/src/test/java/com/obsidiandynamics/meteor/LogAwareExceptionHandlerTest.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import org.junit.*; 4 | 5 | import com.obsidiandynamics.zerolog.*; 6 | 7 | public final class LogAwareExceptionHandlerTest { 8 | @Test 9 | public void test() { 10 | final MockLogTarget logTarget = new MockLogTarget(); 11 | final LogAwareExceptionHandler handler = new LogAwareExceptionHandler(logTarget::logger); 12 | final String summary = "summary"; 13 | final Throwable error = new RuntimeException("error"); 14 | 15 | handler.onException(summary, error); 16 | 17 | logTarget.entries().assertCount(1); 18 | logTarget.entries().forLevel(LogLevel.WARN).withMessage(summary).withThrowable(error).assertCount(1); 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /core/src/test/java/com/obsidiandynamics/meteor/PubSubOneWayTest.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import static org.junit.Assert.*; 4 | 5 | import java.lang.invoke.*; 6 | import java.util.*; 7 | import java.util.concurrent.atomic.*; 8 | import java.util.function.*; 9 | import java.util.stream.*; 10 | 11 | import org.junit.*; 12 | import org.junit.runner.*; 13 | import org.junit.runners.*; 14 | 15 | import com.hazelcast.core.*; 16 | import com.obsidiandynamics.func.*; 17 | import com.obsidiandynamics.meteor.util.*; 18 | import com.obsidiandynamics.testmark.*; 19 | import com.obsidiandynamics.threads.*; 20 | import com.obsidiandynamics.zerolog.*; 21 | 22 | @FixMethodOrder(MethodSorters.NAME_ASCENDING) 23 | public final class PubSubOneWayTest extends AbstractPubSubTest { 24 | private static final Zlg zlg = Zlg.forClass(MethodHandles.lookup().lookupClass()).get(); 25 | 26 | private final int scale = Testmark.getOptions(Scale.class, Scale.unity()).magnitude(); 27 | 28 | @Test 29 | public void testOneWay() { 30 | testOneWay(2, 4, 10_000 * scale, 10, true, new InstancePool(2, this::newInstance), new OneWayOptions()); 31 | } 32 | 33 | @Test 34 
| public void testOneWayBenchmark() { 35 | Testmark.ifEnabled("one-way over grid", () -> { 36 | final OneWayOptions options = new OneWayOptions() {{ 37 | verbose = true; 38 | printBacklog = false; 39 | }}; 40 | final Supplier poolSupplier = () -> new InstancePool(4, this::newGridInstance); 41 | final int messageSize = 100; 42 | final boolean randomBytes = false; // without compression this really makes no difference 43 | 44 | testOneWay(1, 1, 2_000_000 * scale, messageSize, randomBytes, poolSupplier.get(), options); 45 | testOneWay(1, 2, 2_000_000 * scale, messageSize, randomBytes, poolSupplier.get(), options); 46 | testOneWay(1, 4, 2_000_000 * scale, messageSize, randomBytes, poolSupplier.get(), options); 47 | testOneWay(2, 4, 1_000_000 * scale, messageSize, randomBytes, poolSupplier.get(), options); 48 | testOneWay(2, 8, 1_000_000 * scale, messageSize, randomBytes, poolSupplier.get(), options); 49 | testOneWay(4, 8, 500_000 * scale, messageSize, randomBytes, poolSupplier.get(), options); 50 | testOneWay(4, 16, 500_000 * scale, messageSize, randomBytes, poolSupplier.get(), options); 51 | }); 52 | } 53 | 54 | private static class OneWayOptions { 55 | boolean verbose; 56 | boolean printBacklog; 57 | } 58 | 59 | private void testOneWay(int publishers, int subscribers, int messagesPerPublisher, int messageSize, 60 | boolean randomBytes, InstancePool instancePool, OneWayOptions options) { 61 | final int backlogTarget = 10_000; 62 | final int checkInterval = backlogTarget; 63 | final String stream = "s"; 64 | final byte[] fixedMessage = randomBytes ? null : new byte[messageSize]; 65 | final int capacity = backlogTarget * publishers * 2; 66 | final int pollTimeoutMillis = 100; 67 | 68 | // common configuration 69 | final StreamConfig streamConfig = new StreamConfig() 70 | .withName(stream) 71 | .withHeapCapacity(capacity); 72 | 73 | if (options.verbose) System.out.format("Prestarting instances for %d/%d pub/sub... ", publishers, subscribers); 74 | final int prestartInstances = Math.min(publishers + subscribers, instancePool.size()); 75 | instancePool.prestart(prestartInstances); 76 | if (options.verbose) System.out.format("ready (x%d). 
Starting run...\n", prestartInstances); 77 | 78 | // create subscribers with receivers 79 | final ExceptionHandler eh = mockExceptionHandler(); 80 | final SubscriberConfig subConfig = new SubscriberConfig() 81 | .withExceptionHandler(eh) 82 | .withElectionConfig(new ElectionConfig().withScavengeInterval(1)) 83 | .withStreamConfig(streamConfig); 84 | 85 | final AtomicLong[] receivedArray = new AtomicLong[subscribers]; 86 | for (int i = 0; i < subscribers; i++) { 87 | final AtomicLong received = new AtomicLong(); 88 | receivedArray[i] = received; 89 | 90 | final HazelcastInstance instance = instancePool.get(); 91 | final Subscriber s = configureSubscriber(instance, subConfig); 92 | s.attachReceiver(record -> received.incrementAndGet(), pollTimeoutMillis); 93 | } 94 | 95 | final LongSupplier totalReceived = () -> { 96 | long total = 0; 97 | for (AtomicLong received : receivedArray) { 98 | total += received.get(); 99 | } 100 | return total; 101 | }; 102 | 103 | final LongSupplier smallestReceived = () -> { 104 | long smallest = Long.MAX_VALUE; 105 | for (AtomicLong received : receivedArray) { 106 | final long r = received.get(); 107 | if (r < smallest) { 108 | smallest = r; 109 | } 110 | } 111 | return smallest; 112 | }; 113 | 114 | // create the publishers and send across several threads 115 | final PublisherConfig pubConfig = new PublisherConfig() 116 | .withStreamConfig(streamConfig); 117 | final List publishersList = IntStream.range(0, publishers).boxed() 118 | .map(i -> configurePublisher(instancePool.get(), pubConfig)).collect(Collectors.toList()); 119 | 120 | final AtomicLong totalSent = new AtomicLong(); 121 | final long tookMillis = Threads.tookMillis(() -> { 122 | Parallel.blocking(publishers, threadNo -> { 123 | final Publisher p = publishersList.get(threadNo); 124 | 125 | final Random random = new Random(); 126 | for (int i = 0; i < messagesPerPublisher; i++) { 127 | final byte[] bytes; 128 | if (randomBytes) { 129 | bytes = new byte[messageSize]; 130 | random.nextBytes(bytes); 131 | } else { 132 | bytes = fixedMessage; 133 | } 134 | p.publishAsync(new Record(bytes), PublishCallback.nop()); 135 | 136 | if (i != 0 && i % checkInterval == 0) { 137 | long lastLogTime = 0; 138 | final long sent = totalSent.addAndGet(checkInterval); 139 | for (;;) { 140 | final int backlog = (int) (sent - smallestReceived.getAsLong()); 141 | if (backlog >= backlogTarget) { 142 | Threads.sleep(1); 143 | if (options.printBacklog && System.currentTimeMillis() - lastLogTime > 5_000) { 144 | zlg.i("throttling... 
backlog @ %,d (%,d messages)", z -> z.arg(backlog).arg(sent)); 145 | lastLogTime = System.currentTimeMillis(); 146 | } 147 | } else { 148 | break; 149 | } 150 | } 151 | } 152 | } 153 | }).run(); 154 | 155 | wait.until(() -> { 156 | assertEquals(publishers * messagesPerPublisher * (long) subscribers, totalReceived.getAsLong()); 157 | }); 158 | }); 159 | 160 | final long totalMessages = (long) publishers * messagesPerPublisher * subscribers; 161 | final double rate = (double) totalMessages / tookMillis * 1000; 162 | final long bps = (long) (rate * messageSize * 8 * 2); 163 | 164 | if (options.verbose) { 165 | System.out.format("%,d msgs took %,d ms, %,.0f msg/s, %s\n", totalMessages, tookMillis, rate, Bandwidth.translate(bps)); 166 | } 167 | verifyNoError(eh); 168 | 169 | afterBase(); 170 | beforeBase(); 171 | } 172 | 173 | public static void main(String[] args) { 174 | Testmark.enable().withOptions(Scale.by(8)); 175 | JUnitCore.runClasses(PubSubOneWayTest.class); 176 | } 177 | } 178 | -------------------------------------------------------------------------------- /core/src/test/java/com/obsidiandynamics/meteor/PubSubRoundTripTest.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import static java.util.concurrent.TimeUnit.*; 4 | import static org.junit.Assert.*; 5 | 6 | import java.util.concurrent.atomic.*; 7 | 8 | import org.HdrHistogram.*; 9 | import org.junit.*; 10 | import org.junit.runner.*; 11 | import org.junit.runners.*; 12 | 13 | import com.obsidiandynamics.func.*; 14 | import com.obsidiandynamics.testmark.*; 15 | 16 | @FixMethodOrder(MethodSorters.NAME_ASCENDING) 17 | public final class PubSubRoundTripTest extends AbstractPubSubTest { 18 | private final int scale = Testmark.getOptions(Scale.class, Scale.unity()).magnitude(); 19 | 20 | @Test 21 | public void testRoundTripAsync() { 22 | testRoundTrip(100 * scale, false, new InstancePool(2, this::newInstance), new RoundTripOptions()); 23 | } 24 | 25 | @Test 26 | public void testRoundTripDirect() { 27 | testRoundTrip(100 * scale, true, new InstancePool(2, this::newInstance), new RoundTripOptions()); 28 | } 29 | 30 | @Test 31 | public void testRoundTripAsyncBenchmark() { 32 | Testmark.ifEnabled("round trip async over grid", () -> { 33 | final RoundTripOptions options = new RoundTripOptions() {{ 34 | verbose = true; 35 | }}; 36 | testRoundTrip(100_000 * scale, false, new InstancePool(2, this::newGridInstance), options); 37 | }); 38 | } 39 | 40 | @Test 41 | public void testRoundTripDirectBenchmark() { 42 | Testmark.ifEnabled("round trip direct over grid", () -> { 43 | final RoundTripOptions options = new RoundTripOptions() {{ 44 | verbose = true; 45 | }}; 46 | testRoundTrip(100_000 * scale, true, new InstancePool(2, this::newGridInstance), options); 47 | }); 48 | } 49 | 50 | private static class RoundTripOptions { 51 | boolean verbose; 52 | } 53 | 54 | @FunctionalInterface 55 | private interface PublishStrategy { 56 | void go(Publisher publisher, Record record); 57 | } 58 | 59 | private void testRoundTrip(int numMessages, boolean direct, InstancePool instancePool, RoundTripOptions options) { 60 | final String streamRequest = "request"; 61 | final String streamReply = "reply"; 62 | final int capacity = numMessages; 63 | final int pollTimeoutMillis = 100; 64 | final int backlogTarget = 0; 65 | final PublishStrategy publishMechanic = (publisher, record) -> { 66 | if (direct) publisher.publishDirect(record); 67 | else publisher.publishAsync(record, 
PublishCallback.nop()); 68 | }; 69 | 70 | // common configuration for the request and response streams 71 | final StreamConfig requestStreamConfig = new StreamConfig() 72 | .withName(streamRequest) 73 | .withHeapCapacity(capacity); 74 | final StreamConfig replyStreamConfig = new StreamConfig() 75 | .withName(streamReply) 76 | .withHeapCapacity(capacity); 77 | 78 | if (options.verbose) System.out.format("Prestarting instances... "); 79 | final int prestartInstances = Math.min(4, instancePool.size()); 80 | instancePool.prestart(prestartInstances); 81 | if (options.verbose) System.out.format("ready (x%d). Starting run...\n", prestartInstances); 82 | 83 | // create publishers 84 | final PublisherConfig requestPubConfig = new PublisherConfig() 85 | .withStreamConfig(requestStreamConfig); 86 | final PublisherConfig replyPubConfig = new PublisherConfig() 87 | .withStreamConfig(replyStreamConfig); 88 | final DefaultPublisher requestPub = configurePublisher(instancePool.get(), requestPubConfig); 89 | final DefaultPublisher replyPub = configurePublisher(instancePool.get(), replyPubConfig); 90 | 91 | // create subscribers with receivers 92 | final ExceptionHandler eh = mockExceptionHandler(); 93 | final SubscriberConfig requestSubConfig = new SubscriberConfig() 94 | .withExceptionHandler(eh) 95 | .withElectionConfig(new ElectionConfig().withScavengeInterval(1)) 96 | .withStreamConfig(requestStreamConfig); 97 | final SubscriberConfig replySubConfig = new SubscriberConfig() 98 | .withExceptionHandler(eh) 99 | .withElectionConfig(new ElectionConfig().withScavengeInterval(1)) 100 | .withStreamConfig(replyStreamConfig); 101 | 102 | configureSubscriber(instancePool.get(), requestSubConfig).attachReceiver(record -> { 103 | publishMechanic.go(replyPub, record); 104 | }, pollTimeoutMillis); 105 | 106 | final AtomicInteger received = new AtomicInteger(); 107 | final Histogram hist = new Histogram(NANOSECONDS.toNanos(10), SECONDS.toNanos(10), 5); 108 | configureSubscriber(instancePool.get(), replySubConfig).attachReceiver(record -> { 109 | final SimpleLongMessage m = SimpleLongMessage.unpack(record.getData()); 110 | final long latency = System.nanoTime() - m.value; 111 | hist.recordValue(latency); 112 | received.incrementAndGet(); 113 | }, pollTimeoutMillis); 114 | 115 | // send the messages 116 | for (int i = 0; i < numMessages; i++) { 117 | publishMechanic.go(requestPub, new Record(new SimpleLongMessage(System.nanoTime()).pack())); 118 | while (i - received.get() >= backlogTarget) { 119 | Thread.yield(); 120 | } 121 | } 122 | 123 | wait.until(() -> assertEquals(numMessages, received.get())); 124 | 125 | if (options.verbose) { 126 | final long min = hist.getMinValue(); 127 | final double mean = hist.getMean(); 128 | final long p50 = hist.getValueAtPercentile(50.0); 129 | final long p95 = hist.getValueAtPercentile(95.0); 130 | final long p99 = hist.getValueAtPercentile(99.0); 131 | final long max = hist.getMaxValue(); 132 | System.out.format("min: %,d, mean: %,.0f, 50%%: %,d, 95%%: %,d, 99%%: %,d, max: %,d (ns)\n", 133 | min, mean, p50, p95, p99, max); 134 | } 135 | verifyNoError(eh); 136 | 137 | afterBase(); 138 | beforeBase(); 139 | } 140 | 141 | public static void main(String[] args) { 142 | Testmark.enable(); 143 | JUnitCore.runClasses(PubSubRoundTripTest.class); 144 | } 145 | } 146 | -------------------------------------------------------------------------------- /core/src/test/java/com/obsidiandynamics/meteor/PubSubTest.java: -------------------------------------------------------------------------------- 
1 | package com.obsidiandynamics.meteor; 2 | 3 | import static junit.framework.TestCase.assertEquals; 4 | import static org.junit.Assert.assertNull; 5 | 6 | import java.util.*; 7 | import java.util.concurrent.*; 8 | import java.util.stream.*; 9 | 10 | import org.junit.*; 11 | import org.junit.runners.*; 12 | 13 | import com.hazelcast.core.*; 14 | import com.obsidiandynamics.func.*; 15 | import com.obsidiandynamics.meteor.Receiver.*; 16 | 17 | @FixMethodOrder(MethodSorters.NAME_ASCENDING) 18 | public final class PubSubTest extends AbstractPubSubTest { 19 | private static class TestHandler implements RecordHandler { 20 | private final List received = new CopyOnWriteArrayList<>(); 21 | 22 | private volatile long lastId = -1; 23 | private volatile AssertionError error; 24 | 25 | @Override 26 | public void onRecord(Record record) { 27 | final SimpleLongMessage message = SimpleLongMessage.unpack(record.getData()); 28 | final long id = message.value; 29 | if (lastId == -1) { 30 | lastId = id; 31 | } else { 32 | final long expectedBallotId = lastId + 1; 33 | if (id != expectedBallotId) { 34 | error = new AssertionError("Expected ID " + expectedBallotId + ", got " + id); 35 | throw error; 36 | } else { 37 | lastId = id; 38 | } 39 | } 40 | received.add(message); 41 | } 42 | } 43 | 44 | @Test 45 | public void testPubSubUngrouped() { 46 | testPubSub(3, 5, new InstancePool(2, this::newInstance), null); 47 | } 48 | 49 | @Test 50 | public void testPubSubGrouped() { 51 | testPubSub(3, 5, new InstancePool(2, this::newInstance), randomGroup()); 52 | } 53 | 54 | private void testPubSub(int numReceivers, int numMessages, InstancePool instancePool, String group) { 55 | final String stream = "s"; 56 | 57 | // common configuration 58 | final StreamConfig streamConfig = new StreamConfig() 59 | .withName(stream) 60 | .withHeapCapacity(numMessages); 61 | 62 | // prestart the instance pool 63 | final int prestartInstances = Math.min(1 + numReceivers, instancePool.size()); 64 | instancePool.prestart(prestartInstances); 65 | 66 | // create subscribers with receivers 67 | final ExceptionHandler eh = mockExceptionHandler(); 68 | final SubscriberConfig subConfig = new SubscriberConfig() 69 | .withGroup(group) 70 | .withExceptionHandler(eh) 71 | .withElectionConfig(new ElectionConfig().withScavengeInterval(1)) 72 | .withStreamConfig(streamConfig); 73 | 74 | final List handlers = new ArrayList<>(numReceivers); 75 | for (int i = 0; i < numReceivers; i++) { 76 | final HazelcastInstance instance = instancePool.get(); 77 | final Subscriber s = configureSubscriber(instance, subConfig); 78 | s.attachReceiver(register(new TestHandler(), handlers), 10); 79 | } 80 | 81 | // create a publisher and publish the messages 82 | final PublisherConfig pubConfig = new PublisherConfig() 83 | .withStreamConfig(streamConfig); 84 | final HazelcastInstance instance = instancePool.get(); 85 | final Publisher p = configurePublisher(instance, pubConfig); 86 | 87 | final List futures = new ArrayList<>(numMessages); 88 | for (int i = 0; i < numMessages; i++) { 89 | register(p.publishAsync(new Record(new SimpleLongMessage(i).pack())), futures); 90 | } 91 | 92 | // wait until all publish confirmations have been processed 93 | wait.until(() -> { 94 | final int completedFutures = (int) futures.stream().filter(CompletableFuture::isDone).count(); 95 | assertEquals(numMessages, completedFutures); 96 | }); 97 | 98 | final int errorredFutures = (int) futures.stream().filter(CompletableFuture::isCompletedExceptionally).count(); 99 | assertEquals(0, 
errorredFutures); 100 | 101 | // verify received messages; if a failure is detected, deep dive into the contents for debugging 102 | boolean success = false; 103 | try { 104 | wait.until(() -> { 105 | // list of handlers that have received at least one message 106 | final List receivedHandlers = handlers.stream() 107 | .filter(h -> h.received.size() != 0).collect(Collectors.toList()); 108 | 109 | // the number of expected receivers depends on whether a group has been set 110 | if (group != null) { 111 | assertEquals(1, receivedHandlers.size()); 112 | } else { 113 | assertEquals(numReceivers, receivedHandlers.size()); 114 | } 115 | 116 | for (TestHandler handler : receivedHandlers) { 117 | assertNull(handler.error); 118 | assertEquals(numMessages, handler.received.size()); 119 | long index = 0; 120 | for (SimpleLongMessage m : handler.received) { 121 | assertEquals(index, m.value); 122 | index++; 123 | } 124 | } 125 | }); 126 | success = true; 127 | } finally { 128 | if (! success) { 129 | System.out.format("numReceivers=%d, numMessages=%d, instances.size=%d, group=%s\n", 130 | numReceivers, numMessages, instancePool.size(), group); 131 | for (TestHandler handler : handlers) { 132 | System.out.println("---"); 133 | for (SimpleLongMessage m : handler.received) { 134 | System.out.println("- " + m); 135 | } 136 | } 137 | } 138 | } 139 | 140 | verifyNoError(eh); 141 | } 142 | } 143 | -------------------------------------------------------------------------------- /core/src/test/java/com/obsidiandynamics/meteor/PublisherConfigTest.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import static org.junit.Assert.*; 4 | 5 | import org.junit.*; 6 | 7 | import com.obsidiandynamics.assertion.*; 8 | import com.obsidiandynamics.zerolog.*; 9 | 10 | public final class PublisherConfigTest { 11 | @Test 12 | public void testConfig() { 13 | final Zlg zlg = Zlg.forDeclaringClass().get(); 14 | final StreamConfig streamConfig = new StreamConfig(); 15 | 16 | final PublisherConfig config = new PublisherConfig() 17 | .withZlg(zlg) 18 | .withStreamConfig(streamConfig); 19 | assertEquals(zlg, config.getZlg()); 20 | assertEquals(streamConfig, config.getStreamConfig()); 21 | 22 | Assertions.assertToStringOverride(config); 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /core/src/test/java/com/obsidiandynamics/meteor/PublisherTest.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import static org.junit.Assert.*; 4 | import static org.mockito.ArgumentMatchers.*; 5 | import static org.mockito.Mockito.*; 6 | 7 | import java.util.*; 8 | import java.util.concurrent.*; 9 | import java.util.stream.*; 10 | 11 | import org.junit.*; 12 | 13 | import com.hazelcast.config.*; 14 | import com.hazelcast.core.*; 15 | import com.hazelcast.ringbuffer.*; 16 | import com.hazelcast.util.executor.*; 17 | 18 | public final class PublisherTest extends AbstractPubSubTest { 19 | private static class TestCallback implements PublishCallback { 20 | volatile long offset = Record.UNASSIGNED_OFFSET; 21 | volatile Throwable error; 22 | 23 | @Override 24 | public void onComplete(long offset, Throwable error) { 25 | this.offset = offset; 26 | this.error = error; 27 | } 28 | 29 | boolean isComplete() { 30 | return offset != Record.UNASSIGNED_OFFSET || error != null; 31 | } 32 | } 33 | 34 | /** 35 | * Publishes to a bounded buffer, 
where the backing store is a NOP. 36 | * 37 | */ 38 | @Test 39 | public void testPublishToBoundedBuffer() throws InterruptedException, ExecutionException { 40 | final String stream = "s"; 41 | final int capacity = 10; 42 | 43 | final DefaultPublisher p = 44 | configurePublisher(new PublisherConfig().withStreamConfig(new StreamConfig() 45 | .withName(stream) 46 | .withHeapCapacity(capacity))); 47 | final Ringbuffer buffer = p.getInstance().getRingbuffer(Namespace.METEOR_STREAM.qualify(stream)); 48 | final List records = new ArrayList<>(); 49 | final List callbacks = new ArrayList<>(); 50 | 51 | assertNotNull(p.getConfig()); 52 | 53 | final int initialMessages = 5; 54 | publish(initialMessages, p, records, callbacks); 55 | 56 | assertEquals(initialMessages, records.size()); 57 | assertEquals(initialMessages, callbacks.size()); 58 | wait.until(() -> assertEquals(initialMessages, completed(callbacks).size())); 59 | assertNoError(callbacks); 60 | for (int i = 0; i < initialMessages; i++) { 61 | assertEquals(i, records.get(i).getOffset()); 62 | } 63 | assertEquals(initialMessages, buffer.size()); 64 | final List initialItems = readRemaining(buffer, 0); 65 | assertEquals(initialMessages, initialItems.size()); 66 | 67 | final int furtherMessages = 20; 68 | publish(furtherMessages, p, records, callbacks); 69 | 70 | wait.until(() -> assertEquals(initialMessages + furtherMessages, completed(callbacks).size())); 71 | assertNoError(callbacks); 72 | assertEquals(capacity, buffer.size()); 73 | final List allItems = readRemaining(buffer, 0); 74 | assertEquals(capacity, allItems.size()); 75 | } 76 | 77 | /** 78 | * Publishes to a buffer that uses a simple {@link HeapRingbufferStore} as its backing store. 79 | * 80 | */ 81 | @Test 82 | public void testPublishToStoredBuffer() throws InterruptedException, ExecutionException { 83 | final String stream = "s"; 84 | final int capacity = 10; 85 | 86 | final DefaultPublisher p = 87 | configurePublisher(new PublisherConfig() 88 | .withStreamConfig(new StreamConfig() 89 | .withName(stream) 90 | .withHeapCapacity(capacity) 91 | .withRingbufferStoreConfig(new RingbufferStoreConfig() 92 | .setFactoryClassName(HeapRingbufferStore.Factory.class.getName())))); 93 | final Ringbuffer buffer = p.getInstance().getRingbuffer(Namespace.METEOR_STREAM.qualify(stream)); 94 | final List records = new ArrayList<>(); 95 | final List callbacks = new ArrayList<>(); 96 | 97 | final int initialMessages = 5; 98 | publish(initialMessages, p, records, callbacks); 99 | 100 | wait.until(() -> assertEquals(initialMessages, completed(callbacks).size())); 101 | assertNoError(callbacks); 102 | assertEquals(initialMessages, buffer.size()); 103 | final List initialItems = readRemaining(buffer, 0); 104 | assertEquals(initialMessages, initialItems.size()); 105 | 106 | final int furtherMessages = 20; 107 | publish(furtherMessages, p, records, callbacks); 108 | 109 | wait.until(() -> assertEquals(initialMessages + furtherMessages, completed(callbacks).size())); 110 | assertNoError(callbacks); 111 | assertEquals(capacity, buffer.size()); 112 | final List allItems = readRemaining(buffer, 0); 113 | assertEquals(initialMessages + furtherMessages, allItems.size()); 114 | } 115 | 116 | /** 117 | * Tests direct publishing. 
118 | * 119 | */ 120 | @Test 121 | public void testPublishDirect() throws InterruptedException, ExecutionException { 122 | final String stream = "s"; 123 | final int capacity = 10; 124 | 125 | final DefaultPublisher p = 126 | configurePublisher(new PublisherConfig() 127 | .withStreamConfig(new StreamConfig() 128 | .withName(stream) 129 | .withHeapCapacity(capacity) 130 | .withRingbufferStoreConfig(new RingbufferStoreConfig() 131 | .setFactoryClassName(HeapRingbufferStore.Factory.class.getName())))); 132 | final Ringbuffer buffer = p.getInstance().getRingbuffer(Namespace.METEOR_STREAM.qualify(stream)); 133 | 134 | final long offset0 = p.publishDirect(new Record("h0".getBytes())); 135 | assertEquals(0, offset0); 136 | final long offset1 = p.publishDirect(new Record("h1".getBytes())); 137 | assertEquals(1, offset1); 138 | 139 | final List items = readRemaining(buffer, 0); 140 | assertEquals(2, items.size()); 141 | assertEquals("h0", new String(items.get(0))); 142 | assertEquals("h1", new String(items.get(1))); 143 | } 144 | 145 | /** 146 | * Tests publish failure by rigging a mock {@link Ringbuffer} to return a {@link CompletedFuture} with 147 | * an error. 148 | */ 149 | @Test 150 | public void testPublishFailure() { 151 | final String stream = "s"; 152 | final int capacity = 10; 153 | 154 | final HazelcastInstance realInstance = newInstance(); 155 | final HazelcastInstance mockInstance = mock(HazelcastInstance.class); 156 | @SuppressWarnings("unchecked") 157 | final Ringbuffer mockBuffer = mock(Ringbuffer.class); 158 | when(mockInstance.getRingbuffer(any())).thenReturn(mockBuffer); 159 | when(mockInstance.getConfig()).thenReturn(realInstance.getConfig()); 160 | final RuntimeException cause = new RuntimeException("error"); 161 | when(mockBuffer.addAllAsync(any(), any())).then(invocation -> { 162 | return new CompletedFuture<>(null, cause, Runnable::run); 163 | }); 164 | 165 | final DefaultPublisher p = 166 | configurePublisher(mockInstance, 167 | new PublisherConfig() 168 | .withStreamConfig(new StreamConfig() 169 | .withName(stream) 170 | .withHeapCapacity(capacity) 171 | .withRingbufferStoreConfig(new RingbufferStoreConfig() 172 | .setFactoryClassName(HeapRingbufferStore.Factory.class.getName())))); 173 | final List records = new ArrayList<>(); 174 | final List callbacks = new ArrayList<>(); 175 | 176 | publish(1, p, records, callbacks); 177 | wait.until(() -> assertEquals(1, completed(callbacks).size())); 178 | assertEquals(Record.UNASSIGNED_OFFSET, records.get(0).getOffset()); 179 | assertEquals(Record.UNASSIGNED_OFFSET, callbacks.get(0).offset); 180 | assertEquals(cause, callbacks.get(0).error); 181 | } 182 | 183 | private static void publish(int numMessages, Publisher publisher, List records, List callbacks) { 184 | for (int i = 0; i < numMessages; i++) { 185 | final TestCallback callback = new TestCallback(); 186 | callbacks.add(callback); 187 | final Record record = new Record("hello".getBytes()); 188 | records.add(record); 189 | publisher.publishAsync(record, callback); 190 | } 191 | } 192 | 193 | 194 | /** 195 | * Reads the remaining contents of the ringbuffer from a given starting point, automatically fast-forwarding 196 | * the starting point if a {@link StaleSequenceException} is caught. 
197 | * 198 | */ 199 | private static List readRemaining(Ringbuffer buffer, long startSequence) throws InterruptedException, ExecutionException { 200 | long adjStartSequence = startSequence; 201 | final List items = new ArrayList<>(); 202 | for (;;) { 203 | final ReadResultSet results; 204 | try { 205 | final int toRead = (int) Math.min(1_000, buffer.capacity()); 206 | results = buffer.readManyAsync(adjStartSequence, 0, toRead, null).get(); 207 | } catch (ExecutionException e) { 208 | if (e.getCause() instanceof StaleSequenceException) { 209 | System.out.format("SSE: fast-forwarding start sequence to %d\n", buffer.headSequence()); 210 | adjStartSequence = buffer.headSequence(); 211 | continue; 212 | } else { 213 | throw e; 214 | } 215 | } 216 | 217 | if (results.size() > 0) { 218 | results.forEach(items::add); 219 | adjStartSequence += results.size(); 220 | } else { 221 | return items; 222 | } 223 | } 224 | } 225 | 226 | private static List completed(List callbacks) { 227 | return callbacks.stream().filter(TestCallback::isComplete).collect(Collectors.toList()); 228 | } 229 | 230 | private static void assertNoError(List callbacks) { 231 | for (TestCallback callback : callbacks) { 232 | if (callback.error != null) { 233 | throw new AssertionError(callback.error); 234 | } 235 | } 236 | } 237 | } -------------------------------------------------------------------------------- /core/src/test/java/com/obsidiandynamics/meteor/RecordBatchTest.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import static org.junit.Assert.*; 4 | 5 | import java.util.*; 6 | 7 | import org.junit.*; 8 | 9 | public final class RecordBatchTest { 10 | @Test 11 | public void testDefaultMethods() { 12 | final Record r0 = new Record(new byte[0]); 13 | final Record r1 = new Record(new byte[0]); 14 | final RecordBatch b0 = new ListRecordBatch(Arrays.asList(r0, r1)); 15 | assertFalse(b0.isEmpty()); 16 | assertEquals(Arrays.asList(r0, r1), b0.toList()); 17 | assertEquals(r0, b0.first()); 18 | assertEquals(r1, b0.last()); 19 | 20 | final RecordBatch b1 = new ListRecordBatch(Collections.emptyList()); 21 | assertTrue(b1.isEmpty()); 22 | assertEquals(Collections.emptyList(), b1.toList()); 23 | } 24 | 25 | @Test 26 | public void testEmpty() { 27 | assertEquals(0, RecordBatch.empty().size()); 28 | assertFalse(RecordBatch.empty().iterator().hasNext()); 29 | } 30 | 31 | @Test(expected=NoSuchElementException.class) 32 | public void testEmptyFirst() { 33 | RecordBatch.empty().first(); 34 | } 35 | 36 | @Test(expected=NoSuchElementException.class) 37 | public void testEmptyLast() { 38 | RecordBatch.empty().last(); 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /core/src/test/java/com/obsidiandynamics/meteor/RecordTest.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import static org.junit.Assert.*; 4 | 5 | import org.junit.*; 6 | 7 | import com.obsidiandynamics.assertion.*; 8 | 9 | public class RecordTest { 10 | @Test 11 | public void testCreateWithData() { 12 | final byte[] data = { (byte) 0x00, (byte) 0x01 }; 13 | final Record record = new Record(data); 14 | assertArrayEquals(data, record.getData()); 15 | assertEquals(Record.UNASSIGNED_OFFSET, record.getOffset()); 16 | 17 | record.setOffset(100); 18 | assertEquals(100, record.getOffset()); 19 | } 20 | 21 | @Test 22 | public void testCreateWithDataAndOffset() { 23 
| final byte[] data = { (byte) 0x00, (byte) 0x01 }; 24 | final Record record = new Record(data, 100); 25 | assertArrayEquals(data, record.getData()); 26 | assertEquals(100, record.getOffset()); 27 | } 28 | 29 | @Test 30 | public void testToString() { 31 | Assertions.assertToStringOverride(new Record(new byte[0])); 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /core/src/test/java/com/obsidiandynamics/meteor/SimpleLongMessage.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import java.nio.*; 4 | 5 | final class SimpleLongMessage { 6 | final long value; 7 | 8 | SimpleLongMessage(long value) { this.value = value; } 9 | 10 | byte[] pack() { 11 | final ByteBuffer buf = ByteBuffer.allocate(8); 12 | buf.putLong(value); 13 | return buf.array(); 14 | } 15 | 16 | static SimpleLongMessage unpack(byte[] bytes) { 17 | return new SimpleLongMessage(ByteBuffer.wrap(bytes).getLong()); 18 | } 19 | 20 | @Override 21 | public String toString() { 22 | return SimpleLongMessage.class.getSimpleName() + " [id=" + value + "]"; 23 | } 24 | } -------------------------------------------------------------------------------- /core/src/test/java/com/obsidiandynamics/meteor/StreamConfigTest.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import static org.junit.Assert.*; 4 | 5 | import org.junit.*; 6 | 7 | import com.hazelcast.config.*; 8 | import com.obsidiandynamics.assertion.*; 9 | 10 | public final class StreamConfigTest { 11 | @Test 12 | public void testConfig() { 13 | final String name = "name"; 14 | final int heapCapacity = 100; 15 | final int syncReplicas = 2; 16 | final int asyncReplicas = 3; 17 | 18 | final RingbufferStoreConfig ringbufferStoreConfig = new RingbufferStoreConfig() 19 | .setEnabled(true) 20 | .setFactoryClassName("TestClass"); 21 | final StreamConfig config = new StreamConfig() 22 | .withName(name) 23 | .withHeapCapacity(heapCapacity) 24 | .withSyncReplicas(syncReplicas) 25 | .withAsyncReplicas(asyncReplicas) 26 | .withRingbufferStoreConfig(ringbufferStoreConfig); 27 | assertEquals(name, config.getName()); 28 | assertEquals(heapCapacity, config.getHeapCapacity()); 29 | assertEquals(syncReplicas, config.getSyncReplicas()); 30 | assertEquals(asyncReplicas, config.getAsyncReplicas()); 31 | assertEquals(ringbufferStoreConfig, config.getRingbufferStoreConfig()); 32 | 33 | Assertions.assertToStringOverride(config); 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /core/src/test/java/com/obsidiandynamics/meteor/StreamHelperTest.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import static org.junit.Assert.*; 4 | import static org.mockito.Mockito.*; 5 | 6 | import org.junit.*; 7 | 8 | import com.hazelcast.config.*; 9 | import com.hazelcast.core.*; 10 | 11 | public final class StreamHelperTest { 12 | @Test 13 | public void testIsNotNull() { 14 | assertTrue(StreamHelper.isNotNull(new byte[0])); 15 | assertFalse(StreamHelper.isNotNull(null)); 16 | } 17 | 18 | @Test 19 | public void testGetRingbuffer() { 20 | final HazelcastInstance instance = mock(HazelcastInstance.class); 21 | final Config config = new Config(); 22 | when(instance.getConfig()).thenReturn(config); 23 | 24 | final RingbufferStoreConfig ringbufferStoreConfig = new 
RingbufferStoreConfig() 25 | .setEnabled(true) 26 | .setClassName("TestClass"); 27 | final StreamConfig streamConfig = new StreamConfig() 28 | .withName("stream") 29 | .withAsyncReplicas(3) 30 | .withSyncReplicas(2) 31 | .withHeapCapacity(100) 32 | .withRingbufferStoreConfig(ringbufferStoreConfig); 33 | 34 | StreamHelper.getRingbuffer(instance, streamConfig); 35 | verify(instance).getConfig(); 36 | 37 | final RingbufferConfig r = config.getRingbufferConfig(Namespace.METEOR_STREAM.qualify(streamConfig.getName())); 38 | assertEquals(streamConfig.getAsyncReplicas(), r.getAsyncBackupCount()); 39 | assertEquals(streamConfig.getSyncReplicas(), r.getBackupCount()); 40 | assertEquals(streamConfig.getHeapCapacity(), r.getCapacity()); 41 | assertEquals(streamConfig.getRingbufferStoreConfig(), r.getRingbufferStoreConfig()); 42 | } 43 | 44 | @Test 45 | public void testGetMap() { 46 | final HazelcastInstance instance = mock(HazelcastInstance.class); 47 | final Config config = new Config(); 48 | when(instance.getConfig()).thenReturn(config); 49 | 50 | final MapStoreConfig mapStoreConfig = new MapStoreConfig() 51 | .setEnabled(true) 52 | .setClassName("TestClass"); 53 | final StreamConfig streamConfig = new StreamConfig() 54 | .withName("stream") 55 | .withAsyncReplicas(3) 56 | .withSyncReplicas(2) 57 | .withHeapCapacity(100); 58 | 59 | StreamHelper.getMap(instance, "map", streamConfig, mapStoreConfig); 60 | final MapConfig m = config.getMapConfig("map"); 61 | assertEquals(streamConfig.getAsyncReplicas(), m.getAsyncBackupCount()); 62 | assertEquals(streamConfig.getSyncReplicas(), m.getBackupCount()); 63 | assertEquals(mapStoreConfig, m.getMapStoreConfig()); 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /core/src/test/java/com/obsidiandynamics/meteor/SubscriberConfigTest.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import static org.junit.Assert.*; 4 | 5 | import org.junit.*; 6 | 7 | import com.hazelcast.config.*; 8 | import com.obsidiandynamics.assertion.*; 9 | import com.obsidiandynamics.func.*; 10 | import com.obsidiandynamics.zerolog.*; 11 | 12 | public final class SubscriberConfigTest { 13 | @Test 14 | public void testConfig() { 15 | final ElectionConfig electionConfig = new ElectionConfig(); 16 | final ExceptionHandler exceptionHandler = ExceptionHandler.nop(); 17 | final String group = "group"; 18 | final InitialOffsetScheme initialOffsetScheme = InitialOffsetScheme.EARLIEST; 19 | final Zlg zlg = Zlg.forDeclaringClass().get(); 20 | final StreamConfig streamConfig = new StreamConfig(); 21 | final double staleReadSafetyMargin = 0.5; 22 | final int minLeaseExtendIntervalMillis = 500; 23 | final MapStoreConfig mapStoreConfig = new MapStoreConfig() 24 | .setEnabled(true) 25 | .setClassName("TestClass"); 26 | 27 | final SubscriberConfig config = new SubscriberConfig() 28 | .withElectionConfig(electionConfig) 29 | .withExceptionHandler(exceptionHandler) 30 | .withGroup(group) 31 | .withInitialOffsetScheme(initialOffsetScheme) 32 | .withZlg(zlg) 33 | .withStreamConfig(streamConfig) 34 | .withStaleReadSafetyMargin(staleReadSafetyMargin) 35 | .withMinLeaseExtendInterval(minLeaseExtendIntervalMillis) 36 | .withMapStoreConfig(mapStoreConfig); 37 | assertEquals(electionConfig, config.getElectionConfig()); 38 | assertEquals(exceptionHandler, config.getExceptionHandler()); 39 | assertEquals(group, config.getGroup()); 40 | assertEquals(initialOffsetScheme, 
config.getInitialOffsetScheme()); 41 | assertEquals(zlg, config.getZlg()); 42 | assertEquals(streamConfig, config.getStreamConfig()); 43 | assertEquals(staleReadSafetyMargin, config.getStaleReadSafetyMargin(), Double.MIN_VALUE); 44 | assertEquals(minLeaseExtendIntervalMillis, config.getMinLeaseExtendInterval()); 45 | assertEquals(mapStoreConfig, config.getMapStoreConfig()); 46 | 47 | Assertions.assertToStringOverride(config); 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /core/src/test/java/com/obsidiandynamics/meteor/SubscriberUngroupedTest.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import static org.junit.Assert.*; 4 | import static org.mockito.ArgumentMatchers.*; 5 | import static org.mockito.Mockito.*; 6 | 7 | import java.util.*; 8 | 9 | import org.junit.*; 10 | import org.junit.runner.*; 11 | import org.junit.runners.*; 12 | 13 | import com.hazelcast.core.*; 14 | import com.hazelcast.ringbuffer.*; 15 | import com.hazelcast.util.executor.*; 16 | import com.obsidiandynamics.func.*; 17 | import com.obsidiandynamics.junit.*; 18 | import com.obsidiandynamics.meteor.Receiver.*; 19 | import com.obsidiandynamics.worker.*; 20 | 21 | @RunWith(Parameterized.class) 22 | public final class SubscriberUngroupedTest extends AbstractPubSubTest { 23 | @Parameterized.Parameters 24 | public static List data() { 25 | return TestCycle.timesQuietly(1); 26 | } 27 | 28 | /** 29 | * Deactivation can only be performed in a grouped context. 30 | */ 31 | @Test(expected=IllegalStateException.class) 32 | public void testIllegalDeactivate() { 33 | final String stream = "s"; 34 | final int capacity = 1; 35 | 36 | final DefaultSubscriber s = 37 | configureSubscriber(new SubscriberConfig().withStreamConfig(new StreamConfig() 38 | .withName(stream) 39 | .withHeapCapacity(capacity))); 40 | s.deactivate(); 41 | } 42 | 43 | /** 44 | * Reactivation can only be performed in a grouped context. 45 | */ 46 | @Test(expected=IllegalStateException.class) 47 | public void testIllegalReactivate() { 48 | final String stream = "s"; 49 | final int capacity = 1; 50 | 51 | final DefaultSubscriber s = 52 | configureSubscriber(new SubscriberConfig().withStreamConfig(new StreamConfig() 53 | .withName(stream) 54 | .withHeapCapacity(capacity))); 55 | s.reactivate(); 56 | } 57 | 58 | /** 59 | * Offset confirmation can only be performed in a grouped context. 60 | */ 61 | @Test(expected=IllegalStateException.class) 62 | public void testIllegalConfirm() { 63 | final String stream = "s"; 64 | final int capacity = 1; 65 | 66 | final DefaultSubscriber s = 67 | configureSubscriber(new SubscriberConfig().withStreamConfig(new StreamConfig() 68 | .withName(stream) 69 | .withHeapCapacity(capacity))); 70 | s.confirm(); 71 | } 72 | 73 | /** 74 | * Tests consuming from an empty buffer. Should result in a zero-size batch. 
75 | * 76 | */ 77 | @Test 78 | public void testConsumeEmpty() throws InterruptedException { 79 | final String stream = "s"; 80 | final int capacity = 10; 81 | 82 | final ExceptionHandler eh = mockExceptionHandler(); 83 | final DefaultSubscriber s = 84 | configureSubscriber(new SubscriberConfig() 85 | .withExceptionHandler(eh) 86 | .withStreamConfig(new StreamConfig() 87 | .withName(stream) 88 | .withHeapCapacity(capacity))); 89 | assertNotNull(s.getConfig()); 90 | assertTrue(s.isAssigned()); 91 | 92 | final RecordBatch b0 = s.poll(1); 93 | assertEquals(0, b0.size()); 94 | 95 | final RecordBatch b1 = s.poll(1); 96 | assertEquals(0, b1.size()); 97 | 98 | verifyNoError(eh); 99 | } 100 | 101 | /** 102 | * Simple scenario of consuming a single message. 103 | * 104 | */ 105 | @Test 106 | public void testConsumeOne() throws InterruptedException { 107 | final String stream = "s"; 108 | final int capacity = 10; 109 | 110 | final ExceptionHandler eh = mockExceptionHandler(); 111 | final DefaultSubscriber s = 112 | configureSubscriber(new SubscriberConfig() 113 | .withExceptionHandler(eh) 114 | .withStreamConfig(new StreamConfig() 115 | .withName(stream) 116 | .withHeapCapacity(capacity))); 117 | final Ringbuffer buffer = s.getInstance().getRingbuffer(Namespace.METEOR_STREAM.qualify(stream)); 118 | 119 | buffer.add("hello".getBytes()); 120 | 121 | final RecordBatch b0 = s.poll(1_000); 122 | assertEquals(1, b0.size()); 123 | assertArrayEquals("hello".getBytes(), b0.toList().get(0).getData()); 124 | 125 | final RecordBatch b1 = s.poll(10); 126 | assertEquals(0, b1.size()); 127 | 128 | verifyNoError(eh); 129 | } 130 | 131 | /** 132 | * Consumes two messages. 133 | * 134 | */ 135 | @Test 136 | public void testConsumeTwo() throws InterruptedException { 137 | final String stream = "s"; 138 | final int capacity = 10; 139 | 140 | final ExceptionHandler eh = mockExceptionHandler(); 141 | final DefaultSubscriber s = 142 | configureSubscriber(new SubscriberConfig() 143 | .withExceptionHandler(eh) 144 | .withStreamConfig(new StreamConfig() 145 | .withName(stream) 146 | .withHeapCapacity(capacity))); 147 | final Ringbuffer buffer = s.getInstance().getRingbuffer(Namespace.METEOR_STREAM.qualify(stream)); 148 | 149 | buffer.add("h0".getBytes()); 150 | buffer.add("h1".getBytes()); 151 | 152 | final RecordBatch b0 = s.poll(1_000); 153 | assertEquals(2, b0.size()); 154 | assertArrayEquals("h0".getBytes(), b0.toList().get(0).getData()); 155 | assertArrayEquals("h1".getBytes(), b0.toList().get(1).getData()); 156 | 157 | final RecordBatch b1 = s.poll(10); 158 | assertEquals(0, b1.size()); 159 | 160 | verifyNoError(eh); 161 | } 162 | 163 | /** 164 | * Tests the consumption of two messages and then a seek back by one position, so that the last 165 | * message can be consumed again. 
166 | * 167 | */ 168 | @Test 169 | public void testSeek() throws InterruptedException { 170 | final String stream = "s"; 171 | final int capacity = 10; 172 | 173 | final ExceptionHandler eh = mockExceptionHandler(); 174 | final DefaultSubscriber s = 175 | configureSubscriber(new SubscriberConfig() 176 | .withExceptionHandler(eh) 177 | .withStreamConfig(new StreamConfig() 178 | .withName(stream) 179 | .withHeapCapacity(capacity))); 180 | final Ringbuffer buffer = s.getInstance().getRingbuffer(Namespace.METEOR_STREAM.qualify(stream)); 181 | 182 | buffer.add("h0".getBytes()); 183 | buffer.add("h1".getBytes()); 184 | 185 | s.seek(1); 186 | final RecordBatch b0 = s.poll(1_000); 187 | assertEquals(1, b0.size()); 188 | assertArrayEquals("h1".getBytes(), b0.toList().get(0).getData()); 189 | 190 | verifyNoError(eh); 191 | } 192 | 193 | /** 194 | * Tests a seek to an illegal position. 195 | * 196 | */ 197 | @Test(expected=IllegalArgumentException.class) 198 | public void testSeekIllegalArgumentTooLow() { 199 | final String stream = "s"; 200 | final int capacity = 10; 201 | 202 | final DefaultSubscriber s = 203 | configureSubscriber(new SubscriberConfig().withStreamConfig(new StreamConfig() 204 | .withName(stream) 205 | .withHeapCapacity(capacity))); 206 | s.seek(-5); 207 | } 208 | 209 | /** 210 | * Tests a seek to a position outside of the buffer's allowable range. 211 | * 212 | */ 213 | @Test 214 | public void testSeekIllegalArgumentBeyondLastOffset() throws InterruptedException { 215 | final String stream = "s"; 216 | final int capacity = 10; 217 | final ExceptionHandler eh = mockExceptionHandler(); 218 | 219 | final DefaultSubscriber s = 220 | configureSubscriber(new SubscriberConfig() 221 | .withExceptionHandler(eh) 222 | .withStreamConfig(new StreamConfig() 223 | .withName(stream) 224 | .withHeapCapacity(capacity))); 225 | s.seek(1); 226 | s.poll(1_000); 227 | 228 | verify(eh).onException(notNull(), isA(IllegalArgumentException.class)); 229 | } 230 | 231 | /** 232 | * Tests read failure by rigging the ringbuffer to throw an exception when reading. 
233 | * 234 | */ 235 | @Test 236 | public void testReadFailure() throws InterruptedException { 237 | final String stream = "s"; 238 | final int capacity = 1; 239 | final ExceptionHandler eh = mock(ExceptionHandler.class); 240 | 241 | final HazelcastInstance realInstance = newInstance(); 242 | final HazelcastInstance mockInstance = mock(HazelcastInstance.class); 243 | when(mockInstance.getConfig()).thenReturn(realInstance.getConfig()); 244 | when(mockInstance.getMap(any())).thenAnswer(invocation -> realInstance.getMap(invocation.getArgument(0))); 245 | final PartitionService partitionService = mock(PartitionService.class); 246 | when(mockInstance.getPartitionService()).thenReturn(partitionService); 247 | final Partition partition = mock(Partition.class); 248 | when(partitionService.getPartition(any())).thenReturn(partition); 249 | @SuppressWarnings("unchecked") 250 | final Ringbuffer ringbuffer = mock(Ringbuffer.class); 251 | when(mockInstance.getRingbuffer(any())).thenReturn(ringbuffer); 252 | 253 | final Exception cause = new Exception("simulated error"); 254 | when(ringbuffer.readManyAsync(anyLong(), anyInt(), anyInt(), any())) 255 | .thenReturn(new CompletedFuture<>(null, cause, null)); 256 | 257 | final DefaultSubscriber s = 258 | configureSubscriber(mockInstance, 259 | new SubscriberConfig() 260 | .withExceptionHandler(eh) 261 | .withInitialOffsetScheme(InitialOffsetScheme.EARLIEST) 262 | .withStreamConfig(new StreamConfig() 263 | .withName(stream) 264 | .withHeapCapacity(capacity))); 265 | final RecordBatch b = s.poll(1_000); 266 | assertEquals(0, b.size()); 267 | verify(eh).onException(isNotNull(), eq(cause)); 268 | } 269 | 270 | /** 271 | * Tests stale reads. This is achieved by simulating a buffer overflow with no backing 272 | * storage, so that the subscriber consumes from a stale sequence. The subscriber, having 273 | * noticed the stale offset, should fast-forward the next read offset to the next safe read 274 | * position, plus a small safety buffer (which for this test we've set to zero). 275 | * 276 | */ 277 | @Test 278 | public void testStaleRead() throws InterruptedException { 279 | final String stream = "s"; 280 | final int capacity = 1; 281 | final ExceptionHandler eh = mockExceptionHandler(); 282 | 283 | final DefaultSubscriber s = 284 | configureSubscriber(new SubscriberConfig() 285 | .withExceptionHandler(eh) 286 | .withStaleReadSafetyMargin(0) 287 | .withStreamConfig(new StreamConfig() 288 | .withName(stream) 289 | .withHeapCapacity(capacity))); 290 | final Ringbuffer buffer = s.getInstance().getRingbuffer(Namespace.METEOR_STREAM.qualify(stream)); 291 | 292 | buffer.add("h0".getBytes()); 293 | buffer.add("h1".getBytes()); 294 | final RecordBatch b = s.poll(1_000); 295 | assertEquals(1, b.size()); 296 | assertArrayEquals("h1".getBytes(), b.toList().get(0).getData()); 297 | verifyNoError(eh); 298 | } 299 | 300 | /** 301 | * Tests initialising to the earliest offset. 
302 | * 303 | */ 304 | @Test 305 | public void testInitialOffsetEarliest() throws InterruptedException { 306 | final String stream = "s"; 307 | final int capacity = 10; 308 | 309 | final HazelcastInstance instance = newInstance(); 310 | final Ringbuffer buffer = instance.getRingbuffer(Namespace.METEOR_STREAM.qualify(stream)); 311 | buffer.add("h0".getBytes()); 312 | buffer.add("h1".getBytes()); 313 | 314 | final ExceptionHandler eh = mockExceptionHandler(); 315 | final DefaultSubscriber subscriber = 316 | configureSubscriber(instance, 317 | new SubscriberConfig() 318 | .withExceptionHandler(eh) 319 | .withInitialOffsetScheme(InitialOffsetScheme.EARLIEST) 320 | .withStreamConfig(new StreamConfig() 321 | .withName(stream) 322 | .withHeapCapacity(capacity))); 323 | 324 | final RecordBatch b = subscriber.poll(1_000); 325 | assertEquals(2, b.size()); 326 | 327 | verifyNoError(eh); 328 | } 329 | 330 | /** 331 | * Tests initialising to the latest offset. 332 | * 333 | */ 334 | @Test 335 | public void testInitialOffsetLatest() throws InterruptedException { 336 | final String stream = "s"; 337 | final int capacity = 10; 338 | 339 | final HazelcastInstance instance = newInstance(); 340 | final Ringbuffer buffer = instance.getRingbuffer(Namespace.METEOR_STREAM.qualify(stream)); 341 | buffer.add("h0".getBytes()); 342 | buffer.add("h1".getBytes()); 343 | 344 | final ExceptionHandler eh = mockExceptionHandler(); 345 | final DefaultSubscriber s = 346 | configureSubscriber(instance, 347 | new SubscriberConfig() 348 | .withExceptionHandler(eh) 349 | .withInitialOffsetScheme(InitialOffsetScheme.LATEST) 350 | .withStreamConfig(new StreamConfig() 351 | .withName(stream) 352 | .withHeapCapacity(capacity))); 353 | 354 | final RecordBatch b = s.poll(10); 355 | assertEquals(0, b.size()); 356 | 357 | verifyNoError(eh); 358 | } 359 | 360 | /** 361 | * Tests the {@link InitialOffsetScheme#NONE} offset initialisation, which is only allowed in 362 | * a grouped context. 363 | * 364 | */ 365 | @Test(expected=InvalidInitialOffsetSchemeException.class) 366 | public void testInitialOffsetNone() { 367 | final String stream = "s"; 368 | final int capacity = 10; 369 | 370 | configureSubscriber(new SubscriberConfig() 371 | .withInitialOffsetScheme(InitialOffsetScheme.NONE) 372 | .withStreamConfig(new StreamConfig() 373 | .withName(stream) 374 | .withHeapCapacity(capacity))); 375 | } 376 | 377 | /** 378 | * Tests ability to asynchronously consume messages from a {@link Receiver}. 
379 | */ 380 | @Test 381 | public void testReceiverConsume() { 382 | final String stream = "s"; 383 | final int capacity = 10; 384 | 385 | final ExceptionHandler eh = mockExceptionHandler(); 386 | final DefaultSubscriber s = 387 | configureSubscriber(new SubscriberConfig() 388 | .withExceptionHandler(eh) 389 | .withStreamConfig(new StreamConfig() 390 | .withName(stream) 391 | .withHeapCapacity(capacity))); 392 | final RecordHandler handler = mock(RecordHandler.class); 393 | final Receiver receiver = s.attachReceiver(handler, 1_000); 394 | 395 | final Ringbuffer buffer = s.getInstance().getRingbuffer(Namespace.METEOR_STREAM.qualify(stream)); 396 | 397 | buffer.add("h0".getBytes()); 398 | buffer.add("h1".getBytes()); 399 | 400 | wait.until(() -> { 401 | verify(handler, times(2)).onRecord(isNotNull()); 402 | }); 403 | verifyNoError(eh); 404 | 405 | s.terminate().joinSilently(); 406 | 407 | assertEquals(WorkerState.TERMINATED, ((DefaultReceiver) receiver).getThreadState()); 408 | } 409 | 410 | /** 411 | * Tests attaching a receiver a second time, which should fail as there can be at 412 | * most one receiver. 413 | */ 414 | @Test(expected=IllegalStateException.class) 415 | public void testReceiverDuplicateAttach() { 416 | final String stream = "s"; 417 | final int capacity = 10; 418 | 419 | final ExceptionHandler eh = mockExceptionHandler(); 420 | final DefaultSubscriber s = 421 | configureSubscriber(new SubscriberConfig() 422 | .withExceptionHandler(eh) 423 | .withStreamConfig(new StreamConfig() 424 | .withName(stream) 425 | .withHeapCapacity(capacity))); 426 | final RecordHandler handler = mock(RecordHandler.class); 427 | s.attachReceiver(handler, 1_000); 428 | s.attachReceiver(handler, 1_000); 429 | } 430 | } -------------------------------------------------------------------------------- /core/src/test/java/com/obsidiandynamics/meteor/sample/AsyncPubSubSample.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor.sample; 2 | 3 | import com.hazelcast.config.*; 4 | import com.hazelcast.core.*; 5 | import com.obsidiandynamics.meteor.*; 6 | import com.obsidiandynamics.meteor.Record; 7 | import com.obsidiandynamics.threads.*; 8 | import com.obsidiandynamics.zerolog.*; 9 | 10 | public final class AsyncPubSubSample { 11 | public static void main(String[] args) { 12 | // set up a Zerolog logger and bridge from Hazelcast's internal logger 13 | final Zlg zlg = Zlg.forDeclaringClass().get(); 14 | HazelcastZlgBridge.install(); 15 | 16 | // configure Hazelcast 17 | final HazelcastProvider provider = GridProvider.getInstance(); 18 | final HazelcastInstance instance = provider.createInstance(new Config()); 19 | 20 | // the stream config is shared between all publishers and subscribers 21 | final StreamConfig streamConfig = new StreamConfig().withName("test-stream"); 22 | 23 | // create a publisher and send a message 24 | final Publisher publisher = Publisher.createDefault(instance, 25 | new PublisherConfig() 26 | .withStreamConfig(streamConfig)); 27 | 28 | publisher.publishAsync(new Record("Hello world".getBytes())); 29 | 30 | // create a subscriber for a test group and poll for records 31 | final Subscriber subscriber = Subscriber.createDefault(instance, 32 | new SubscriberConfig() 33 | .withStreamConfig(streamConfig) 34 | .withGroup("test-group")); 35 | 36 | // receive records asynchronously; polls every 100 ms 37 | subscriber.attachReceiver(record -> { 38 | zlg.i("Got %s", z -> z.arg(new String(record.getData()))); 39 | 
subscriber.confirm(); 40 | }, 100); 41 | 42 | // give it some time... 43 | Threads.sleep(5_000); 44 | 45 | // clean up 46 | publisher.terminate().joinSilently(); 47 | subscriber.terminate().joinSilently(); 48 | instance.shutdown(); 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /core/src/test/java/com/obsidiandynamics/meteor/sample/SyncPubSubSample.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor.sample; 2 | 3 | import com.hazelcast.config.*; 4 | import com.hazelcast.core.*; 5 | import com.obsidiandynamics.meteor.*; 6 | import com.obsidiandynamics.meteor.Record; 7 | import com.obsidiandynamics.zerolog.*; 8 | 9 | public final class SyncPubSubSample { 10 | public static void main(String[] args) throws InterruptedException { 11 | // set up a Zerolog logger and bridge from Hazelcast's internal logger 12 | final Zlg zlg = Zlg.forDeclaringClass().get(); 13 | HazelcastZlgBridge.install(); 14 | 15 | // configure Hazelcast 16 | final HazelcastProvider provider = GridProvider.getInstance(); 17 | final HazelcastInstance instance = provider.createInstance(new Config()); 18 | 19 | // the stream config is shared between all publishers and subscribers 20 | final StreamConfig streamConfig = new StreamConfig().withName("test-stream"); 21 | 22 | // create a publisher and send a message 23 | final Publisher publisher = Publisher.createDefault(instance, 24 | new PublisherConfig() 25 | .withStreamConfig(streamConfig)); 26 | 27 | publisher.publishAsync(new Record("Hello world".getBytes())); 28 | 29 | // create a subscriber for a test group and poll for records 30 | final Subscriber subscriber = Subscriber.createDefault(instance, 31 | new SubscriberConfig() 32 | .withStreamConfig(streamConfig) 33 | .withGroup("test-group")); 34 | // 10 polls, at 100 ms each 35 | for (int i = 0; i < 10; i++) { 36 | zlg.i("Polling..."); 37 | final RecordBatch records = subscriber.poll(100); 38 | 39 | if (! 
records.isEmpty()) { 40 | zlg.i("Got %d record(s)", z -> z.arg(records::size)); 41 | records.forEach(r -> zlg.i(new String(r.getData()))); 42 | subscriber.confirm(); 43 | } 44 | } 45 | 46 | // clean up 47 | publisher.terminate().joinSilently(); 48 | subscriber.terminate().joinSilently(); 49 | instance.shutdown(); 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /elect/.gitignore: -------------------------------------------------------------------------------- 1 | /bin/ 2 | /build/ 3 | /.settings/ 4 | .classpath 5 | .project 6 | -------------------------------------------------------------------------------- /elect/build.gradle: -------------------------------------------------------------------------------- 1 | def packageName = "meteor-elect" 2 | def packageDescription = "Leader election subsystem for Meteor" 3 | 4 | dependencies { 5 | api project(":") 6 | api "org.apache.commons:commons-lang3:${commonsLangVersion}" 7 | 8 | testImplementation project(":") 9 | testImplementation project(":meteor-assurance") 10 | } 11 | 12 | publishing { 13 | publications { 14 | mavenJava(MavenPublication) { 15 | artifactId = packageName 16 | from components.java 17 | pom { 18 | name = packageName 19 | description = packageDescription 20 | url = 'https://github.com/obsidiandynamics/meteor' 21 | licenses { 22 | license { 23 | name = 'BSD 3-Clause License' 24 | url = 'https://opensource.org/licenses/BSD-3-Clause' 25 | } 26 | } 27 | developers { 28 | developer { 29 | id = 'ekoutanov' 30 | name = 'Emil Koutanov' 31 | email = 'ekoutanov@gmail.com' 32 | } 33 | } 34 | scm { 35 | connection = 'scm:git:git://github.com/obsidiandynamics/meteor.git' 36 | developerConnection = 'scm:git:ssh://github.com/obsidiandynamics/meteor.git' 37 | url = 'https://github.com/obsidiandynamics/meteor' 38 | } 39 | } 40 | } 41 | } 42 | } -------------------------------------------------------------------------------- /elect/src/main/java/com/obsidiandynamics/meteor/Election.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import static com.obsidiandynamics.retry.Retry.*; 4 | 5 | import java.util.*; 6 | 7 | import com.hazelcast.core.*; 8 | import com.obsidiandynamics.meteor.util.*; 9 | import com.obsidiandynamics.retry.*; 10 | import com.obsidiandynamics.worker.*; 11 | import com.obsidiandynamics.zerolog.*; 12 | import com.obsidiandynamics.zerolog.util.*; 13 | 14 | public final class Election implements Terminable, Joinable { 15 | private final ElectionConfig config; 16 | 17 | private final RetryableMap<String, byte[]> leases; 18 | 19 | private final Registry registry; 20 | 21 | private final WorkerThread scavengerThread; 22 | 23 | private final Object scavengeLock = new Object(); 24 | 25 | private final Object viewLock = new Object(); 26 | 27 | private ScavengeWatcher scavengeWatcher = ScavengeWatcher.nop(); 28 | 29 | private volatile LeaseViewImpl leaseView = new LeaseViewImpl(0); 30 | 31 | private long nextViewVersion = 1; 32 | 33 | public Election(ElectionConfig config, IMap<String, byte[]> leases) { 34 | this.config = config; 35 | 36 | final Retry retry = new Retry() 37 | .withExceptionMatcher(isA(HazelcastException.class)) 38 | .withAttempts(Integer.MAX_VALUE) 39 | .withBackoff(100) 40 | .withFaultHandler(config.getZlg()::w) 41 | .withErrorHandler(config.getZlg()::e); 42 | this.leases = new RetryableMap<>(retry, leases); 43 | registry = new Registry(); 44 | 45 | scavengerThread = WorkerThread.builder() 46 | .withOptions(new 
WorkerOptions().daemon().withName(Election.class, "scavenger")) 47 | .onCycle(this::scavengerCycle) 48 | .onUncaughtException(new ZlgWorkerExceptionHandler(config.getZlg())) 49 | .build(); 50 | } 51 | 52 | public Election start() { 53 | scavengerThread.start(); 54 | return this; 55 | } 56 | 57 | public Registry getRegistry() { 58 | return registry; 59 | } 60 | 61 | private void scavengerCycle(WorkerThread t) throws InterruptedException { 62 | scavenge(); 63 | Thread.sleep(config.getScavengeInterval()); 64 | } 65 | 66 | void scavenge() { 67 | reloadView(); 68 | 69 | synchronized (scavengeLock) { 70 | final Set<String> resources = registry.getCandidatesView().keySet(); 71 | for (String resource : resources) { 72 | final Lease existingLease = leaseView.getOrDefault(resource, Lease.vacant()); 73 | if (! existingLease.isCurrent()) { 74 | if (existingLease.isVacant()) { 75 | config.getZlg().d("Lease of %s is vacant", z -> z.arg(resource)); 76 | } else { 77 | scavengeWatcher.onExpire(resource, existingLease.getTenant()); 78 | config.getZlg().d("Lease of %s by %s expired at %s", 79 | z -> z 80 | .arg(resource) 81 | .arg(existingLease::getTenant) 82 | .arg(Args.map(existingLease::getExpiry, Lease::formatExpiry))); 83 | } 84 | 85 | final UUID nextCandidate = registry.getRandomCandidate(resource); 86 | if (nextCandidate != null) { 87 | final boolean success; 88 | final Lease newLease = new Lease(nextCandidate, System.currentTimeMillis() + config.getLeaseDuration()); 89 | if (existingLease.isVacant()) { 90 | final byte[] previous = leases.putIfAbsent(resource, newLease.pack()); 91 | success = previous == null; 92 | } else { 93 | success = leases.replace(resource, existingLease.pack(), newLease.pack()); 94 | } 95 | 96 | if (success) { 97 | config.getZlg().d("New lease of %s by %s until %s", 98 | z -> z 99 | .arg(resource) 100 | .arg(nextCandidate) 101 | .arg(Args.map(newLease::getExpiry, Lease::formatExpiry))); 102 | reloadView(); 103 | scavengeWatcher.onAssign(resource, nextCandidate); 104 | } 105 | } 106 | } 107 | } 108 | } 109 | } 110 | 111 | private void reloadView() { 112 | synchronized (viewLock) { 113 | final LeaseViewImpl newLeaseView = new LeaseViewImpl(nextViewVersion++); 114 | for (Map.Entry<String, byte[]> leaseTableEntry : leases.entrySet()) { 115 | final Lease lease = Lease.unpack(leaseTableEntry.getValue()); 116 | newLeaseView.put(leaseTableEntry.getKey(), lease); 117 | } 118 | leaseView = newLeaseView; 119 | } 120 | } 121 | 122 | public LeaseView getLeaseView() { 123 | return leaseView; 124 | } 125 | 126 | public void extend(String resource, UUID tenant) throws NotTenantException { 127 | for (;;) { 128 | final Lease existingLease = checkCurrent(resource, tenant); 129 | final Lease newLease = new Lease(tenant, System.currentTimeMillis() + config.getLeaseDuration()); 130 | final boolean extended = leases.replace(resource, existingLease.pack(), newLease.pack()); 131 | if (extended) { 132 | reloadView(); 133 | return; 134 | } else { 135 | reloadView(); 136 | } 137 | } 138 | } 139 | 140 | public void yield(String resource, UUID tenant) throws NotTenantException { 141 | for (;;) { 142 | final Lease existingLease = checkCurrent(resource, tenant); 143 | final boolean removed = leases.remove(resource, existingLease.pack()); 144 | if (removed) { 145 | reloadView(); 146 | return; 147 | } else { 148 | reloadView(); 149 | } 150 | } 151 | } 152 | 153 | private Lease checkCurrent(String resource, UUID assumedTenant) throws NotTenantException { 154 | final Lease existingLease = leaseView.getOrDefault(resource, Lease.vacant()); 
155 | if (! existingLease.isHeldByAndCurrent(assumedTenant)) { 156 | final String m = String.format("Leader of %s is %s until %s", 157 | resource, existingLease.getTenant(), Lease.formatExpiry(existingLease.getExpiry())); 158 | throw new NotTenantException(m); 159 | } else { 160 | return existingLease; 161 | } 162 | } 163 | 164 | void setScavengeWatcher(ScavengeWatcher scavengeWatcher) { 165 | this.scavengeWatcher = scavengeWatcher; 166 | } 167 | 168 | @Override 169 | public Joinable terminate() { 170 | scavengerThread.terminate(); 171 | return this; 172 | } 173 | 174 | @Override 175 | public boolean join(long timeoutMillis) throws InterruptedException { 176 | return scavengerThread.join(timeoutMillis); 177 | } 178 | } 179 | -------------------------------------------------------------------------------- /elect/src/main/java/com/obsidiandynamics/meteor/ElectionConfig.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import com.obsidiandynamics.yconf.*; 4 | import com.obsidiandynamics.zerolog.*; 5 | 6 | @Y 7 | public final class ElectionConfig { 8 | @YInject 9 | private Zlg zlg = Zlg.forDeclaringClass().get(); 10 | 11 | @YInject 12 | private int scavengeIntervalMillis = 100; 13 | 14 | @YInject 15 | private int leaseDurationMillis = 60_000; 16 | 17 | Zlg getZlg() { 18 | return zlg; 19 | } 20 | 21 | public ElectionConfig withZlg(Zlg zlg) { 22 | this.zlg = zlg; 23 | return this; 24 | } 25 | 26 | int getScavengeInterval() { 27 | return scavengeIntervalMillis; 28 | } 29 | 30 | public ElectionConfig withScavengeInterval(int scavengeIntervalMillis) { 31 | this.scavengeIntervalMillis = scavengeIntervalMillis; 32 | return this; 33 | } 34 | 35 | int getLeaseDuration() { 36 | return leaseDurationMillis; 37 | } 38 | 39 | public ElectionConfig withLeaseDuration(int leaseDurationMillis) { 40 | this.leaseDurationMillis = leaseDurationMillis; 41 | return this; 42 | } 43 | 44 | @Override 45 | public String toString() { 46 | return ElectionConfig.class.getSimpleName() + " [scavengeIntervalMillis=" + scavengeIntervalMillis + 47 | ", leaseDurationMillis=" + leaseDurationMillis + "]"; 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /elect/src/main/java/com/obsidiandynamics/meteor/Lease.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import java.nio.*; 4 | import java.text.*; 5 | import java.util.*; 6 | 7 | import org.apache.commons.lang3.builder.*; 8 | 9 | public final class Lease { 10 | private static final Lease vacant = new Lease(null, 0); 11 | 12 | public static Lease vacant() { return vacant; } 13 | 14 | private final UUID tenant; 15 | 16 | private final long expiry; 17 | 18 | Lease(UUID tenant, long expiry) { 19 | this.tenant = tenant; 20 | this.expiry = expiry; 21 | } 22 | 23 | public boolean isVacant() { 24 | return tenant == null; 25 | } 26 | 27 | public boolean isHeldBy(UUID candidate) { 28 | return candidate.equals(tenant); 29 | } 30 | 31 | public boolean isHeldByAndCurrent(UUID candidate) { 32 | return isHeldBy(candidate) && isCurrent(); 33 | } 34 | 35 | public UUID getTenant() { 36 | return tenant; 37 | } 38 | 39 | public long getExpiry() { 40 | return expiry; 41 | } 42 | 43 | @Override 44 | public int hashCode() { 45 | return new HashCodeBuilder().append(tenant).append(expiry).hashCode(); 46 | } 47 | 48 | @Override 49 | public boolean equals(Object obj) { 50 | if (this == 
obj) { 51 | return true; 52 | } else if (obj instanceof Lease) { 53 | final Lease that = (Lease) obj; 54 | return new EqualsBuilder().append(tenant, that.tenant).append(expiry, that.expiry).isEquals(); 55 | } else { 56 | return false; 57 | } 58 | } 59 | 60 | public boolean isCurrent() { 61 | return expiry != 0 && System.currentTimeMillis() <= expiry; 62 | } 63 | 64 | @Override 65 | public String toString() { 66 | return Lease.class.getSimpleName() + " [tenant=" + tenant + ", expiry=" + formatExpiry(expiry) + "]"; 67 | } 68 | 69 | public static String formatExpiry(long expiry) { 70 | if (expiry == Long.MAX_VALUE) { 71 | return "eschaton"; 72 | } else if (expiry == 0) { 73 | return "epoch"; 74 | } else { 75 | return new SimpleDateFormat("MMM dd HH:mm:ss.SSS zzz yyyy").format(new Date(expiry)); 76 | } 77 | } 78 | 79 | public byte[] pack() { 80 | final ByteBuffer buf = ByteBuffer.allocate(24); 81 | buf.putLong(tenant.getMostSignificantBits()); 82 | buf.putLong(tenant.getLeastSignificantBits()); 83 | buf.putLong(expiry); 84 | return buf.array(); 85 | } 86 | 87 | public static Lease unpack(byte[] bytes) { 88 | final ByteBuffer buf = ByteBuffer.wrap(bytes); 89 | final UUID tenant = new UUID(buf.getLong(), buf.getLong()); 90 | final long expiry = buf.getLong(); 91 | return new Lease(tenant, expiry); 92 | } 93 | 94 | public static Lease forever(UUID tenant) { 95 | return new Lease(tenant, Long.MAX_VALUE); 96 | } 97 | 98 | public static Lease expired(UUID tenant) { 99 | return new Lease(tenant, 0); 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /elect/src/main/java/com/obsidiandynamics/meteor/LeaseView.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import java.util.*; 4 | 5 | public interface LeaseView { 6 | Map<String, Lease> asMap(); 7 | 8 | long getVersion(); 9 | 10 | default Lease getLease(String resource) { 11 | return asMap().getOrDefault(resource, Lease.vacant()); 12 | } 13 | 14 | default boolean isCurrentTenant(String resource, UUID candidate) { 15 | return getLease(resource).isHeldByAndCurrent(candidate); 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /elect/src/main/java/com/obsidiandynamics/meteor/LeaseViewImpl.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import java.util.*; 4 | 5 | final class LeaseViewImpl implements LeaseView { 6 | private final Map<String, Lease> leases; 7 | 8 | private final long version; 9 | 10 | LeaseViewImpl(long version) { 11 | this.version = version; 12 | leases = new HashMap<>(); 13 | } 14 | 15 | LeaseViewImpl(LeaseViewImpl original, long version) { 16 | this.version = version; 17 | leases = new HashMap<>(original.leases); 18 | } 19 | 20 | @Override 21 | public Map<String, Lease> asMap() { 22 | return this.leases; 23 | } 24 | 25 | @Override 26 | public long getVersion() { 27 | return version; 28 | } 29 | 30 | @Override 31 | public String toString() { 32 | return super.toString() + "@" + version; 33 | } 34 | 35 | public Lease put(String key, Lease lease) { 36 | return leases.put(key, lease); 37 | } 38 | 39 | public Lease getOrDefault(String resource, Lease vacant) { 40 | return leases.getOrDefault(resource, vacant); 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /elect/src/main/java/com/obsidiandynamics/meteor/NotTenantException.java: 
-------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | public final class NotTenantException extends Exception { 4 | private static final long serialVersionUID = 1L; 5 | 6 | NotTenantException(String m) { 7 | super(m); 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /elect/src/main/java/com/obsidiandynamics/meteor/Registry.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import java.util.*; 4 | 5 | public final class Registry { 6 | private final Map<String, Set<UUID>> candidates = new HashMap<>(); 7 | private final Object lock = new Object(); 8 | 9 | public Registry withCandidate(String resource, UUID candidate) { 10 | enrol(resource, candidate); 11 | return this; 12 | } 13 | 14 | public void enrolAll(Registry source) { 15 | candidates.putAll(source.getCandidatesView()); 16 | } 17 | 18 | public void enrol(String resource, UUID candidate) { 19 | synchronized (lock) { 20 | final Set<UUID> candidatesForResource = candidates.computeIfAbsent(resource, k -> new HashSet<>()); 21 | candidatesForResource.add(candidate); 22 | } 23 | } 24 | 25 | public void unenrol(String resource, UUID candidate) { 26 | synchronized (lock) { 27 | final Set<UUID> candidatesForResource = candidates.getOrDefault(resource, Collections.emptySet()); 28 | candidatesForResource.remove(candidate); 29 | if (candidatesForResource.isEmpty()) { 30 | candidates.remove(resource); 31 | } 32 | } 33 | } 34 | 35 | public Set<String> getResourcesView() { 36 | final Set<String> copy; 37 | synchronized (lock) { 38 | copy = new HashSet<>(candidates.keySet()); 39 | } 40 | return Collections.unmodifiableSet(copy); 41 | } 42 | 43 | public Map<String, Set<UUID>> getCandidatesView() { 44 | final Map<String, Set<UUID>> copy = new HashMap<>(); 45 | synchronized (lock) { 46 | for (Map.Entry<String, Set<UUID>> entry : candidates.entrySet()) { 47 | copy.put(entry.getKey(), new HashSet<>(entry.getValue())); 48 | } 49 | } 50 | return Collections.unmodifiableMap(copy); 51 | } 52 | 53 | UUID getRandomCandidate(String resource) { 54 | synchronized (lock) { 55 | final Set<UUID> candidatesForResource = candidates.getOrDefault(resource, Collections.emptySet()); 56 | if (candidatesForResource.isEmpty()) { 57 | return null; 58 | } else { 59 | final int randomIndex = (int) (Math.random() * candidatesForResource.size()); 60 | return new ArrayList<>(candidatesForResource).get(randomIndex); 61 | } 62 | } 63 | } 64 | 65 | @Override 66 | public String toString() { 67 | return Registry.class.getSimpleName() + " [candidates=" + candidates + "]"; 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /elect/src/main/java/com/obsidiandynamics/meteor/ScavengeWatcher.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import static com.obsidiandynamics.func.Functions.*; 4 | 5 | import java.util.*; 6 | 7 | interface ScavengeWatcher { 8 | ScavengeWatcher nop = new ScavengeWatcher() { 9 | @Override 10 | public void onExpire(String resource, UUID tenant) { 11 | mustExist(resource); 12 | mustExist(tenant); 13 | } 14 | 15 | @Override 16 | public void onAssign(String resource, UUID tenant) { 17 | mustExist(resource); 18 | mustExist(tenant); 19 | } 20 | }; 21 | 22 | static ScavengeWatcher nop() { return nop; } 23 | 24 | void onAssign(String resource, UUID tenant); 25 | 26 | void onExpire(String resource, UUID tenant); 27 | } 28 | 
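The elect module's building blocks shown above — ElectionConfig, Lease, LeaseView and Registry — are plain value and registry types that can be exercised without a running Hazelcast cluster. The sketch below is illustrative only and is not part of the repository: the class name ElectSketch and the resource name "roles/supervisor" are hypothetical, and the code relies solely on the public APIs visible above.

package com.obsidiandynamics.meteor;

import java.util.*;

public final class ElectSketch {
  public static void main(String[] args) {
    // fluent configuration, as exercised by ElectionConfigTest
    final ElectionConfig config = new ElectionConfig()
        .withScavengeInterval(100)
        .withLeaseDuration(60_000);
    System.out.println(config);

    // enrol a candidate for a named resource
    final UUID candidate = UUID.randomUUID();
    final Registry registry = new Registry().withCandidate("roles/supervisor", candidate);
    System.out.println("candidates: " + registry.getCandidatesView());

    // a non-expiring lease held by the candidate, round-tripped through its 24-byte wire form
    final Lease lease = Lease.unpack(Lease.forever(candidate).pack());
    System.out.println("held and current: " + lease.isHeldByAndCurrent(candidate)); // true
    System.out.println("expires: " + Lease.formatExpiry(lease.getExpiry()));        // "eschaton"
  }
}

A vacant or expired lease fails isHeldByAndCurrent(...), as LeaseTest below verifies.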
-------------------------------------------------------------------------------- /elect/src/test/java/com/obsidiandynamics/meteor/ElectionConfigTest.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import static org.junit.Assert.*; 4 | 5 | import org.junit.*; 6 | 7 | import com.obsidiandynamics.assertion.*; 8 | import com.obsidiandynamics.zerolog.*; 9 | 10 | public final class ElectionConfigTest { 11 | @Test 12 | public void testFields() { 13 | final Zlg zlg = Zlg.forDeclaringClass().get(); 14 | final ElectionConfig c = new ElectionConfig() 15 | .withZlg(zlg) 16 | .withLeaseDuration(100) 17 | .withScavengeInterval(200); 18 | 19 | assertEquals(zlg, c.getZlg()); 20 | assertEquals(100, c.getLeaseDuration()); 21 | assertEquals(200, c.getScavengeInterval()); 22 | } 23 | 24 | @Test 25 | public void testToString() { 26 | Assertions.assertToStringOverride(new ElectionConfig()); 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /elect/src/test/java/com/obsidiandynamics/meteor/LeaseTest.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import static org.junit.Assert.*; 4 | 5 | import java.util.*; 6 | 7 | import org.junit.*; 8 | 9 | import com.obsidiandynamics.assertion.*; 10 | 11 | import nl.jqno.equalsverifier.*; 12 | 13 | public final class LeaseTest { 14 | @Test 15 | public void testToString() { 16 | Assertions.assertToStringOverride(new Lease(UUID.randomUUID(), 0)); 17 | } 18 | 19 | @Test 20 | public void testEqualsHashCode() { 21 | EqualsVerifier.forClass(Lease.class).verify(); 22 | } 23 | 24 | @Test 25 | public void testPackUnpack() { 26 | final Lease original = new Lease(UUID.randomUUID(), System.currentTimeMillis()); 27 | final byte[] packed = original.pack(); 28 | final Lease unpacked = Lease.unpack(packed); 29 | assertEquals(original, unpacked); 30 | } 31 | 32 | @Test 33 | public void testFields() { 34 | final UUID c = UUID.randomUUID(); 35 | final Lease current = new Lease(c, Long.MAX_VALUE); 36 | assertEquals(c, current.getTenant()); 37 | assertEquals(Long.MAX_VALUE, current.getExpiry()); 38 | } 39 | 40 | @Test 41 | public void testHeldByAndCurrent() { 42 | final UUID c = UUID.randomUUID(); 43 | final Lease current = new Lease(c, Long.MAX_VALUE); 44 | assertFalse(current.isVacant()); 45 | assertTrue(current.isHeldBy(c)); 46 | assertTrue(current.isCurrent()); 47 | assertTrue(current.isHeldByAndCurrent(c)); 48 | assertFalse(current.isHeldByAndCurrent(UUID.randomUUID())); 49 | 50 | final Lease vacant = Lease.vacant(); 51 | assertTrue(vacant.isVacant()); 52 | assertFalse(vacant.isHeldBy(c)); 53 | assertFalse(vacant.isCurrent()); 54 | assertFalse(vacant.isHeldByAndCurrent(c)); 55 | 56 | final Lease expired = new Lease(c, 1); 57 | assertTrue(expired.isHeldBy(c)); 58 | assertFalse(expired.isHeldBy(UUID.randomUUID())); 59 | assertFalse(expired.isCurrent()); 60 | assertFalse(expired.isHeldByAndCurrent(c)); 61 | assertFalse(expired.isHeldByAndCurrent(UUID.randomUUID())); 62 | } 63 | 64 | @Test 65 | public void testForever() { 66 | assertEquals(Long.MAX_VALUE, Lease.forever(UUID.randomUUID()).getExpiry()); 67 | } 68 | 69 | @Test 70 | public void testExpired() { 71 | assertEquals(0, Lease.expired(UUID.randomUUID()).getExpiry()); 72 | } 73 | 74 | @Test 75 | public void testFormatExpiry() { 76 | assertEquals("eschaton", Lease.formatExpiry(Long.MAX_VALUE)); 77 | assertEquals("epoch", 
Lease.formatExpiry(0)); 78 | assertNotNull(Lease.formatExpiry(1)); 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /elect/src/test/java/com/obsidiandynamics/meteor/LeaseViewTest.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import static org.junit.Assert.*; 4 | 5 | import java.util.*; 6 | 7 | import org.junit.*; 8 | 9 | import com.obsidiandynamics.assertion.*; 10 | 11 | public final class LeaseViewTest { 12 | @Test 13 | public void testToString() { 14 | Assertions.assertToStringOverride(new LeaseViewImpl(10)); 15 | } 16 | 17 | @Test 18 | public void testViewVersion() { 19 | assertEquals(10, new LeaseViewImpl(10).getVersion()); 20 | } 21 | 22 | @Test 23 | public void testCopy() { 24 | final LeaseViewImpl orig = new LeaseViewImpl(0); 25 | final Lease lease0 = Lease.forever(UUID.randomUUID()); 26 | orig.put("key0", lease0); 27 | 28 | final LeaseViewImpl copy = new LeaseViewImpl(orig, 1); 29 | assertEquals(1, copy.getVersion()); 30 | assertEquals(Collections.singletonMap("key0", lease0), copy.asMap()); 31 | 32 | // mutate the copy and ensure that the original remains unchanged 33 | final Lease lease1 = Lease.forever(UUID.randomUUID()); 34 | copy.put("key1", lease1); 35 | assertEquals(Collections.singletonMap("key0", lease0), orig.asMap()); 36 | 37 | // mutate the original and ensure that the copy remains unchanged 38 | final Lease lease2 = Lease.forever(UUID.randomUUID()); 39 | orig.put("key2", lease2); 40 | assertEquals(2, copy.asMap().size()); 41 | assertEquals(lease0, copy.asMap().get("key0")); 42 | assertEquals(lease1, copy.asMap().get("key1")); 43 | } 44 | 45 | @Test 46 | public void testGetTenant() { 47 | final LeaseViewImpl v = new LeaseViewImpl(0); 48 | final UUID c = UUID.randomUUID(); 49 | v.put("resource", new Lease(c, Long.MAX_VALUE)); 50 | assertEquals(c, v.getLease("resource").getTenant()); 51 | } 52 | 53 | @Test 54 | public void testIsCurrentTenant() { 55 | final LeaseViewImpl v = new LeaseViewImpl(0); 56 | final UUID c = UUID.randomUUID(); 57 | v.put("resource", new Lease(c, Long.MAX_VALUE)); 58 | assertTrue(v.isCurrentTenant("resource", c)); 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /elect/src/test/java/com/obsidiandynamics/meteor/RegistryTest.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import static org.junit.Assert.*; 4 | 5 | import java.util.*; 6 | 7 | import org.junit.*; 8 | 9 | import com.obsidiandynamics.assertion.*; 10 | 11 | public final class RegistryTest { 12 | @Test 13 | public void testToString() { 14 | Assertions.assertToStringOverride(new Registry()); 15 | } 16 | 17 | @Test 18 | public void testEnrolAll() { 19 | final UUID c = UUID.randomUUID(); 20 | final Registry source = new Registry().withCandidate("resource", c); 21 | final Registry r = new Registry(); 22 | r.enrolAll(source); 23 | assertEquals(Collections.singleton(c), r.getCandidatesView().get("resource")); 24 | assertEquals(1, r.getCandidatesView().size()); 25 | assertEquals(Collections.singleton("resource"), r.getResourcesView()); 26 | } 27 | 28 | @Test 29 | public void testEnrolUnenrol() { 30 | final Registry r = new Registry(); 31 | assertEquals(Collections.emptyMap(), r.getCandidatesView()); 32 | assertNull(r.getRandomCandidate("key")); 33 | 34 | final UUID c0 = UUID.randomUUID(); 35 | r.enrol("key", c0); 36 | 
assertEquals(1, r.getCandidatesView().size()); 37 | assertEquals(setOf(c0), r.getCandidatesView().get("key")); 38 | assertNotNull(r.getRandomCandidate("key")); 39 | 40 | final UUID c1 = UUID.randomUUID(); 41 | r.enrol("key", c1); 42 | assertEquals(1, r.getCandidatesView().size()); 43 | assertEquals(setOf(c0, c1), r.getCandidatesView().get("key")); 44 | assertNotNull(r.getRandomCandidate("key")); 45 | 46 | r.unenrol("key", c0); 47 | assertEquals(1, r.getCandidatesView().size()); 48 | assertEquals(setOf(c1), r.getCandidatesView().get("key")); 49 | assertNotNull(r.getRandomCandidate("key")); 50 | 51 | r.unenrol("key", c1); 52 | assertEquals(Collections.emptyMap(), r.getCandidatesView()); 53 | assertNull(r.getRandomCandidate("key")); 54 | } 55 | 56 | @SafeVarargs 57 | private static Set setOf(T... items) { 58 | return new HashSet<>(Arrays.asList(items)); 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /gradle.properties: -------------------------------------------------------------------------------- 1 | org.gradle.daemon=true 2 | org.gradle.parallel=true 3 | org.gradle.fork=true 4 | org.gradle.jvmargs=--add-opens=java.base/java.lang=ALL-UNNAMED 5 | org.gradle.configureondemand=true 6 | -------------------------------------------------------------------------------- /gradle/wrapper/gradle-wrapper.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/obsidiandynamics/meteor/327d4260f854e0d4d8073db0de4be7cb2b9c9174/gradle/wrapper/gradle-wrapper.jar -------------------------------------------------------------------------------- /gradle/wrapper/gradle-wrapper.properties: -------------------------------------------------------------------------------- 1 | distributionBase=GRADLE_USER_HOME 2 | distributionPath=wrapper/dists 3 | distributionUrl=https\://services.gradle.org/distributions/gradle-7.5-bin.zip 4 | zipStoreBase=GRADLE_USER_HOME 5 | zipStorePath=wrapper/dists 6 | -------------------------------------------------------------------------------- /gradlew: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | # 4 | # Copyright 2015 the original author or authors. 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | # 18 | 19 | ############################################################################## 20 | ## 21 | ## Gradle start up script for UN*X 22 | ## 23 | ############################################################################## 24 | 25 | # Attempt to set APP_HOME 26 | # Resolve links: $0 may be a link 27 | PRG="$0" 28 | # Need this for relative symlinks. 
29 | while [ -h "$PRG" ] ; do 30 | ls=`ls -ld "$PRG"` 31 | link=`expr "$ls" : '.*-> \(.*\)$'` 32 | if expr "$link" : '/.*' > /dev/null; then 33 | PRG="$link" 34 | else 35 | PRG=`dirname "$PRG"`"/$link" 36 | fi 37 | done 38 | SAVED="`pwd`" 39 | cd "`dirname \"$PRG\"`/" >/dev/null 40 | APP_HOME="`pwd -P`" 41 | cd "$SAVED" >/dev/null 42 | 43 | APP_NAME="Gradle" 44 | APP_BASE_NAME=`basename "$0"` 45 | 46 | # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 47 | DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' 48 | 49 | # Use the maximum available, or set MAX_FD != -1 to use that value. 50 | MAX_FD="maximum" 51 | 52 | warn () { 53 | echo "$*" 54 | } 55 | 56 | die () { 57 | echo 58 | echo "$*" 59 | echo 60 | exit 1 61 | } 62 | 63 | # OS specific support (must be 'true' or 'false'). 64 | cygwin=false 65 | msys=false 66 | darwin=false 67 | nonstop=false 68 | case "`uname`" in 69 | CYGWIN* ) 70 | cygwin=true 71 | ;; 72 | Darwin* ) 73 | darwin=true 74 | ;; 75 | MINGW* ) 76 | msys=true 77 | ;; 78 | NONSTOP* ) 79 | nonstop=true 80 | ;; 81 | esac 82 | 83 | CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar 84 | 85 | # Determine the Java command to use to start the JVM. 86 | if [ -n "$JAVA_HOME" ] ; then 87 | if [ -x "$JAVA_HOME/jre/sh/java" ] ; then 88 | # IBM's JDK on AIX uses strange locations for the executables 89 | JAVACMD="$JAVA_HOME/jre/sh/java" 90 | else 91 | JAVACMD="$JAVA_HOME/bin/java" 92 | fi 93 | if [ ! -x "$JAVACMD" ] ; then 94 | die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME 95 | 96 | Please set the JAVA_HOME variable in your environment to match the 97 | location of your Java installation." 98 | fi 99 | else 100 | JAVACMD="java" 101 | which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 102 | 103 | Please set the JAVA_HOME variable in your environment to match the 104 | location of your Java installation." 105 | fi 106 | 107 | # Increase the maximum file descriptors if we can. 108 | if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then 109 | MAX_FD_LIMIT=`ulimit -H -n` 110 | if [ $? -eq 0 ] ; then 111 | if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then 112 | MAX_FD="$MAX_FD_LIMIT" 113 | fi 114 | ulimit -n $MAX_FD 115 | if [ $? 
-ne 0 ] ; then 116 | warn "Could not set maximum file descriptor limit: $MAX_FD" 117 | fi 118 | else 119 | warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" 120 | fi 121 | fi 122 | 123 | # For Darwin, add options to specify how the application appears in the dock 124 | if $darwin; then 125 | GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" 126 | fi 127 | 128 | # For Cygwin, switch paths to Windows format before running java 129 | if $cygwin ; then 130 | APP_HOME=`cygpath --path --mixed "$APP_HOME"` 131 | CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` 132 | JAVACMD=`cygpath --unix "$JAVACMD"` 133 | 134 | # We build the pattern for arguments to be converted via cygpath 135 | ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` 136 | SEP="" 137 | for dir in $ROOTDIRSRAW ; do 138 | ROOTDIRS="$ROOTDIRS$SEP$dir" 139 | SEP="|" 140 | done 141 | OURCYGPATTERN="(^($ROOTDIRS))" 142 | # Add a user-defined pattern to the cygpath arguments 143 | if [ "$GRADLE_CYGPATTERN" != "" ] ; then 144 | OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" 145 | fi 146 | # Now convert the arguments - kludge to limit ourselves to /bin/sh 147 | i=0 148 | for arg in "$@" ; do 149 | CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` 150 | CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option 151 | 152 | if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition 153 | eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` 154 | else 155 | eval `echo args$i`="\"$arg\"" 156 | fi 157 | i=$((i+1)) 158 | done 159 | case $i in 160 | (0) set -- ;; 161 | (1) set -- "$args0" ;; 162 | (2) set -- "$args0" "$args1" ;; 163 | (3) set -- "$args0" "$args1" "$args2" ;; 164 | (4) set -- "$args0" "$args1" "$args2" "$args3" ;; 165 | (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; 166 | (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; 167 | (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; 168 | (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; 169 | (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; 170 | esac 171 | fi 172 | 173 | # Escape application args 174 | save () { 175 | for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done 176 | echo " " 177 | } 178 | APP_ARGS=$(save "$@") 179 | 180 | # Collect all arguments for the java command, following the shell quoting and substitution rules 181 | eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" 182 | 183 | # by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong 184 | if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then 185 | cd "$(dirname "$0")" 186 | fi 187 | 188 | exec "$JAVACMD" "$@" 189 | -------------------------------------------------------------------------------- /gradlew.bat: -------------------------------------------------------------------------------- 1 | @rem 2 | @rem Copyright 2015 the original author or authors. 3 | @rem 4 | @rem Licensed under the Apache License, Version 2.0 (the "License"); 5 | @rem you may not use this file except in compliance with the License. 
6 | @rem You may obtain a copy of the License at 7 | @rem 8 | @rem http://www.apache.org/licenses/LICENSE-2.0 9 | @rem 10 | @rem Unless required by applicable law or agreed to in writing, software 11 | @rem distributed under the License is distributed on an "AS IS" BASIS, 12 | @rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | @rem See the License for the specific language governing permissions and 14 | @rem limitations under the License. 15 | @rem 16 | 17 | @if "%DEBUG%" == "" @echo off 18 | @rem ########################################################################## 19 | @rem 20 | @rem Gradle startup script for Windows 21 | @rem 22 | @rem ########################################################################## 23 | 24 | @rem Set local scope for the variables with windows NT shell 25 | if "%OS%"=="Windows_NT" setlocal 26 | 27 | set DIRNAME=%~dp0 28 | if "%DIRNAME%" == "" set DIRNAME=. 29 | set APP_BASE_NAME=%~n0 30 | set APP_HOME=%DIRNAME% 31 | 32 | @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 33 | set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" 34 | 35 | @rem Find java.exe 36 | if defined JAVA_HOME goto findJavaFromJavaHome 37 | 38 | set JAVA_EXE=java.exe 39 | %JAVA_EXE% -version >NUL 2>&1 40 | if "%ERRORLEVEL%" == "0" goto init 41 | 42 | echo. 43 | echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 44 | echo. 45 | echo Please set the JAVA_HOME variable in your environment to match the 46 | echo location of your Java installation. 47 | 48 | goto fail 49 | 50 | :findJavaFromJavaHome 51 | set JAVA_HOME=%JAVA_HOME:"=% 52 | set JAVA_EXE=%JAVA_HOME%/bin/java.exe 53 | 54 | if exist "%JAVA_EXE%" goto init 55 | 56 | echo. 57 | echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 58 | echo. 59 | echo Please set the JAVA_HOME variable in your environment to match the 60 | echo location of your Java installation. 61 | 62 | goto fail 63 | 64 | :init 65 | @rem Get command-line arguments, handling Windows variants 66 | 67 | if not "%OS%" == "Windows_NT" goto win9xME_args 68 | 69 | :win9xME_args 70 | @rem Slurp the command line arguments. 71 | set CMD_LINE_ARGS= 72 | set _SKIP=2 73 | 74 | :win9xME_args_slurp 75 | if "x%~1" == "x" goto execute 76 | 77 | set CMD_LINE_ARGS=%* 78 | 79 | :execute 80 | @rem Setup the command line 81 | 82 | set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar 83 | 84 | @rem Execute Gradle 85 | "%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% 86 | 87 | :end 88 | @rem End local scope for the variables with windows NT shell 89 | if "%ERRORLEVEL%"=="0" goto mainEnd 90 | 91 | :fail 92 | rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of 93 | rem the _cmd.exe /c_ return code! 
94 | if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 95 | exit /b 1 96 | 97 | :mainEnd 98 | if "%OS%"=="Windows_NT" endlocal 99 | 100 | :omega 101 | -------------------------------------------------------------------------------- /settings.gradle: -------------------------------------------------------------------------------- 1 | rootProject.name = "meteor" 2 | 3 | include "meteor-assurance" 4 | project(":meteor-assurance").projectDir = new File(settingsDir, "assurance") 5 | 6 | include "meteor-core" 7 | project(":meteor-core").projectDir = new File(settingsDir, "core") 8 | 9 | include "meteor-elect" 10 | project(":meteor-elect").projectDir = new File(settingsDir, "elect") 11 | -------------------------------------------------------------------------------- /src/main/java/com/obsidiandynamics/meteor/GridProvider.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import com.hazelcast.config.*; 4 | import com.hazelcast.core.*; 5 | import com.obsidiandynamics.yconf.*; 6 | 7 | @Y(GridProvider.Mapper.class) 8 | public final class GridProvider implements HazelcastProvider { 9 | public static final class Mapper implements TypeMapper { 10 | @Override public Object map(YObject y, Class<?> type) { 11 | return instance; 12 | } 13 | } 14 | 15 | private static final GridProvider instance = new GridProvider(); 16 | 17 | public static GridProvider getInstance() { 18 | return instance; 19 | } 20 | 21 | private GridProvider() {} 22 | 23 | @Override 24 | public HazelcastInstance createInstance(Config config) { 25 | return Hazelcast.newHazelcastInstance(config); 26 | } 27 | 28 | @Override 29 | public void shutdownAll() { 30 | Hazelcast.shutdownAll(); 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /src/main/java/com/obsidiandynamics/meteor/HazelcastProvider.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import com.hazelcast.config.*; 4 | import com.hazelcast.core.*; 5 | 6 | public interface HazelcastProvider { 7 | HazelcastInstance createInstance(Config config); 8 | 9 | void shutdownAll(); 10 | } 11 | -------------------------------------------------------------------------------- /src/main/java/com/obsidiandynamics/meteor/util/NamespaceEnum.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor.util; 2 | 3 | public interface NamespaceEnum { 4 | default String qualify(String objectName) { 5 | return toString().toLowerCase().replace('_', '.') + "::" + objectName; 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /src/main/java/com/obsidiandynamics/meteor/util/RetryableMap.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor.util; 2 | 3 | import java.util.*; 4 | 5 | import com.hazelcast.core.*; 6 | import com.obsidiandynamics.retry.*; 7 | 8 | public final class RetryableMap<K, V> { 9 | private final Retry retry; 10 | 11 | private final IMap<K, V> map; 12 | 13 | public RetryableMap(Retry retry, IMap<K, V> map) { 14 | this.retry = retry; 15 | this.map = map; 16 | } 17 | 18 | public V putIfAbsent(K key, V value) { 19 | return retry.run(() -> map.putIfAbsent(key, value)); 20 | } 21 | 22 | public Set<Map.Entry<K, V>> entrySet() { 23 | return retry.run(() -> map.entrySet()); 24 | } 25 | 26 | public boolean replace(K key, V oldValue, V newValue) 
{ 27 | return retry.run(() -> map.replace(key, oldValue, newValue)); 28 | } 29 | 30 | public boolean remove(Object key, Object value) { 31 | return retry.run(() -> map.remove(key, value)); 32 | } 33 | 34 | public V get(Object key) { 35 | return retry.run(() -> map.get(key)); 36 | } 37 | 38 | public V put(K key, V value) { 39 | return retry.run(() -> map.put(key, value)); 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /src/main/java/com/obsidiandynamics/meteor/util/RetryableRingbuffer.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor.util; 2 | 3 | import java.util.*; 4 | 5 | import com.hazelcast.core.*; 6 | import com.hazelcast.ringbuffer.*; 7 | import com.obsidiandynamics.retry.*; 8 | 9 | public final class RetryableRingbuffer<E> { 10 | private final Retry retry; 11 | 12 | private final Ringbuffer<E> ringbuffer; 13 | 14 | public RetryableRingbuffer(Retry retry, Ringbuffer<E> ringbuffer) { 15 | this.retry = retry; 16 | this.ringbuffer = ringbuffer; 17 | } 18 | 19 | public Ringbuffer<E> getRingbuffer() { 20 | return ringbuffer; 21 | } 22 | 23 | public ICompletableFuture<Long> addAllAsync(Collection<? extends E> collection, OverflowPolicy overflowPolicy) { 24 | return retry.run(() -> ringbuffer.addAllAsync(collection, overflowPolicy)); 25 | } 26 | 27 | public long add(E item) { 28 | return retry.run(() -> ringbuffer.add(item)); 29 | } 30 | 31 | public ICompletableFuture<ReadResultSet<E>> readManyAsync(long startSequence, int minCount, int maxCount, IFunction<E, Boolean> filter) { 32 | return retry.run(() -> ringbuffer.readManyAsync(startSequence, minCount, maxCount, filter)); 33 | } 34 | 35 | public long tailSequence() { 36 | return retry.run(ringbuffer::tailSequence); 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /src/test/java/com/obsidiandynamics/meteor/GridProviderTest.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import static org.junit.Assert.*; 4 | 5 | import java.io.*; 6 | 7 | import org.junit.*; 8 | 9 | import com.hazelcast.config.*; 10 | import com.hazelcast.core.*; 11 | import com.obsidiandynamics.yconf.*; 12 | 13 | public final class GridProviderTest { 14 | private HazelcastProvider provider; 15 | 16 | @Before 17 | public void before() { 18 | provider = GridProvider.getInstance(); 19 | } 20 | 21 | @After 22 | public void after() { 23 | if (provider != null) provider.shutdownAll(); 24 | } 25 | 26 | @Test 27 | public void testCreate() { 28 | final Config config = new Config() 29 | .setProperty("hazelcast.shutdownhook.enabled", "false") 30 | .setProperty("hazelcast.logging.type", "none"); 31 | 32 | final MulticastConfig multicastConfig = new MulticastConfig() 33 | .setEnabled(false); 34 | 35 | final TcpIpConfig tcpIpConfig = new TcpIpConfig() 36 | .setEnabled(false); 37 | 38 | config.setNetworkConfig(new NetworkConfig().setJoin(new JoinConfig() 39 | .setMulticastConfig(multicastConfig) 40 | .setTcpIpConfig(tcpIpConfig))); 41 | final HazelcastInstance instance = GridProvider.getInstance().createInstance(config); 42 | assertNotNull(instance); 43 | } 44 | 45 | @Test 46 | public void testConfig() throws IOException { 47 | assertNotNull(new MappingContext().withParser(__reader -> new Object()).fromString("").map(GridProvider.class)); 48 | } 49 | } 50 | -------------------------------------------------------------------------------- 
/src/test/java/com/obsidiandynamics/meteor/HazelcastSample.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import java.util.*; 4 | 5 | import com.hazelcast.config.*; 6 | import com.hazelcast.core.*; 7 | 8 | public final class HazelcastSample { 9 | public static void main(String[] args) { 10 | final Config config = new Config() 11 | .setProperty("hazelcast.logging.type", "slf4j") 12 | .setProperty("hazelcast.max.no.heartbeat.seconds", String.valueOf(10)); 13 | 14 | final HazelcastProvider provider = GridProvider.getInstance(); 15 | final HazelcastInstance h0 = provider.createInstance(config); 16 | final HazelcastInstance h1 = provider.createInstance(config); 17 | useInstance(h0); 18 | useInstance(h1); 19 | h0.shutdown(); 20 | h1.shutdown(); 21 | } 22 | 23 | private static void useInstance(HazelcastInstance instance) { 24 | final Map<Integer, String> mapCustomers = instance.getMap("customers"); 25 | mapCustomers.put(1, "Alpha"); 26 | mapCustomers.put(2, "Bravo"); 27 | mapCustomers.put(3, "Charlie"); 28 | 29 | System.out.println("Customer with key 1: " + mapCustomers.get(1)); 30 | System.out.println("Map size: " + mapCustomers.size()); 31 | 32 | final Queue<String> queueCustomers = instance.getQueue("customers"); 33 | queueCustomers.offer("Tom"); 34 | queueCustomers.offer("Mary"); 35 | queueCustomers.offer("Jane"); 36 | System.out.println("First customer: " + queueCustomers.poll()); 37 | System.out.println("Second customer: " + queueCustomers.peek()); 38 | System.out.println("Queue size: " + queueCustomers.size()); 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /src/test/java/com/obsidiandynamics/meteor/HeapRingbufferStore.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import java.io.*; 4 | import java.util.*; 5 | 6 | import com.hazelcast.core.*; 7 | import com.obsidiandynamics.func.*; 8 | 9 | public final class HeapRingbufferStore implements RingbufferStore<Object> { 10 | public static final class Factory implements RingbufferStoreFactory<Object>, Serializable { 11 | private static final long serialVersionUID = 1L; 12 | 13 | @Override 14 | public HeapRingbufferStore newRingbufferStore(String name, Properties properties) { 15 | return new HeapRingbufferStore(); 16 | } 17 | } 18 | 19 | private final List<byte[]> stored = new ArrayList<>(); 20 | 21 | private HeapRingbufferStore() {} 22 | 23 | @Override 24 | public void store(long sequence, Object data) { 25 | stored.add(Classes.cast(data)); 26 | } 27 | 28 | @Override 29 | public void storeAll(long firstItemSequence, Object[] items) { 30 | long sequence = firstItemSequence; 31 | for (Object item : items) { 32 | store(sequence++, item); 33 | } 34 | } 35 | 36 | @Override 37 | public byte[] load(long sequence) { 38 | return stored.get((int) sequence); 39 | } 40 | 41 | @Override 42 | public long getLargestSequence() { 43 | return stored.size() - 1; 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /src/test/java/com/obsidiandynamics/meteor/NopRingbufferStore.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import java.io.*; 4 | import java.util.*; 5 | 6 | import com.hazelcast.core.*; 7 | 8 | public final class NopRingbufferStore implements RingbufferStore<Object> { 9 | public static final class Factory implements RingbufferStoreFactory<Object>, 
Serializable { 10 | private static final long serialVersionUID = 1L; 11 | 12 | private static final Factory instance = new Factory(); 13 | 14 | public static Factory getInstance() { return instance; } 15 | 16 | @Override 17 | public NopRingbufferStore newRingbufferStore(String name, Properties properties) { 18 | return NopRingbufferStore.instance; 19 | } 20 | } 21 | 22 | private static final NopRingbufferStore instance = new NopRingbufferStore(); 23 | 24 | private NopRingbufferStore() {} 25 | 26 | @Override 27 | public void store(long sequence, Object data) {} 28 | 29 | @Override 30 | public void storeAll(long firstItemSequence, Object[] items) {} 31 | 32 | @Override 33 | public byte[] load(long sequence) { 34 | return null; 35 | } 36 | 37 | @Override 38 | public long getLargestSequence() { 39 | return -1; 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /src/test/java/com/obsidiandynamics/meteor/NopRingbufferStoreTest.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor; 2 | 3 | import static org.junit.Assert.*; 4 | 5 | import java.util.*; 6 | 7 | import org.junit.*; 8 | 9 | public final class NopRingbufferStoreTest { 10 | @Test 11 | public void testMethods() { 12 | final NopRingbufferStore store = NopRingbufferStore.Factory.getInstance() 13 | .newRingbufferStore("store", new Properties()); 14 | store.store(0, null); 15 | store.storeAll(0, null); 16 | assertNull(store.load(0)); 17 | assertEquals(-1, store.getLargestSequence()); 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /src/test/java/com/obsidiandynamics/meteor/util/Bandwidth.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor.util; 2 | 3 | /** 4 | * Helper for expressing bandwidth figures. 5 | */ 6 | public final class Bandwidth { 7 | public enum Unit { 8 | TBPS(1_000_000_000_000L, "Tbit/s"), 9 | GBPS(1_000_000_000, "Gbit/s"), 10 | MBPS(1_000_000, "Mbit/s"), 11 | KBPS(1_000, "kbit/s"), 12 | BPS(1, "bit/s"); 13 | 14 | private final long bits; 15 | private final String symbol; 16 | 17 | Unit(long bits, String symbol) { this.bits = bits; this.symbol = symbol; } 18 | 19 | public long getBits() { 20 | return bits; 21 | } 22 | 23 | public String getSymbol() { 24 | return symbol; 25 | } 26 | } 27 | 28 | /** 29 | * A tuple comprising the rate and the unit of measurement. 
30 | */ 31 | public static final class RateAndUnit { 32 | private double rate; 33 | private Unit unit; 34 | 35 | public RateAndUnit(double rate, Unit unit) { 36 | this.rate = rate; 37 | this.unit = unit; 38 | } 39 | 40 | public double getRate() { 41 | return rate; 42 | } 43 | 44 | public void setRate(double rate) { 45 | this.rate = rate; 46 | } 47 | 48 | public Unit getUnit() { 49 | return unit; 50 | } 51 | 52 | public void setUnit(Unit unit) { 53 | this.unit = unit; 54 | } 55 | 56 | @Override 57 | public String toString() { 58 | return String.format("%,.1f %s", rate, unit.symbol); 59 | } 60 | } 61 | 62 | private Bandwidth() {} 63 | 64 | public static RateAndUnit translate(long bps) { 65 | final Unit unit; 66 | if (bps > Unit.TBPS.bits) { 67 | unit = Unit.TBPS; 68 | } else if (bps > Unit.GBPS.bits) { 69 | unit = Unit.GBPS; 70 | } else if (bps > Unit.MBPS.bits) { 71 | unit = Unit.MBPS; 72 | } else if (bps > Unit.KBPS.bits) { 73 | unit = Unit.KBPS; 74 | } else { 75 | unit = Unit.BPS; 76 | } 77 | return new RateAndUnit((double) bps / unit.bits, unit); 78 | } 79 | } -------------------------------------------------------------------------------- /src/test/java/com/obsidiandynamics/meteor/util/NamespaceEnumTest.java: -------------------------------------------------------------------------------- 1 | package com.obsidiandynamics.meteor.util; 2 | 3 | import static org.junit.Assert.*; 4 | 5 | import org.junit.*; 6 | 7 | public final class NamespaceEnumTest { 8 | private enum E implements NamespaceEnum { 9 | TEST_PACKAGE 10 | } 11 | 12 | @Test 13 | public void testQualify() { 14 | assertEquals("test.package::item", E.TEST_PACKAGE.qualify("item")); 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /src/test/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | log4j.rootCategory=trace, CONSOLE, NULL 2 | 3 | log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender 4 | log4j.appender.CONSOLE.layout=org.apache.log4j.EnhancedPatternLayout 5 | log4j.appender.CONSOLE.layout.ConversionPattern=%d{HH:mm:ss}/%-5r %-5p [%t] %C{1}.%M():%L: %m%n 6 | log4j.appender.CONSOLE.threshold=trace 7 | 8 | log4j.appender.NULL=org.apache.log4j.varia.NullAppender 9 | log4j.appender.NULL.threshold=trace 10 | 11 | log4j.logger.io=info 12 | log4j.logger.org=info 13 | log4j.logger.com=info 14 | log4j.logger.com.obsidiandynamics.meteor=info -------------------------------------------------------------------------------- /src/test/resources/zlg.properties: -------------------------------------------------------------------------------- 1 | zlg.base.level=CONF --------------------------------------------------------------------------------
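As a closing note on the test utilities: Bandwidth.translate(long) scales a raw bits-per-second figure to the largest unit it strictly exceeds, and RateAndUnit.toString() renders the result to one decimal place. The fragment below is a hypothetical usage sketch (the class name BandwidthSketch is not part of the repository) that relies only on the Bandwidth API shown above.

package com.obsidiandynamics.meteor.util;

public final class BandwidthSketch {
  public static void main(String[] args) {
    // 1,200,000,000 bit/s strictly exceeds the Gbit/s threshold: prints "1.2 Gbit/s" (subject to locale formatting)
    System.out.println(Bandwidth.translate(1_200_000_000L));

    // anything at or below 1,000 bit/s falls through to the base unit: prints "800.0 bit/s"
    System.out.println(Bandwidth.translate(800));
  }
}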