├── src ├── main │ └── java │ │ └── biz │ │ └── paluch │ │ └── spinach │ │ ├── package-info.java │ │ ├── api │ │ ├── package-info.java │ │ ├── rx │ │ │ ├── package-info.java │ │ │ ├── DisqueReactiveCommands.java │ │ │ ├── DisqueClusterReactiveCommands.java │ │ │ └── DisqueQueueReactiveCommands.java │ │ ├── async │ │ │ ├── package-info.java │ │ │ ├── DisqueAsyncCommands.java │ │ │ ├── DisqueClusterAsyncCommands.java │ │ │ └── DisqueQueueAsyncCommands.java │ │ ├── sync │ │ │ ├── package-info.java │ │ │ ├── DisqueCommands.java │ │ │ ├── DisqueClusterCommands.java │ │ │ └── DisqueQueueCommands.java │ │ ├── CommandKeyword.java │ │ ├── CommandType.java │ │ ├── Job.java │ │ ├── DisqueConnection.java │ │ ├── QScanArgs.java │ │ ├── PauseArgs.java │ │ └── JScanArgs.java │ │ ├── output │ │ ├── package-info.java │ │ ├── SupportsObservables.java │ │ ├── StringScanOutput.java │ │ ├── JobOutput.java │ │ ├── QstatMapOutput.java │ │ └── JobListOutput.java │ │ ├── cluster │ │ ├── package-info.java │ │ ├── GetJobsArgs.java │ │ ├── DisqueNode.java │ │ ├── NodeIdAwareSocketAddressSupplier.java │ │ └── ClusterNodesParser.java │ │ ├── impl │ │ ├── package-info.java │ │ ├── ConnectionAware.java │ │ ├── EventExecutorAware.java │ │ ├── SocketAddressSupplier.java │ │ ├── BaseCommandBuilder.java │ │ ├── DisqueCommand.java │ │ ├── RoundRobin.java │ │ ├── HelloClusterSocketAddressSupplier.java │ │ ├── SocketAddressSupplierFactory.java │ │ ├── RoundRobinSocketAddressSupplier.java │ │ ├── FutureSyncInvocationHandler.java │ │ ├── DisqueCommandArgs.java │ │ └── ClusterAwareNodeSupport.java │ │ └── SocketAddressResolver.java ├── test │ ├── java │ │ └── biz │ │ │ └── paluch │ │ │ └── spinach │ │ │ ├── impl │ │ │ ├── TestClusterAwareNodeSupport.java │ │ │ ├── ClusterAwareNodeSupportTest.java │ │ │ ├── AsyncCommandTest.java │ │ │ └── DisqueCommandTest.java │ │ │ ├── commands │ │ │ ├── rx │ │ │ │ ├── RxQueueCommandTest.java │ │ │ │ ├── RxClusterCommandTest.java │ │ │ │ ├── RxServerCommandTest.java │ │ │ 
│ ├── RxJobCommandTest.java │ │ │ │ └── RxSyncInvocationHandler.java │ │ │ ├── BasicCommandTest.java │ │ │ ├── AbstractCommandTest.java │ │ │ └── ClusterCommandTest.java │ │ │ ├── Example.java │ │ │ ├── Sockets.java │ │ │ ├── examples │ │ │ ├── Standalone.java │ │ │ └── PeriodicallyUpdatingSocketAddressSupplierFactory.java │ │ │ ├── support │ │ │ ├── FastShutdown.java │ │ │ ├── TestClientResources.java │ │ │ ├── DefaultDisqueClient.java │ │ │ └── TestEventLoopGroupProvider.java │ │ │ ├── ClusterConnectionTest.java │ │ │ ├── RoundRobinSocketAddressSupplierTest.java │ │ │ ├── UnixDomainSocketTest.java │ │ │ ├── TestSettings.java │ │ │ ├── cluster │ │ │ └── ClusterNodesParserTest.java │ │ │ ├── SslTest.java │ │ │ ├── SyncAsyncApiConvergenceTest.java │ │ │ └── ClientMetricsTest.java │ └── resources │ │ └── log4j.properties ├── assembly │ ├── src.xml │ └── bin.xml └── site │ ├── markdown │ ├── index.md.vm │ └── download.md.vm │ └── site.xml ├── .gitignore ├── .travis.yml ├── .github ├── PULL_REQUEST_TEMPLATE.md ├── ISSUE_TEMPLATE.md └── CONTRIBUTING.md ├── Makefile └── README.md /src/main/java/biz/paluch/spinach/package-info.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Disque client. 3 | */ 4 | package biz.paluch.spinach; -------------------------------------------------------------------------------- /src/main/java/biz/paluch/spinach/api/package-info.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Disque API. 3 | */ 4 | package biz.paluch.spinach.api; -------------------------------------------------------------------------------- /src/main/java/biz/paluch/spinach/output/package-info.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Command output processors. 
3 | */ 4 | package biz.paluch.spinach.output; 5 | 6 | -------------------------------------------------------------------------------- /src/main/java/biz/paluch/spinach/api/rx/package-info.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Disque API for reactive executed commands. 3 | */ 4 | package biz.paluch.spinach.api.rx; -------------------------------------------------------------------------------- /src/main/java/biz/paluch/spinach/cluster/package-info.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Support for Disque Cluster. 3 | */ 4 | package biz.paluch.spinach.cluster; 5 | 6 | -------------------------------------------------------------------------------- /src/main/java/biz/paluch/spinach/api/async/package-info.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Disque API for asynchronous executed commands. 3 | */ 4 | package biz.paluch.spinach.api.async; -------------------------------------------------------------------------------- /src/main/java/biz/paluch/spinach/api/sync/package-info.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Disque API for synchronous executed commands. 3 | */ 4 | package biz.paluch.spinach.api.sync; -------------------------------------------------------------------------------- /src/main/java/biz/paluch/spinach/impl/package-info.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Disque implementation. API within this package can change at any time and without further notice. 
3 | */ 4 | package biz.paluch.spinach.impl; -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | target/ 2 | *.rdb 3 | *.aof 4 | nodes.conf 5 | 6 | - 7 | atlassian-ide-plugin.xml 8 | *.iml 9 | redis-git 10 | *.releaseBackup 11 | release.properties 12 | work/ 13 | .project 14 | .classpath 15 | .settings 16 | dependency-reduced-pom.xml 17 | .idea 18 | Vagrantfile 19 | .vagrant 20 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: java 2 | jdk: 3 | - oraclejdk8 4 | sudo: false 5 | before_install: 6 | - if [[ ! -f stunnel.tar.gz ]]; then wget -O stunnel.tar.gz ftp://ftp.stunnel.org/stunnel/archive/5.x/stunnel-5.29.tar.gz; fi 7 | - if [[ ! -f ./stunnel-5.29/configure ]]; then tar -xzf stunnel.tar.gz; fi 8 | - if [[ ! -f ./stunnel-5.29/src/stunnel ]]; then cd ./stunnel-5.29; ./configure; make; cd ..; fi 9 | - export PATH="$PATH:$(pwd)/stunnel-5.29/src" 10 | install: make prepare ssl-keys 11 | script: make test-coveralls 12 | cache: 13 | directories: 14 | - '$HOME/.m2/repository' 15 | - '$TRAVIS_BUILD_DIR/stunnel-5.29' 16 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 4 | Make sure that: 5 | 6 | - [ ] You have read the [contribution guidelines](https://github.com/mp911de/spinach/blob/master/.github/CONTRIBUTING.md). 7 | - [ ] You use the code formatters provided [here](https://github.com/mp911de/spinach/blob/master/formatting.xml) and have them applied to your changes. Don’t submit any formatting related changes. 8 | - [ ] You submit test cases (unit or integration tests) that back your changes. 
9 | 10 | 13 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 8 | Make sure that: 9 | 10 | - [ ] You have read the [contribution guidelines](https://github.com/mp911de/spinach/blob/master/.github/CONTRIBUTING.md). 11 | - [ ] You specify the spinach version and environment so it's obvious which version is affected 12 | - [ ] You provide a reproducible test case (either descriptive or as a JUnit test) if it's a bug or the expected behavior differs from the actual behavior. 13 | 14 | 17 | -------------------------------------------------------------------------------- /src/test/java/biz/paluch/spinach/impl/TestClusterAwareNodeSupport.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | package biz.paluch.spinach.impl; 17 | 18 | /** 19 | * @author Mark Paluch 20 | */ 21 | public class TestClusterAwareNodeSupport extends ClusterAwareNodeSupport { 22 | } -------------------------------------------------------------------------------- /src/test/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Root logger option 2 | log4j.rootLogger=INFO, stdout, file 3 | 4 | # Direct log messages to stdout 5 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 6 | log4j.appender.stdout.Target=System.out 7 | log4j.appender.stdout.threshold=INFO 8 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 9 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} [%-5p] [%t] (%c{1}:%L) %m%n 10 | 11 | log4j.appender.file=org.apache.log4j.FileAppender 12 | log4j.appender.file.File=target/log.log 13 | log4j.appender.file.Append=false 14 | log4j.appender.file.layout=org.apache.log4j.PatternLayout 15 | log4j.appender.file.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} [%-5p] [%t] (%c{1}:%L) %m%n 16 | 17 | log4j.logger.com.lambdaworks=INFO 18 | log4j.logger.biz.paluch.spinach=INFO 19 | log4j.logger.io.netty=INFO 20 | log4j.logger.com.lambdaworks.redis.protocol.ConnectionWatchdog=INFO 21 | 22 | 23 | 24 | -------------------------------------------------------------------------------- /.github/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to spinach 2 | 3 | If you would like to contribute code you can do so through GitHub by forking the repository and sending a pull request. 4 | 5 | When submitting code, please make every effort to follow existing conventions and style in order to keep the code as readable as possible. 
6 | Formatting settings are provided for Eclipse in https://github.com/mp911de/spinach/blob/master/formatting.xml 7 | 8 | ## Bug reports 9 | 10 | If you report a bug, please ensure to specify the following: 11 | 12 | * spinach version (e.g. 0.1) 13 | * Contextual information (what were you trying to do using spinach) 14 | * Simplest possible steps to reproduce 15 | * JUnit tests to reproduce are great but not obligatory 16 | 17 | ## License 18 | 19 | By contributing your code, you agree to license your contribution under the terms of [Apache License 2.0](http://www.apache.org/licenses/LICENSE-2.0). 20 | 21 | All files are released with the Apache 2.0 license. 22 | -------------------------------------------------------------------------------- /src/assembly/src.xml: -------------------------------------------------------------------------------- 1 | 4 | 5 | src 6 | 7 | zip 8 | tar.gz 9 | 10 | ${project.artifactId}-${project.version}-src 11 | 12 | 13 | src/main/java 14 | / 15 | true 16 | 17 | 18 | src/main/resources 19 | / 20 | true 21 | 22 | 23 | -------------------------------------------------------------------------------- /src/test/java/biz/paluch/spinach/commands/rx/RxQueueCommandTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | package biz.paluch.spinach.commands.rx; 17 | 18 | import biz.paluch.spinach.commands.QueueCommandTest; 19 | import org.junit.Before; 20 | 21 | /** 22 | * @author Mark Paluch 23 | */ 24 | public class RxQueueCommandTest extends QueueCommandTest { 25 | 26 | @Before 27 | public void openConnection() throws Exception { 28 | disque = RxSyncInvocationHandler.sync(client.connect()); 29 | disque.debugFlushall(); 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /src/test/java/biz/paluch/spinach/commands/rx/RxClusterCommandTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | package biz.paluch.spinach.commands.rx; 17 | 18 | import biz.paluch.spinach.commands.ClusterCommandTest; 19 | import org.junit.Before; 20 | 21 | import biz.paluch.spinach.commands.QueueCommandTest; 22 | 23 | /** 24 | * @author Mark Paluch 25 | */ 26 | public class RxClusterCommandTest extends ClusterCommandTest { 27 | 28 | @Before 29 | public void openConnection() throws Exception { 30 | disque = RxSyncInvocationHandler.sync(client.connect()); 31 | disque.debugFlushall(); 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /src/main/java/biz/paluch/spinach/impl/ConnectionAware.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package biz.paluch.spinach.impl; 17 | 18 | import biz.paluch.spinach.api.DisqueConnection; 19 | 20 | /** 21 | * Interface to be implemented by {@link SocketAddressSupplier} that want to be aware of their connection. 22 | * 23 | * @author Mark Paluch 24 | */ 25 | public interface ConnectionAware { 26 | 27 | /** 28 | * Set the {@link DisqueConnection connection}. 29 | *

30 | * Invoked after activating and authenticating the connection. 31 | * 32 | * @param disqueConnection the connection 33 | * @param Key type 34 | * @param Value type 35 | */ 36 | void setConnection(DisqueConnection disqueConnection); 37 | } 38 | -------------------------------------------------------------------------------- /src/main/java/biz/paluch/spinach/impl/EventExecutorAware.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package biz.paluch.spinach.impl; 17 | 18 | import io.netty.util.concurrent.EventExecutor; 19 | 20 | import java.util.concurrent.ScheduledExecutorService; 21 | 22 | /** 23 | * Interface to be implemented by {@link SocketAddressSupplier} that want to be aware of the {@link ScheduledExecutorService}. 24 | * 25 | * @author Mark Paluch 26 | */ 27 | public interface EventExecutorAware { 28 | 29 | /** 30 | * Set the {@link ScheduledExecutorService event executor}. Invoked after activating and authenticating the connection. 
31 | * 32 | * @param eventExecutor the eventExecutor 33 | */ 34 | void setEventExecutor(ScheduledExecutorService eventExecutor); 35 | } 36 | -------------------------------------------------------------------------------- /src/main/java/biz/paluch/spinach/output/SupportsObservables.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package biz.paluch.spinach.output; 17 | 18 | import rx.Subscriber; 19 | 20 | /** 21 | * Facet interface that allows streaming support for {@link rx.Observable} result types. Implementors emit elements during 22 | * command parsing. 23 | * 24 | * @author Mark Paluch 25 | */ 26 | public interface SupportsObservables { 27 | 28 | /** 29 | * Provide a subscriber for a certain output so results can be streamed to the subscriber in the moment of reception instead 30 | * of waiting for the command to finish. 31 | * 32 | * @param subscriber the subscriber 33 | * @param Result type 34 | */ 35 | void setSubscriber(Subscriber subscriber); 36 | } 37 | -------------------------------------------------------------------------------- /src/test/java/biz/paluch/spinach/commands/rx/RxServerCommandTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 
3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package biz.paluch.spinach.commands.rx; 17 | 18 | import biz.paluch.spinach.commands.ServerCommandTest; 19 | import org.junit.Before; 20 | import org.junit.Test; 21 | 22 | /** 23 | * @author Mark Paluch 24 | */ 25 | public class RxServerCommandTest extends ServerCommandTest { 26 | 27 | @Before 28 | public void openConnection() throws Exception { 29 | disque = RxSyncInvocationHandler.sync(client.connect()); 30 | disque.debugFlushall(); 31 | } 32 | 33 | // does not harm, because it's only executed when subscribing. 34 | @Test 35 | public void shutdown() throws Exception { 36 | disque.getConnection().reactive().shutdown(true); 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /src/test/java/biz/paluch/spinach/Example.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package biz.paluch.spinach; 17 | 18 | import biz.paluch.spinach.api.DisqueConnection; 19 | import biz.paluch.spinach.api.sync.DisqueCommands; 20 | 21 | /** 22 | * @author Mark Paluch 23 | */ 24 | public class Example { 25 | 26 | public static void main(String[] args) { 27 | String nodes = System.getenv("TYND_DISQUE_NODES"); 28 | String auth = System.getenv("TYND_DISQUE_AUTH"); 29 | DisqueClient disqueClient = new DisqueClient("disque://" + auth + "@" + nodes); 30 | 31 | DisqueCommands connection = disqueClient.connect().sync(); 32 | System.out.println(connection.ping()); 33 | connection.close(); 34 | disqueClient.shutdown(); 35 | 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /src/test/java/biz/paluch/spinach/Sockets.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | package biz.paluch.spinach; 17 | 18 | import java.io.IOException; 19 | import java.net.InetSocketAddress; 20 | import java.net.Socket; 21 | import java.util.concurrent.TimeUnit; 22 | 23 | /** 24 | * Utility to probe whether a TCP endpoint accepts connections (1s timeout). 25 | * 26 | * @author Mark Paluch 27 | */ 28 | public class Sockets { 29 | public static boolean isOpen(String host, int port) { 30 | // try-with-resources closes the socket on success AND failure; the previous code leaked the socket when connect() threw. 31 | try (Socket socket = new Socket()) { 32 | socket.connect(new InetSocketAddress(host, port), (int) TimeUnit.MILLISECONDS.convert(1, TimeUnit.SECONDS)); 33 | return true; 34 | } catch (IOException e) { 35 | return false; 36 | } 37 | } 38 | 39 | private Sockets() { 40 | // unused 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /src/main/java/biz/paluch/spinach/output/StringScanOutput.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package biz.paluch.spinach.output; 17 | 18 | import java.nio.ByteBuffer; 19 | 20 | import com.lambdaworks.redis.KeyScanCursor; 21 | import com.lambdaworks.redis.codec.RedisCodec; 22 | import com.lambdaworks.redis.output.ScanOutput; 23 | 24 | /** 25 | * Output handler for string-based {@code SCAN} commands. 26 | * 27 | * @param Key type. 28 | * @param Value type. 
29 | * @author Mark Paluch 30 | */ 31 | public class StringScanOutput extends ScanOutput> { 32 | 33 | public StringScanOutput(RedisCodec codec) { 34 | super(codec, new KeyScanCursor()); 35 | } 36 | 37 | @Override 38 | protected void setOutput(ByteBuffer bytes) { 39 | output.getKeys().add(bytes == null ? null : decodeAscii(bytes)); 40 | } 41 | 42 | } 43 | -------------------------------------------------------------------------------- /src/main/java/biz/paluch/spinach/api/CommandKeyword.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | package biz.paluch.spinach.api; 17 | 18 | import com.lambdaworks.redis.protocol.LettuceCharsets; 19 | import com.lambdaworks.redis.protocol.ProtocolKeyword; 20 | 21 | /** 22 | * @author Mark Paluch 23 | */ 24 | public enum CommandKeyword implements ProtocolKeyword { 25 | 26 | ALL, ASYNC, BCAST, BLOCKING, BUSYLOOP, COUNT, DELAY, FLUSHALL, FORGET, FROM, GET, HARD, ID, IMPORTRATE, IN, LEAVING, 27 | 28 | MAXLEN, MEET, MINLEN, NODES, NOHANG, NONE, OUT, QUEUE, REPLICATE, REPLY, RESET, RESETSTAT, RETRY, 29 | 30 | REWRITE, SAVECONFIG, SET, SOFT, STATE, TIMEOUT, TTL, WITHCOUNTERS; 31 | 32 | public final byte[] bytes; 33 | 34 | private CommandKeyword() { 35 | bytes = name().getBytes(LettuceCharsets.ASCII); 36 | } 37 | 38 | @Override 39 | public byte[] getBytes() { 40 | return bytes; 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /src/main/java/biz/paluch/spinach/impl/SocketAddressSupplier.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package biz.paluch.spinach.impl; 17 | 18 | import java.net.SocketAddress; 19 | import java.util.function.Supplier; 20 | 21 | /** 22 | * Supplier API for {@link SocketAddress}. 
A {@code SocketAddressSupplier} is typically used to provide a {@link SocketAddress} 23 | * for connecting to Disque. The client requests a socket address from the supplier to establish initially a connection or to 24 | * reconnect. The supplier is required to supply an infinite number of elements. The sequence and ordering of elements are a 25 | * detail of the particular implementation. 26 | *

27 | * {@link SocketAddressSupplier} instances should not be shared between connections although this is possible. 28 | *

29 | * 30 | * @author Mark Paluch 31 | * @since 0.3 32 | */ 33 | public interface SocketAddressSupplier extends Supplier { 34 | 35 | } 36 | -------------------------------------------------------------------------------- /src/test/java/biz/paluch/spinach/examples/Standalone.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | package biz.paluch.spinach.examples; 17 | 18 | import java.util.concurrent.TimeUnit; 19 | 20 | import biz.paluch.spinach.DisqueClient; 21 | import biz.paluch.spinach.DisqueURI; 22 | import biz.paluch.spinach.api.Job; 23 | import biz.paluch.spinach.api.sync.DisqueCommands; 24 | 25 | /** 26 | * @author Mark Paluch 27 | */ 28 | public class Standalone { 29 | 30 | public static void main(String[] args) { 31 | DisqueClient disqueClient = DisqueClient.create(DisqueURI.create("disque://password@localhost:7711")); 32 | DisqueCommands sync = disqueClient.connect().sync(); 33 | 34 | sync.addjob("queue", "body", 1, TimeUnit.MINUTES); 35 | 36 | Job job = sync.getjob("queue"); 37 | 38 | sync.ackjob(job.getId()); 39 | 40 | sync.close(); 41 | disqueClient.shutdown(); 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /src/main/java/biz/paluch/spinach/impl/BaseCommandBuilder.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | package biz.paluch.spinach.impl; 17 | 18 | import com.lambdaworks.redis.codec.RedisCodec; 19 | import com.lambdaworks.redis.output.CommandOutput; 20 | import com.lambdaworks.redis.protocol.Command; 21 | import com.lambdaworks.redis.protocol.ProtocolKeyword; 22 | 23 | class BaseCommandBuilder { 24 | protected RedisCodec codec; 25 | 26 | public BaseCommandBuilder(RedisCodec codec) { 27 | this.codec = codec; 28 | } 29 | 30 | protected Command createCommand(ProtocolKeyword type, CommandOutput output) { 31 | return createCommand(type, output, null); 32 | } 33 | 34 | protected Command createCommand(ProtocolKeyword type, CommandOutput output, 35 | DisqueCommandArgs args) { 36 | return new DisqueCommand(type, output, args); 37 | } 38 | 39 | } -------------------------------------------------------------------------------- /src/main/java/biz/paluch/spinach/api/CommandType.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
/**
 * Disque command names. Each constant's {@link #name()} is the literal command string sent on the wire.
 *
 * @author Mark Paluch
 */
public enum CommandType implements ProtocolKeyword {

    // Jobs
    ADDJOB, ACKJOB, DELJOB, FASTACK, GETJOB, JSCAN, SHOW,

    // Queues
    ENQUEUE, DEQUEUE, NACK, PAUSE, QLEN, QPEEK, QSCAN, QSTAT, WORKING,

    // AOF
    BGREWRITEAOF,

    // Server commands
    AUTH, CONFIG, CLUSTER, CLIENT, COMMAND, DEBUG, INFO, /* LATENCY, */HELLO, PING, QUIT, SHUTDOWN, SLOWLOG, TIME;

    // ASCII encoding of the enum name, computed once per constant at class initialization.
    public final byte[] bytes;

    CommandType() {
        bytes = name().getBytes(LettuceCharsets.ASCII);
    }

    @Override
    public byte[] getBytes() {
        return bytes;
    }

}
15 | */ 16 | package biz.paluch.spinach.support; 17 | 18 | import java.util.concurrent.TimeUnit; 19 | 20 | import com.lambdaworks.redis.AbstractRedisClient; 21 | import com.lambdaworks.redis.resource.ClientResources; 22 | 23 | /** 24 | * @author Mark Paluch 25 | */ 26 | public class FastShutdown { 27 | 28 | /** 29 | * Shut down a {@link AbstractRedisClient} with a timeout of 10ms. 30 | * 31 | * @param redisClient 32 | */ 33 | public static void shutdown(AbstractRedisClient redisClient) { 34 | redisClient.shutdown(10, 10, TimeUnit.MILLISECONDS); 35 | } 36 | 37 | /** 38 | * Shut down a {@link ClientResources} client with a timeout of 10ms. 39 | * 40 | * @param clientResources 41 | */ 42 | public static void shutdown(ClientResources clientResources) { 43 | clientResources.shutdown(10, 10, TimeUnit.MILLISECONDS); 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /src/test/java/biz/paluch/spinach/impl/ClusterAwareNodeSupportTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | package biz.paluch.spinach.impl; 17 | 18 | import biz.paluch.spinach.api.DisqueConnection; 19 | import biz.paluch.spinach.cluster.DisqueNode; 20 | import biz.paluch.spinach.commands.AbstractCommandTest; 21 | import org.junit.Test; 22 | 23 | import java.util.List; 24 | 25 | import static org.assertj.core.api.Assertions.assertThat; 26 | 27 | /** 28 | * @author Mark Paluch 29 | */ 30 | public class ClusterAwareNodeSupportTest extends AbstractCommandTest { 31 | 32 | private TestClusterAwareNodeSupport sut = new TestClusterAwareNodeSupport(); 33 | 34 | @Test 35 | public void testClusterView() throws Exception { 36 | 37 | sut.setConnection(disque.getConnection()); 38 | sut.reloadNodes(); 39 | 40 | List nodes = sut.getNodes(); 41 | assertThat(nodes.size()).isGreaterThan(1); 42 | assertThat(nodes.get(0).getPort()).isEqualTo(port); 43 | assertThat(nodes.get(0).getAddr()).isNotNull(); 44 | assertThat(nodes.get(0).getNodeId()).isNotNull(); 45 | } 46 | } -------------------------------------------------------------------------------- /src/main/java/biz/paluch/spinach/cluster/GetJobsArgs.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
/**
 * Immutable value object bundling the arguments of a GETJOB invocation: timeout, job count and the queues to poll.
 *
 * @param <Q> Queue-name type.
 * @author Mark Paluch
 */
class GetJobsArgs<Q> {

    private final long timeout;
    private final TimeUnit timeUnit;
    private final long count;
    private final Q[] queues;

    /**
     * Static factory method.
     *
     * @param timeout max time to wait for jobs
     * @param timeUnit unit of {@code timeout}
     * @param count max number of jobs to fetch
     * @param queues queues to fetch jobs from
     * @param <Q> Queue-name type.
     * @return a new {@link GetJobsArgs}
     */
    public static <Q> GetJobsArgs<Q> create(long timeout, TimeUnit timeUnit, long count, Q[] queues) {
        return new GetJobsArgs<Q>(timeout, timeUnit, count, queues);
    }

    GetJobsArgs(long timeout, TimeUnit timeUnit, long count, Q[] queues) {
        this.timeout = timeout;
        this.timeUnit = timeUnit;
        this.count = count;
        this.queues = queues;
    }

    public long getTimeout() {
        return timeout;
    }

    public TimeUnit getTimeUnit() {
        return timeUnit;
    }

    public long getCount() {
        return count;
    }

    public Q[] getQueues() {
        return queues;
    }
}
15 | */ 16 | package biz.paluch.spinach.impl; 17 | 18 | import java.util.concurrent.ExecutionException; 19 | import java.util.concurrent.TimeUnit; 20 | import java.util.concurrent.TimeoutException; 21 | 22 | import com.lambdaworks.redis.RedisCommandExecutionException; 23 | import com.lambdaworks.redis.RedisCommandInterruptedException; 24 | import com.lambdaworks.redis.output.CommandOutput; 25 | import com.lambdaworks.redis.protocol.Command; 26 | import com.lambdaworks.redis.protocol.ProtocolKeyword; 27 | 28 | /** 29 | * Command based on the original lettuce command but the command throws a {@link RedisCommandExecutionException} if Disque 30 | * reports an error while command execution. 31 | * 32 | * @author Mark Paluch 33 | */ 34 | class DisqueCommand extends Command { 35 | 36 | public DisqueCommand(ProtocolKeyword type, CommandOutput output, DisqueCommandArgs args) { 37 | super(type, output, args); 38 | } 39 | 40 | @Override 41 | public DisqueCommandArgs getArgs() { 42 | return (DisqueCommandArgs) super.getArgs(); 43 | } 44 | 45 | } 46 | -------------------------------------------------------------------------------- /src/test/java/biz/paluch/spinach/impl/AsyncCommandTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
/**
 * Tests for the asynchronous command API: server-side error propagation through {@link ExecutionException}
 * and basic {@link RedisFuture} behavior.
 *
 * @author Mark Paluch
 * @since 24.06.15 09:04
 */
public class AsyncCommandTest extends AbstractCommandTest {

    @Test(expected = ExecutionException.class)
    public void asyncThrowsExecutionException() throws Exception {
        // CLIENT KILL for an unknown client fails on the server; the error must surface
        // as an ExecutionException when the future is dereferenced via get().
        disque.getConnection().async().clientKill("do not exist").get();
    }

    @Test
    public void testAsyncCommand() throws Exception {
        // NOTE(review): raw RedisFuture — presumably RedisFuture<String>; generics appear
        // stripped in this copy of the file, confirm against the original source.
        RedisFuture ping = disque.getConnection().async().ping();
        // Futures returned by the async API are lettuce AsyncCommand instances.
        assertThat(ping).isInstanceOf(AsyncCommand.class);
        assertThat(ping.isCancelled()).isFalse();
        assertThat(ping.getError()).isNull();
        assertThat(ping.get(1, TimeUnit.MINUTES)).isEqualTo("PONG");

    }

}
13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package biz.paluch.spinach.api; 17 | 18 | import java.util.Map; 19 | 20 | /** 21 | * Disque Job data structure. 22 | * 23 | * @author Mark Paluch 24 | * @param Queue-Id Type. 25 | * @param Body-Id Type. 26 | */ 27 | public class Job { 28 | private K queue; 29 | private String id; 30 | private V body; 31 | private Map counters; 32 | 33 | protected Job() { 34 | } 35 | 36 | public Job(K queue, String id, V body, Map counters) { 37 | this.queue = queue; 38 | this.id = id; 39 | this.body = body; 40 | this.counters = counters; 41 | } 42 | 43 | /** 44 | * 45 | * @return the queue 46 | */ 47 | public K getQueue() { 48 | return queue; 49 | } 50 | 51 | /** 52 | * 53 | * @return the JobId 54 | */ 55 | public String getId() { 56 | return id; 57 | } 58 | 59 | /** 60 | * 61 | * @return the Job body 62 | */ 63 | public V getBody() { 64 | return body; 65 | } 66 | 67 | /** 68 | * If requested with a WITHCOUNTERS flag, getjob also populates a counters field. 69 | * 70 | * @return map of counters 71 | */ 72 | public Map getCounters() { return counters; } 73 | } 74 | -------------------------------------------------------------------------------- /src/test/java/biz/paluch/spinach/support/TestClientResources.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
/**
 * Client-Resources suitable for testing. Uses {@link TestEventLoopGroupProvider} to preserve the event
 * loop groups between tests. Every time a new {@link TestClientResources} instance is created, shutdown hook is added
 * {@link Runtime#addShutdownHook(Thread)}.
 *
 * @author Mark Paluch
 */
public class TestClientResources {

    /**
     * Creates client resources backed by a {@link TestEventLoopGroupProvider} and registers a JVM
     * shutdown hook that releases them at JVM exit.
     *
     * @return new {@link ClientResources} for tests
     */
    public static ClientResources create() {
        final DefaultClientResources resources = new DefaultClientResources.Builder().eventLoopGroupProvider(
                new TestEventLoopGroupProvider()).build();

        Runtime.getRuntime().addShutdownHook(new Thread() {
            @Override
            public void run() {
                try {
                    // Quiet period/timeout of 100ms, then wait up to 10s for the shutdown future.
                    resources.shutdown(100, 100, TimeUnit.MILLISECONDS).get(10, TimeUnit.SECONDS);
                } catch (Exception e) {
                    // Best-effort cleanup during JVM shutdown; just report.
                    e.printStackTrace();
                }
            }
        });

        return resources;
    }
}
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package biz.paluch.spinach; 18 | 19 | import java.net.InetAddress; 20 | import java.net.InetSocketAddress; 21 | import java.net.SocketAddress; 22 | import java.net.UnknownHostException; 23 | 24 | import com.lambdaworks.redis.ConnectionPoint; 25 | import com.lambdaworks.redis.resource.DnsResolver; 26 | 27 | /** 28 | * Resolves a {@link com.lambdaworks.redis.RedisURI} to a {@link java.net.SocketAddress}. 29 | * 30 | * @author Mark Paluch 31 | */ 32 | class SocketAddressResolver { 33 | 34 | /** 35 | * Resolves a {@link ConnectionPoint} to a {@link java.net.SocketAddress}. 36 | * 37 | * @param inetSocketAddress must not be {@literal null} 38 | * @param dnsResolver must not be {@literal null} 39 | * @return the resolved {@link SocketAddress} 40 | */ 41 | public static SocketAddress resolve(InetSocketAddress inetSocketAddress, DnsResolver dnsResolver) { 42 | 43 | try { 44 | InetAddress inetAddress = dnsResolver.resolve(inetSocketAddress.getHostString())[0]; 45 | return new InetSocketAddress(inetAddress, inetSocketAddress.getPort()); 46 | } catch (UnknownHostException e) { 47 | return new InetSocketAddress(inetSocketAddress.getHostString(), inetSocketAddress.getPort()); 48 | } 49 | 50 | } 51 | } -------------------------------------------------------------------------------- /src/main/java/biz/paluch/spinach/impl/RoundRobin.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 
3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package biz.paluch.spinach.impl; 17 | 18 | import java.util.Collection; 19 | 20 | /** 21 | * Circular element provider. This class allows infinite scrolling over a collection with the possibility to provide an initial 22 | * offset. 23 | * 24 | * @author Mark Paluch 25 | */ 26 | public class RoundRobin { 27 | 28 | protected final Collection collection; 29 | protected V offset; 30 | 31 | public RoundRobin(Collection collection) { 32 | this(collection, null); 33 | } 34 | 35 | public RoundRobin(Collection collection, V offset) { 36 | this.collection = collection; 37 | this.offset = offset; 38 | } 39 | 40 | /** 41 | * Returns the next item. 
42 | * 43 | * @return the next item 44 | */ 45 | public V next() { 46 | if (offset != null) { 47 | boolean accept = false; 48 | for (V element : collection) { 49 | if (element == offset) { 50 | accept = true; 51 | continue; 52 | } 53 | 54 | if (accept) { 55 | return offset = element; 56 | } 57 | } 58 | } 59 | 60 | return offset = collection.iterator().next(); 61 | } 62 | 63 | } 64 | -------------------------------------------------------------------------------- /src/site/markdown/index.md.vm: -------------------------------------------------------------------------------- 1 | spinach - A scalable Java Disque client 2 | ============= 3 | 4 | Spinach is a scalable thread-safe Disque client providing both synchronous and 5 | asynchronous APIs. Multiple threads may share one connection if they do not use blocking commands. Spinach is based on 6 | [lettuce](https://github.com/mp911de/lettuce). 7 | Multiple connections are efficiently managed by the excellent netty NIO 8 | framework. 9 | 10 | * Works with Java 6, 7 and 8 11 | * [synchronous](https://github.com/mp911de/spinach/wiki/Basic-usage), [asynchronous](https://github.com/mp911de/spinach/wiki/Asynchronous-API) and [reactive](https://github.com/mp911de/spinach/wiki/Reactive-API) APIs 12 | * [SSL](https://github.com/mp911de/spinach/wiki/SSL-Connections) and [Unix Domain Socket](https://github.com/mp911de/spinach/wiki/Unix-Domain-Sockets) connections 13 | * [Codecs](https://github.com/mp911de/lettuce/wiki/Codecs) (for UTF8/bit/JSON etc. representation of your data) 14 | 15 | See the [Wiki](https://github.com/mp911de/spinach/wiki) for more docs. 
16 | 17 | 18 | Communication 19 | --------------- 20 | 21 | * [Github Issues](https://github.com/mp911de/spinach/issues) 22 | 23 | 24 | Documentation 25 | --------------- 26 | 27 | * [Wiki](https://github.com/mp911de/spinach/wiki) 28 | * [Javadoc](http://spinach.paluch.biz/apidocs/) 29 | 30 | Binaries/Download 31 | ---------------- 32 | 33 | Binaries and dependency information for Maven, Ivy, Gradle and others can be found at http://search.maven.org. 34 | 35 | Releases of spinach are available in the maven central repository. Take also a look at the [Download](https://github.com/mp911de/spinach/wiki/Download) page in the [Wiki](https://github.com/mp911de/lettuce/wiki). 36 | 37 | Example for Maven: 38 | 39 | ```xml 40 | 41 | biz.paluch.redis 42 | spinach 43 | ${spinach-release-version} 44 | 45 | ``` 46 | 47 | All versions: [Maven Central](http://search.maven.org/#search%7Cga%7C1%7Cg%3A%22biz.paluch.redis%22%20AND%20a%3A%spinach%22) 48 | 49 | Snapshots: [Sonatype OSS Repository](https://oss.sonatype.org/#nexus-search;gav~biz.paluch.redis~spinach~~~) 50 | 51 | -------------------------------------------------------------------------------- /src/main/java/biz/paluch/spinach/api/rx/DisqueReactiveCommands.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | package biz.paluch.spinach.api.rx; 17 | 18 | import rx.Observable; 19 | import biz.paluch.spinach.api.DisqueConnection; 20 | 21 | /** 22 | * Reactive commands for Disque. This API is thread-safe. 23 | * 24 | * @param Key type. 25 | * @param Value type. 26 | * @author Mark Paluch 27 | */ 28 | public interface DisqueReactiveCommands extends DisqueJobReactiveCommands, DisqueQueueReactiveCommands, 29 | DisqueServerReactiveCommands, DisqueClusterReactiveCommands { 30 | 31 | /** 32 | * Authenticate to the server. 33 | * 34 | * @param password the password 35 | * @return String simple-string-reply 36 | */ 37 | Observable auth(String password); 38 | 39 | /** 40 | * Close the connection. The connection will become not usable anymore as soon as this method was called. 41 | */ 42 | void close(); 43 | 44 | /** 45 | * 46 | * @return the underlying connection. 47 | */ 48 | DisqueConnection getConnection(); 49 | 50 | /** 51 | * 52 | * @return true if the connection is open (connected and not closed). 53 | */ 54 | boolean isOpen(); 55 | 56 | /** 57 | * Ping the server. 58 | * 59 | * @return simple-string-reply 60 | */ 61 | Observable ping(); 62 | 63 | /** 64 | * Close the connection. 65 | * 66 | * @return String simple-string-reply always OK. 67 | */ 68 | Observable quit(); 69 | 70 | } 71 | -------------------------------------------------------------------------------- /src/test/java/biz/paluch/spinach/ClusterConnectionTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package biz.paluch.spinach; 17 | 18 | import static biz.paluch.spinach.TestSettings.host; 19 | import static biz.paluch.spinach.TestSettings.port; 20 | import static org.assertj.core.api.Assertions.assertThat; 21 | 22 | import org.junit.AfterClass; 23 | import org.junit.BeforeClass; 24 | import org.junit.Test; 25 | 26 | import biz.paluch.spinach.api.DisqueConnection; 27 | import biz.paluch.spinach.support.DefaultDisqueClient; 28 | import biz.paluch.spinach.support.FastShutdown; 29 | 30 | /** 31 | * @author Mark Paluch 32 | */ 33 | public class ClusterConnectionTest { 34 | 35 | private static DisqueClient disqueClient; 36 | 37 | @BeforeClass 38 | public static void beforeClass() { 39 | 40 | DisqueURI disqueURI = new DisqueURI.Builder().withDisque(host(), port()).withDisque(host(), port(1)).build(); 41 | disqueClient = DisqueClient.create(DefaultDisqueClient.getClientResources(), disqueURI); 42 | } 43 | 44 | @AfterClass 45 | public static void afterClass() { 46 | FastShutdown.shutdown(disqueClient); 47 | } 48 | 49 | @Test 50 | public void connect() throws Exception { 51 | DisqueConnection connection = disqueClient.connect(); 52 | 53 | assertThat(connection.sync().info()).contains("tcp_port:" + port()); 54 | connection.sync().quit(); 55 | assertThat(connection.sync().info()).contains("tcp_port:" + port(1)); 56 | 57 | assertThat(connection.isOpen()).isTrue(); 58 | 59 | connection.close(); 60 | } 61 | } 62 | -------------------------------------------------------------------------------- 
/src/main/java/biz/paluch/spinach/api/async/DisqueAsyncCommands.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package biz.paluch.spinach.api.async; 17 | 18 | import biz.paluch.spinach.api.DisqueConnection; 19 | import com.lambdaworks.redis.RedisFuture; 20 | 21 | /** 22 | * Asynchronous executed commands for Disque. This API is thread-safe. 23 | * 24 | * @param Key type. 25 | * @param Value type. 26 | * @author Mark Paluch 27 | */ 28 | public interface DisqueAsyncCommands extends DisqueJobAsyncCommands, DisqueQueueAsyncCommands, 29 | DisqueServerAsyncCommands, DisqueClusterAsyncCommands { 30 | 31 | /** 32 | * Authenticate to the server. 33 | * 34 | * @param password the password 35 | * @return String simple-string-reply 36 | */ 37 | RedisFuture auth(String password); 38 | 39 | /** 40 | * Close the connection. The connection will become not usable anymore as soon as this method was called. 41 | */ 42 | void close(); 43 | 44 | /** 45 | * 46 | * @return the underlying connection. 47 | */ 48 | DisqueConnection getConnection(); 49 | 50 | /** 51 | * 52 | * @return true if the connection is open (connected and not closed). 53 | */ 54 | boolean isOpen(); 55 | 56 | /** 57 | * Ping the server. 
58 | * 59 | * @return simple-string-reply 60 | */ 61 | RedisFuture ping(); 62 | 63 | /** 64 | * Close the connection. 65 | * 66 | * @return String simple-string-reply always OK. 67 | */ 68 | RedisFuture quit(); 69 | 70 | } 71 | -------------------------------------------------------------------------------- /src/test/java/biz/paluch/spinach/support/DefaultDisqueClient.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
/**
 * Singleton holder providing a shared {@link DisqueClient} and {@link ClientResources} for tests.
 * Connects eagerly on first class use; a JVM shutdown hook releases the client.
 *
 * @author Mark Paluch
 */
public class DefaultDisqueClient {

    public final static DefaultDisqueClient instance = new DefaultDisqueClient();

    private DisqueClient disqueClient;
    private ClientResources clientResources;

    public DefaultDisqueClient() {
        // Shared test resources and a client connected to the configured test node.
        clientResources = TestClientResources.create();
        disqueClient = DisqueClient.create(clientResources, DisqueURI.Builder.disque(TestSettings.host(), TestSettings.port())
                .build());
        Runtime.getRuntime().addShutdownHook(new Thread() {
            @Override
            public void run() {
                FastShutdown.shutdown(disqueClient);
            }
        });
    }

    /**
     * Do not close the client.
     *
     * @return the default disque client for the tests.
     */
    public static DisqueClient get() {
        // Timeout is re-applied on every access — presumably so tests cannot leave a
        // shorter default timeout behind for the next test; confirm.
        instance.disqueClient.setDefaultTimeout(60, TimeUnit.SECONDS);
        return instance.disqueClient;
    }

    /**
     * Do not close the client resources.
     *
     * @return the default client resources for the tests.
     */
    public static ClientResources getClientResources() {
        return instance.clientResources;
    }
}
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package biz.paluch.spinach.api; 17 | 18 | import biz.paluch.spinach.api.async.DisqueAsyncCommands; 19 | import biz.paluch.spinach.api.rx.DisqueReactiveCommands; 20 | import biz.paluch.spinach.api.sync.DisqueCommands; 21 | 22 | import com.lambdaworks.redis.api.StatefulConnection; 23 | 24 | /** 25 | * A thread-safe connection to a redis server. Multiple threads may share one {@link DisqueConnection}. 26 | * 27 | * A {@link com.lambdaworks.redis.protocol.ConnectionWatchdog} monitors each connection and reconnects automatically until 28 | * {@link #close} is called. All pending commands will be (re)sent after successful reconnection. 29 | * 30 | * @param Key type. 31 | * @param Value type. 32 | * @author Mark Paluch 33 | */ 34 | public interface DisqueConnection extends StatefulConnection { 35 | 36 | /** 37 | * Returns the {@link DisqueCommands} API for the current connection. Does not create a new connection. 38 | * 39 | * @return the synchronous API for the underlying connection. 40 | */ 41 | DisqueCommands sync(); 42 | 43 | /** 44 | * Returns the {@link DisqueAsyncCommands} API for the current connection. Does not create a new connection. 45 | * 46 | * @return the asynchronous API for the underlying connection. 47 | */ 48 | DisqueAsyncCommands async(); 49 | 50 | /** 51 | * Returns the {@link DisqueReactiveCommands} API for the current connection. Does not create a new connection. 52 | * 53 | * @return the reactive API for the underlying connection. 
54 | */ 55 | DisqueReactiveCommands reactive(); 56 | 57 | } 58 | -------------------------------------------------------------------------------- /src/test/java/biz/paluch/spinach/support/TestEventLoopGroupProvider.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package biz.paluch.spinach.support; 17 | 18 | import java.util.concurrent.TimeUnit; 19 | 20 | import com.lambdaworks.redis.resource.DefaultEventLoopGroupProvider; 21 | 22 | import io.netty.util.concurrent.DefaultPromise; 23 | import io.netty.util.concurrent.EventExecutorGroup; 24 | import io.netty.util.concurrent.ImmediateEventExecutor; 25 | import io.netty.util.concurrent.Promise; 26 | 27 | /** 28 | * A {@link com.lambdaworks.redis.resource.EventLoopGroupProvider} suitable for testing. Preserves the event loop groups between 29 | * tests. Every time a new {@link TestEventLoopGroupProvider} instance is created, shutdown hook is added 30 | * {@link Runtime#addShutdownHook(Thread)}. 
31 | * 32 | * @author Mark Paluch 33 | */ 34 | public class TestEventLoopGroupProvider extends DefaultEventLoopGroupProvider { 35 | 36 | public TestEventLoopGroupProvider() { 37 | super(10); 38 | Runtime.getRuntime().addShutdownHook(new Thread() { 39 | @Override 40 | public void run() { 41 | try { 42 | TestEventLoopGroupProvider.this.shutdown(100, 100, TimeUnit.MILLISECONDS).get(10, TimeUnit.SECONDS); 43 | } catch (Exception e) { 44 | e.printStackTrace(); 45 | } 46 | } 47 | }); 48 | } 49 | 50 | @Override 51 | public Promise release(EventExecutorGroup eventLoopGroup, long quietPeriod, long timeout, TimeUnit unit) { 52 | DefaultPromise result = new DefaultPromise(ImmediateEventExecutor.INSTANCE); 53 | result.setSuccess(true); 54 | 55 | return result; 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /src/main/java/biz/paluch/spinach/api/sync/DisqueCommands.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package biz.paluch.spinach.api.sync; 17 | 18 | import java.util.concurrent.TimeUnit; 19 | 20 | import biz.paluch.spinach.api.DisqueConnection; 21 | 22 | /** 23 | * 24 | * Synchronous executed commands for Disque. This API is thread-safe. 25 | * 26 | * @param Key type. 27 | * @param Value type. 
28 | * @author Mark Paluch 29 | */ 30 | public interface DisqueCommands extends DisqueJobCommands, DisqueQueueCommands, DisqueServerCommands, 31 | DisqueClusterCommands { 32 | 33 | /** 34 | * Authenticate to the server. 35 | * 36 | * @param password the password 37 | * @return String simple-string-reply 38 | */ 39 | String auth(String password); 40 | 41 | /** 42 | * Close the connection. The connection will become not usable anymore as soon as this method was called. 43 | */ 44 | void close(); 45 | 46 | /** 47 | * 48 | * @return the underlying connection. 49 | */ 50 | DisqueConnection getConnection(); 51 | 52 | /** 53 | * 54 | * @return true if the connection is open (connected and not closed). 55 | */ 56 | boolean isOpen(); 57 | 58 | /** 59 | * Ping the server. 60 | * 61 | * @return simple-string-reply 62 | */ 63 | String ping(); 64 | 65 | /** 66 | * Close the connection. 67 | * 68 | * @return String simple-string-reply always OK. 69 | */ 70 | String quit(); 71 | 72 | /** 73 | * Set the default timeout for operations. 74 | * 75 | * @param timeout the timeout value 76 | * @param unit the unit of the timeout value 77 | */ 78 | void setTimeout(long timeout, TimeUnit unit); 79 | } 80 | -------------------------------------------------------------------------------- /src/main/java/biz/paluch/spinach/output/JobOutput.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package biz.paluch.spinach.output; 17 | 18 | import java.nio.ByteBuffer; 19 | import java.util.HashMap; 20 | import java.util.Map; 21 | 22 | import biz.paluch.spinach.api.Job; 23 | 24 | import com.lambdaworks.redis.codec.RedisCodec; 25 | import com.lambdaworks.redis.output.CommandOutput; 26 | 27 | /** 28 | * Output handler for commands returning {@link Job} data structres. 29 | * 30 | * @author Mark Paluch 31 | */ 32 | public class JobOutput extends CommandOutput> { 33 | 34 | private K queue; 35 | private String id; 36 | private V body; 37 | private Map counters = new HashMap(); 38 | private String lastKey; 39 | 40 | public JobOutput(RedisCodec codec) { 41 | super(codec, null); 42 | } 43 | 44 | @Override 45 | public void set(ByteBuffer bytes) { 46 | 47 | if (queue == null) { 48 | queue = codec.decodeKey(bytes); 49 | return; 50 | } 51 | 52 | if (id == null) { 53 | id = decodeAscii(bytes); 54 | return; 55 | } 56 | 57 | if (body == null) { 58 | counters = new HashMap(); 59 | body = codec.decodeValue(bytes); 60 | return; 61 | } 62 | 63 | lastKey = decodeAscii(bytes); 64 | } 65 | 66 | @Override 67 | public void set(long integer) { 68 | if (lastKey != null) { 69 | counters.put(lastKey, integer); 70 | lastKey = null; 71 | } 72 | } 73 | 74 | @Override 75 | public void complete(int depth) { 76 | if (queue != null && id != null && body != null && counters != null) { 77 | output = new Job(queue, id, body, counters); 78 | } 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /src/test/java/biz/paluch/spinach/commands/rx/RxJobCommandTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 
3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package biz.paluch.spinach.commands.rx; 17 | 18 | import static org.assertj.core.api.Assertions.assertThat; 19 | 20 | import java.util.concurrent.TimeUnit; 21 | 22 | import org.junit.Before; 23 | import org.junit.Test; 24 | 25 | import rx.Observable; 26 | import rx.functions.Func1; 27 | import biz.paluch.spinach.api.Job; 28 | import biz.paluch.spinach.api.rx.DisqueReactiveCommands; 29 | import biz.paluch.spinach.commands.JobCommandTest; 30 | 31 | /** 32 | * @author Mark Paluch 33 | */ 34 | public class RxJobCommandTest extends JobCommandTest { 35 | 36 | protected DisqueReactiveCommands rx; 37 | 38 | @Before 39 | public void openConnection() throws Exception { 40 | disque = RxSyncInvocationHandler.sync(client.connect()); 41 | disque.debugFlushall(); 42 | rx = disque.getConnection().reactive(); 43 | } 44 | 45 | @Test 46 | public void addJob() throws Exception { 47 | 48 | String result = rx.addjob(queue, value, 5, TimeUnit.SECONDS).toBlocking().first(); 49 | assertThat(result).startsWith("D-"); 50 | } 51 | 52 | @Test 53 | public void rxChaining() throws Exception { 54 | 55 | addJob(); 56 | long qlen = rx.qlen(queue).toBlocking().first(); 57 | 58 | assertThat(qlen).isEqualTo(1); 59 | 60 | final DisqueReactiveCommands rx = client.connect().reactive(); 61 | rx.getjob(queue).flatMap(new Func1, Observable>() { 62 | @Override 63 | public Observable call(Job job) { 64 | return 
rx.ackjob(job.getId()); 65 | } 66 | }).subscribe(); 67 | 68 | assertThat(rx.qlen(queue).toBlocking().first()).isEqualTo(0); 69 | 70 | } 71 | 72 | } 73 | -------------------------------------------------------------------------------- /src/assembly/bin.xml: -------------------------------------------------------------------------------- 1 | 4 | bin 5 | 6 | tar.gz 7 | zip 8 | dir 9 | 10 | ${project.artifactId}-${project.version}-bin 11 | false 12 | 13 | 14 | 15 | biz.paluch.redis:spinach:jar:${project.version} 16 | 17 | 18 | false 19 | true 20 | true 21 | 22 | 23 | 24 | io.netty:* 25 | 26 | provided 27 | dependencies 28 | false 29 | 30 | 31 | 32 | io.netty:* 33 | io.reactivex:* 34 | com.google.guava:* 35 | org.apache.commons:* 36 | biz.paluch.redis:lettuce:* 37 | 38 | dependencies 39 | false 40 | 41 | 42 | 43 | biz.paluch.redis:spinach:*:javadoc 44 | 45 | apidocs 46 | true 47 | false 48 | true 49 | 50 | 51 | 52 | 53 | 54 | 55 | LICENSE 56 | README.md 57 | RELEASE-NOTES.md 58 | 59 | 60 | 61 | 62 | -------------------------------------------------------------------------------- /src/site/site.xml: -------------------------------------------------------------------------------- 1 | 16 | 19 | 20 | spinach - A scalable Java Disque client 21 | http://oss.paluch.biz/spinach-doc 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | true 43 | 44 | mp911de/spinach 45 | right 46 | black 47 | 48 | 49 | mp911de 50 | true 51 | true 52 | 53 | 54 | piwik.paluch.biz 55 | 8 56 | 57 | 58 | 59 | 60 | 61 | org.apache.maven.skins 62 | maven-fluido-skin 63 | 1.3.1 64 | 65 | -------------------------------------------------------------------------------- /src/test/java/biz/paluch/spinach/RoundRobinSocketAddressSupplierTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 
3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package biz.paluch.spinach; 17 | 18 | import static org.assertj.core.api.Assertions.assertThat; 19 | 20 | import java.net.InetSocketAddress; 21 | import java.util.Arrays; 22 | import java.util.Collection; 23 | 24 | import org.junit.Test; 25 | 26 | import biz.paluch.spinach.impl.RoundRobinSocketAddressSupplier; 27 | 28 | /** 29 | * @author Mark Paluch 30 | */ 31 | public class RoundRobinSocketAddressSupplierTest { 32 | 33 | private static DisqueURI.DisqueHostAndPort hap1 = new DisqueURI.DisqueHostAndPort("127.0.0.1", 1); 34 | private static DisqueURI.DisqueHostAndPort hap2 = new DisqueURI.DisqueHostAndPort("127.0.0.1", 2); 35 | private static DisqueURI.DisqueHostAndPort hap3 = new DisqueURI.DisqueHostAndPort("127.0.0.1", 3); 36 | 37 | private Collection points = Arrays.asList(hap1, hap2, hap3); 38 | 39 | @Test 40 | public void noOffset() throws Exception { 41 | 42 | RoundRobinSocketAddressSupplier sut = new RoundRobinSocketAddressSupplier(points, null); 43 | 44 | assertThat(sut.get()).isEqualTo(getSocketAddress(hap1)); 45 | assertThat(sut.get()).isEqualTo(getSocketAddress(hap2)); 46 | assertThat(sut.get()).isEqualTo(getSocketAddress(hap3)); 47 | assertThat(sut.get()).isEqualTo(getSocketAddress(hap1)); 48 | } 49 | 50 | @Test 51 | public void withOffset() throws Exception { 52 | 53 | RoundRobinSocketAddressSupplier sut = new RoundRobinSocketAddressSupplier(points, hap2); 
54 | 55 | assertThat(sut.get()).isEqualTo(getSocketAddress(hap3)); 56 | assertThat(sut.get()).isEqualTo(getSocketAddress(hap1)); 57 | assertThat(sut.get()).isEqualTo(getSocketAddress(hap2)); 58 | assertThat(sut.get()).isEqualTo(getSocketAddress(hap3)); 59 | } 60 | 61 | private InetSocketAddress getSocketAddress(DisqueURI.DisqueHostAndPort hostAndPort) { 62 | return InetSocketAddress.createUnresolved(hostAndPort.getHost(), hostAndPort 63 | .getPort()); 64 | } 65 | } -------------------------------------------------------------------------------- /src/main/java/biz/paluch/spinach/impl/HelloClusterSocketAddressSupplier.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package biz.paluch.spinach.impl; 17 | 18 | import java.net.InetSocketAddress; 19 | import java.net.SocketAddress; 20 | 21 | import biz.paluch.spinach.api.DisqueConnection; 22 | import biz.paluch.spinach.cluster.DisqueNode; 23 | 24 | /** 25 | * Supplier for {@link SocketAddress adresses} that is aware of the cluster nodes. 26 | *

27 | * This class performs a {@code HELLO} command handshake upon connection and retrieves the nodes from the command result. The 28 | * node set is not refreshed once it is retrieved. The nodes are used in the order of their priority in a round-robin fashion. 29 | * Until the handshake is completed a fallback {@link SocketAddressSupplier} is used. 30 | *

31 | * 32 | * @author Mark Paluch 33 | */ 34 | public class HelloClusterSocketAddressSupplier extends ClusterAwareNodeSupport implements SocketAddressSupplier, 35 | ConnectionAware { 36 | 37 | protected final SocketAddressSupplier bootstrap; 38 | protected RoundRobin roundRobin; 39 | 40 | /** 41 | * 42 | * @param bootstrap bootstrap/fallback {@link SocketAddressSupplier} for bootstrapping before any communication is done. 43 | */ 44 | public HelloClusterSocketAddressSupplier(SocketAddressSupplier bootstrap) { 45 | this.bootstrap = bootstrap; 46 | } 47 | 48 | @Override 49 | public SocketAddress get() { 50 | 51 | if (getNodes().isEmpty()) { 52 | return bootstrap.get(); 53 | } 54 | 55 | DisqueNode disqueNode = roundRobin.next(); 56 | return InetSocketAddress.createUnresolved(disqueNode.getAddr(), disqueNode.getPort()); 57 | } 58 | 59 | @Override 60 | public void setConnection(DisqueConnection disqueConnection) { 61 | super.setConnection(disqueConnection); 62 | reloadNodes(); 63 | } 64 | 65 | @Override 66 | public void reloadNodes() { 67 | super.reloadNodes(); 68 | roundRobin = new RoundRobin(getNodes()); 69 | } 70 | 71 | } 72 | -------------------------------------------------------------------------------- /src/test/java/biz/paluch/spinach/UnixDomainSocketTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package biz.paluch.spinach; 17 | 18 | import static org.assertj.core.api.Assertions.assertThat; 19 | import static org.junit.Assume.assumeTrue; 20 | 21 | import java.util.Locale; 22 | 23 | import biz.paluch.spinach.support.DefaultDisqueClient; 24 | import org.junit.Test; 25 | 26 | import biz.paluch.spinach.api.sync.DisqueCommands; 27 | import biz.paluch.spinach.commands.AbstractCommandTest; 28 | import biz.paluch.spinach.support.FastShutdown; 29 | 30 | import com.lambdaworks.redis.resource.ClientResources; 31 | import io.netty.util.internal.SystemPropertyUtil; 32 | 33 | /** 34 | * @author Mark Paluch 35 | */ 36 | public class UnixDomainSocketTest extends AbstractCommandTest { 37 | 38 | private static ClientResources clientResources = DefaultDisqueClient.getClientResources(); 39 | 40 | @Test 41 | public void linux_x86_64_socket() throws Exception { 42 | 43 | linuxOnly(); 44 | 45 | DisqueClient disqueClient = DisqueClient.create(clientResources, DisqueURI.Builder.disqueSocket(TestSettings.socket()) 46 | .build()); 47 | 48 | DisqueCommands connection = disqueClient.connect().sync(); 49 | 50 | connection.debugFlushall(); 51 | connection.ping(); 52 | 53 | FastShutdown.shutdown(disqueClient); 54 | } 55 | 56 | @Test 57 | public void differentSocketTypes() throws Exception { 58 | 59 | DisqueClient disqueClient = DisqueClient.create(clientResources, DisqueURI.Builder.disqueSocket(TestSettings.socket()) 60 | .withDisque(TestSettings.host()).build()); 61 | 62 | try { 63 | disqueClient.connect(); 64 | } catch (Exception e) { 65 | assertThat(e).hasMessageContaining("You cannot mix unix"); 66 | } 67 | 68 | FastShutdown.shutdown(disqueClient); 69 | } 70 | 71 | private void linuxOnly() { 72 | String osName = SystemPropertyUtil.get("os.name").toLowerCase(Locale.UK).trim(); 73 | assumeTrue("Only supported on Linux, your os is " + osName, 
osName.startsWith("linux")); 74 | } 75 | 76 | } 77 | -------------------------------------------------------------------------------- /src/main/java/biz/paluch/spinach/output/QstatMapOutput.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package biz.paluch.spinach.output; 17 | 18 | import java.nio.ByteBuffer; 19 | import java.util.*; 20 | 21 | import com.lambdaworks.redis.codec.RedisCodec; 22 | import com.lambdaworks.redis.output.CommandOutput; 23 | 24 | /** 25 | * @author Mark Paluch 26 | */ 27 | public class QstatMapOutput extends CommandOutput> { 28 | private final Deque> stack; 29 | private int depth; 30 | private String key; 31 | 32 | @SuppressWarnings("rawtypes") 33 | public QstatMapOutput(RedisCodec codec) { 34 | super(codec, new HashMap()); 35 | stack = new LinkedList>(); 36 | depth = 0; 37 | } 38 | 39 | @Override 40 | public void set(ByteBuffer bytes) { 41 | if (stack.isEmpty()) { 42 | if (key == null) { 43 | key = decodeAscii(bytes); 44 | return; 45 | } 46 | 47 | Object value = (bytes == null) ? null : key.equals("queue") ? codec.decodeKey(bytes) : decodeAscii(bytes); 48 | output.put(key, value); 49 | key = null; 50 | } else { 51 | stack.peek().add(bytes == null ? 
null : decodeAscii(bytes)); 52 | } 53 | } 54 | 55 | @Override 56 | public void set(long integer) { 57 | if (stack.isEmpty()) { 58 | if (key == null) { 59 | key = ""; 60 | return; 61 | } 62 | 63 | output.put(key, Long.valueOf(integer)); 64 | key = null; 65 | } else { 66 | stack.peek().add(integer); 67 | } 68 | } 69 | 70 | @Override 71 | public void complete(int depth) { 72 | if (depth < this.depth) { 73 | if (!stack.isEmpty()) { 74 | output.put(key, stack.pop()); 75 | key = null; 76 | } 77 | this.depth--; 78 | } 79 | } 80 | 81 | @Override 82 | public void multi(int count) { 83 | this.depth++; 84 | if (depth > 1) { 85 | List a = new ArrayList(count); 86 | stack.push(a); 87 | } 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /src/main/java/biz/paluch/spinach/impl/SocketAddressSupplierFactory.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package biz.paluch.spinach.impl; 17 | 18 | import com.lambdaworks.redis.internal.LettuceAssert; 19 | import com.lambdaworks.redis.internal.LettuceLists; 20 | 21 | import biz.paluch.spinach.DisqueURI; 22 | 23 | /** 24 | * Factory for a {@link SocketAddressSupplier}. 
This factory creates new instances of {@link SocketAddressSupplier} based on a 25 | * {@link DisqueURI} for every logical connection. 26 | * 27 | * @author Mark Paluch 28 | */ 29 | public interface SocketAddressSupplierFactory { 30 | 31 | /** 32 | * Creates a new {@link SocketAddressSupplier} from a {@link SocketAddressSupplier}. 33 | * 34 | * @param disqueURI the connection URI object, must not be {@literal null} 35 | * @return an new instance of {@link SocketAddressSupplier} 36 | */ 37 | SocketAddressSupplier newSupplier(DisqueURI disqueURI); 38 | 39 | enum Factories implements SocketAddressSupplierFactory { 40 | /** 41 | * Round-Robin address supplier that runs circularly over the provided connection points within the {@link DisqueURI}. 42 | */ 43 | ROUND_ROBIN { 44 | @Override 45 | public SocketAddressSupplier newSupplier(DisqueURI disqueURI) { 46 | LettuceAssert.notNull(disqueURI, "DisqueURI must not be null"); 47 | return new RoundRobinSocketAddressSupplier(LettuceLists.unmodifiableList(disqueURI.getConnectionPoints())); 48 | } 49 | }, 50 | 51 | /** 52 | * Cluster-topology-aware address supplier that obtains its initial connection point from the {@link DisqueURI} and then 53 | * looks up the cluster topology using the {@code HELLO} command. Connections are established using the cluster node IP 54 | * addresses. 
55 | */ 56 | HELLO_CLUSTER { 57 | @Override 58 | public SocketAddressSupplier newSupplier(DisqueURI disqueURI) { 59 | LettuceAssert.notNull(disqueURI, "DisqueURI must not be null"); 60 | return new HelloClusterSocketAddressSupplier(ROUND_ROBIN.newSupplier(disqueURI)); 61 | } 62 | }; 63 | 64 | @Override 65 | public SocketAddressSupplier newSupplier(DisqueURI disqueURI) { 66 | return null; 67 | } 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /src/main/java/biz/paluch/spinach/impl/RoundRobinSocketAddressSupplier.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package biz.paluch.spinach.impl; 17 | 18 | import java.net.InetSocketAddress; 19 | import java.net.SocketAddress; 20 | import java.util.Collection; 21 | 22 | import com.lambdaworks.redis.ConnectionPoint; 23 | import com.lambdaworks.redis.internal.LettuceAssert; 24 | 25 | import biz.paluch.spinach.DisqueURI; 26 | 27 | /** 28 | * Round-Robin socket address supplier. Connection points are iterated circular/infinitely. 
29 | * 30 | * @author Mark Paluch 31 | */ 32 | public class RoundRobinSocketAddressSupplier implements SocketAddressSupplier { 33 | 34 | protected final Collection connectionPoint; 35 | protected RoundRobin roundRobin; 36 | 37 | /** 38 | * 39 | * @param connectionPoints the collection of {@link ConnectionPoint connection points}, must not be {@literal null}. 40 | */ 41 | public RoundRobinSocketAddressSupplier(Collection connectionPoints) { 42 | this(connectionPoints, null); 43 | } 44 | 45 | /** 46 | * 47 | * @param connectionPoints the collection of {@link ConnectionPoint connection points}, must not be {@literal null}. 48 | * @param offset {@link ConnectionPoint connection point} offset for starting the round robin cycle at that point, can be 49 | * {@literal null}. 50 | */ 51 | public RoundRobinSocketAddressSupplier(Collection connectionPoints, ConnectionPoint offset) { 52 | LettuceAssert.notNull(connectionPoints, "ConnectionPoints must not be null"); 53 | this.connectionPoint = connectionPoints; 54 | this.roundRobin = new RoundRobin(connectionPoints, offset); 55 | } 56 | 57 | @Override 58 | public SocketAddress get() { 59 | ConnectionPoint connectionPoint = roundRobin.next(); 60 | return getSocketAddress(connectionPoint); 61 | } 62 | 63 | protected static SocketAddress getSocketAddress(ConnectionPoint connectionPoint) { 64 | 65 | if (connectionPoint instanceof DisqueURI.DisqueSocket) { 66 | return ((DisqueURI.DisqueSocket) connectionPoint).getSocketAddress(); 67 | } 68 | return InetSocketAddress.createUnresolved(connectionPoint.getHost(), connectionPoint.getPort()); 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /src/test/java/biz/paluch/spinach/commands/BasicCommandTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 
3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package biz.paluch.spinach.commands; 17 | 18 | import static org.assertj.core.api.Assertions.assertThat; 19 | import static org.assertj.core.api.Assertions.fail; 20 | 21 | import java.util.List; 22 | import java.util.concurrent.TimeUnit; 23 | 24 | import org.junit.Test; 25 | 26 | import biz.paluch.spinach.DisqueClient; 27 | import biz.paluch.spinach.DisqueURI; 28 | import biz.paluch.spinach.TestSettings; 29 | import biz.paluch.spinach.api.sync.DisqueCommands; 30 | 31 | import com.lambdaworks.redis.RedisException; 32 | 33 | /** 34 | * @author Mark Paluch 35 | */ 36 | public class BasicCommandTest extends AbstractCommandTest { 37 | 38 | @Test 39 | public void hello() throws Exception { 40 | 41 | List result = disque.hello(); 42 | // 1, nodeId, two nested nodes 43 | assertThat(result.size()).isGreaterThan(3); 44 | } 45 | 46 | @Test 47 | public void info() throws Exception { 48 | 49 | String result = disque.ping(); 50 | assertThat(result).isNotEmpty(); 51 | } 52 | 53 | @Test 54 | public void auth() throws Exception { 55 | new WithPasswordRequired() { 56 | @Override 57 | protected void run(DisqueClient client) throws Exception { 58 | DisqueCommands connection = client.connect().sync(); 59 | try { 60 | connection.ping(); 61 | fail("Server doesn't require authentication"); 62 | } catch (RedisException e) { 63 | assertThat(e.getMessage()).isEqualTo("NOAUTH Authentication 
import java.net.InetAddress;
import java.net.UnknownHostException;

/**
 * This class provides settings used while testing. You can override these using system properties.
 *
 * @author Mark Paluch
 */
public class TestSettings {

    private TestSettings() {
        // utility class, no instances
    }

    /**
     * @return hostname of your Disque instance. Defaults to {@literal localhost}. Can be overridden with
     *         {@code -Dhost=YourHostName}
     */
    public static String host() {
        return System.getProperty("host", "localhost");
    }

    /**
     * @return resolved address of {@link #host()}
     * @throws IllegalStateException when the hostname cannot be resolved
     */
    public static String hostAddr() {
        try {
            return InetAddress.getByName(host()).getHostAddress();
        } catch (UnknownHostException e) {
            throw new IllegalStateException(e);
        }
    }

    /**
     * @return unix domain socket name of your Disque instance. Defaults to {@literal work/disque-7711/socket}. Can be
     *         overridden with {@code -Ddomainsocket=YourSocket}
     */
    public static String socket() {
        return System.getProperty("domainsocket", "work/disque-7711/socket");
    }

    /**
     * @return password of your Disque instance. Defaults to {@literal passwd}. Can be overridden with
     *         {@code -Dpassword=YourPassword}
     */
    public static String password() {
        return System.getProperty("password", "passwd");
    }

    /**
     * @return port of your Disque instance. Defaults to {@literal 7711}. Can be overridden with {@code -Dport=1234}
     */
    public static int port() {
        // parseInt returns a primitive int and avoids the needless boxing of Integer.valueOf(...)
        return Integer.parseInt(System.getProperty("port", "7711"));
    }

    /**
     * @return SSL port of your Disque instance. Defaults to {@literal 7443}. Can be overridden with
     *         {@code -Dsslport=1234}
     */
    public static int sslPort() {
        return Integer.parseInt(System.getProperty("sslport", "7443"));
    }

    /**
     * @param offset offset to add to the configured port
     * @return {@link #port()} with added {@literal offset}
     */
    public static int port(int offset) {
        return port() + offset;
    }
}
package biz.paluch.spinach.examples;

import java.util.Collections;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

import biz.paluch.spinach.DisqueURI;
import biz.paluch.spinach.api.DisqueConnection;
import biz.paluch.spinach.impl.HelloClusterSocketAddressSupplier;
import biz.paluch.spinach.impl.RoundRobinSocketAddressSupplier;
import biz.paluch.spinach.impl.SocketAddressSupplier;
import biz.paluch.spinach.impl.SocketAddressSupplierFactory;

/**
 * Example {@link SocketAddressSupplierFactory} that schedules a periodic refresh of the known cluster nodes once a
 * supplier's connection is established. All scheduled refresh tasks are tracked so they can be cancelled via
 * {@link #shutdown()}.
 *
 * @author Mark Paluch
 */
public class PeriodicallyUpdatingSocketAddressSupplierFactory implements SocketAddressSupplierFactory {

    private final ScheduledExecutorService scheduledExecutorService;

    // JDK-based concurrent set; avoids depending on netty's internal (non-public API) ConcurrentSet class
    private final Set<ScheduledFuture<?>> futures = Collections
            .newSetFromMap(new ConcurrentHashMap<ScheduledFuture<?>, Boolean>());

    /**
     * @param scheduledExecutorService executor used to run the periodic node refresh; not shut down by this class
     */
    public PeriodicallyUpdatingSocketAddressSupplierFactory(ScheduledExecutorService scheduledExecutorService) {
        this.scheduledExecutorService = scheduledExecutorService;
    }

    @Override
    public SocketAddressSupplier newSupplier(DisqueURI disqueURI) {

        RoundRobinSocketAddressSupplier bootstrap = new RoundRobinSocketAddressSupplier(disqueURI.getConnectionPoints());

        HelloClusterSocketAddressSupplier helloCluster = new HelloClusterSocketAddressSupplier(bootstrap) {

            /**
             * This method is called only once when the connection is established.
             */
            @Override
            public void setConnection(DisqueConnection disqueConnection) {

                Runnable command = new Runnable() {
                    @Override
                    public void run() {
                        reloadNodes();
                    }
                };

                // refresh the cluster topology once per hour
                ScheduledFuture<?> scheduledFuture = scheduledExecutorService.scheduleAtFixedRate(command, 1, 1,
                        TimeUnit.HOURS);

                futures.add(scheduledFuture);
                super.setConnection(disqueConnection);
            }

        };

        return helloCluster;
    }

    /**
     * Shutdown the scheduling. Cancels all pending refresh tasks without interrupting a task that is currently running.
     */
    public void shutdown() {
        for (ScheduledFuture<?> future : futures) {
            future.cancel(false);
        }
    }
}
package biz.paluch.spinach.impl;

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.reflect.AbstractInvocationHandler;
import com.lambdaworks.redis.LettuceFutures;
import com.lambdaworks.redis.RedisFuture;
import com.lambdaworks.redis.api.StatefulConnection;
import com.lambdaworks.redis.api.StatefulRedisConnection;

/**
 * Invocation-handler to synchronize API calls which use Futures as backend. This class leverages the need to implement a full
 * sync class which just delegates every request.
 *
 * @param <K> Key type.
 * @param <V> Value type.
 * @author Mark Paluch
 */
class FutureSyncInvocationHandler<K, V> extends AbstractInvocationHandler {

    // underlying stateful connection; supplies the timeout used when blocking on futures
    private final StatefulConnection<K, V> connection;

    // async API instance every proxied call is delegated to
    private final Object asyncApi;

    // caches reflective lookups: sync interface method -> matching method on the concrete async API
    private LoadingCache<Method, Method> methodCache;

    public FutureSyncInvocationHandler(StatefulConnection<K, V> connection, Object asyncApi) {
        this.connection = connection;
        this.asyncApi = asyncApi;

        methodCache = CacheBuilder.newBuilder().build(new CacheLoader<Method, Method>() {
            @Override
            public Method load(Method key) throws Exception {
                // resolve by name + parameter types on the async implementation class
                return asyncApi.getClass().getMethod(key.getName(), key.getParameterTypes());
            }
        });

    }

    /**
     * Invokes the matching async method. If the async call yields a {@link RedisFuture}, blocks until the result is
     * available (bounded by the connection timeout) and returns the unwrapped value; other results pass through as-is.
     */
    @Override
    @SuppressWarnings("unchecked")
    protected Object handleInvocation(Object proxy, Method method, Object[] args) throws Throwable {

        try {

            Method targetMethod = methodCache.get(method);
            Object result = targetMethod.invoke(asyncApi, args);

            if (result instanceof RedisFuture) {
                RedisFuture<?> command = (RedisFuture<?>) result;
                // Inside a MULTI transaction futures only complete on EXEC, so blocking here would hang;
                // return null instead (except for the exec/multi commands themselves).
                // NOTE(review): relies on lettuce StatefulRedisConnection.isMulti() semantics — confirm against
                // the lettuce version in use.
                if (!method.getName().equals("exec") && !method.getName().equals("multi")) {
                    if (connection instanceof StatefulRedisConnection && ((StatefulRedisConnection) connection).isMulti()) {
                        return null;
                    }
                }

                // wait for completion (cancelling on timeout), then unwrap the command's value
                LettuceFutures.awaitOrCancel(command, connection.getTimeout(), connection.getTimeoutUnit());
                return command.get();
            }
            return result;
        } catch (InvocationTargetException e) {
            // rethrow the original cause instead of the reflection wrapper
            throw e.getTargetException();
        }

    }
}
package biz.paluch.spinach.impl;

import static org.assertj.core.api.Assertions.assertThat;

import java.nio.ByteBuffer;
import java.util.concurrent.ExecutionException;

import com.lambdaworks.redis.codec.Utf8StringCodec;
import com.lambdaworks.redis.protocol.AsyncCommand;
import org.junit.After;
import org.junit.Test;

import biz.paluch.spinach.api.CommandKeyword;

import com.lambdaworks.redis.output.StatusOutput;

/**
 * Unit tests for completing and failing a {@link DisqueCommand} through its {@link AsyncCommand} wrapper.
 *
 * @author Mark Paluch
 */
public class DisqueCommandTest {

    private Utf8StringCodec codec = new Utf8StringCodec();
    private DisqueCommand<String, String, String> command = new DisqueCommand<>(CommandKeyword.GET,
            new StatusOutput<>(codec), new DisqueCommandArgs<>(codec));
    private AsyncCommand<String, String, String> sut = new AsyncCommand<>(command);

    @After
    public void tearDown() throws Exception {
        // clear interrupted flag so interrupt-based tests cannot leak state into the next test
        Thread.interrupted();
    }

    /** A completed command exposes the value written to its output. (Fixed method-name typo: was sucessfulGet.) */
    @Test
    public void successfulGet() throws Exception {

        sut.getOutput().set(ByteBuffer.wrap("OK".getBytes()));
        sut.complete();
        assertThat(sut.get()).isEqualTo("OK");
    }

    /** An exceptional completion surfaces as ExecutionException from get(). */
    @Test(expected = ExecutionException.class)
    public void exceptionOnGet() throws Exception {

        sut.completeExceptionally(new IllegalStateException());
        sut.get();
    }

    /** A protocol-level error set on the output also surfaces as ExecutionException. */
    @Test(expected = ExecutionException.class)
    public void exceptionOnOutputError() throws Exception {

        sut.getOutput().setError("blubb");
        sut.complete();
        sut.get();
    }

    /** get() on an already-completed command succeeds even with the interrupt flag set. */
    @Test
    public void interrupt() throws Exception {

        sut.complete();
        Thread.currentThread().interrupt();
        sut.get();
    }

    // NOTE(review): the three "WithTimeout" tests below call the no-arg get(); despite their names they do
    // not exercise get(long, TimeUnit) — confirm whether a timeout variant was intended.

    /** get() on an incomplete command with the interrupt flag set throws InterruptedException. */
    @Test(expected = InterruptedException.class)
    public void interruptGetWithTimeout() throws Exception {

        Thread.currentThread().interrupt();
        sut.get();
    }

    @Test(expected = ExecutionException.class)
    public void exceptionOnGetWithTimeout() throws Exception {

        sut.completeExceptionally(new IllegalStateException());
        sut.get();
    }

    @Test(expected = ExecutionException.class)
    public void exceptionOnGetWithTimeoutOutputError() throws Exception {

        sut.getOutput().setError("blubb");
        sut.complete();
        sut.get();
    }
}
package biz.paluch.spinach.impl;

import java.util.Map;

import com.lambdaworks.redis.codec.RedisCodec;
import com.lambdaworks.redis.protocol.CommandArgs;
import com.lambdaworks.redis.protocol.CommandKeyword;
import com.lambdaworks.redis.protocol.CommandType;
import com.lambdaworks.redis.protocol.ProtocolKeyword;

/**
 * {@link CommandArgs} subclass that narrows the return type of every fluent builder method to
 * {@link DisqueCommandArgs}, so chained calls keep the Disque-specific type. Every override simply delegates to the
 * superclass and casts the result — no behavior is added.
 *
 * @param <K> Key type.
 * @param <V> Value type.
 * @author Mark Paluch
 * @since 24.06.15 08:15
 */
class DisqueCommandArgs<K, V> extends CommandArgs<K, V> {

    public DisqueCommandArgs(RedisCodec<K, V> codec) {
        super(codec);
    }

    // --- all methods below: delegate to CommandArgs, narrow the return type for fluent chaining ---

    @Override
    public DisqueCommandArgs<K, V> add(String s) {
        return (DisqueCommandArgs<K, V>) super.add(s);
    }

    @Override
    public DisqueCommandArgs<K, V> addKey(K key) {
        return (DisqueCommandArgs<K, V>) super.addKey(key);
    }

    @Override
    public DisqueCommandArgs<K, V> addKeys(K... keys) {
        return (DisqueCommandArgs<K, V>) super.addKeys(keys);
    }

    @Override
    public DisqueCommandArgs<K, V> addValue(V value) {
        return (DisqueCommandArgs<K, V>) super.addValue(value);
    }

    @Override
    public DisqueCommandArgs<K, V> add(long n) {
        return (DisqueCommandArgs<K, V>) super.add(n);
    }

    @Override
    public DisqueCommandArgs<K, V> add(byte[] value) {
        return (DisqueCommandArgs<K, V>) super.add(value);
    }

    @Override
    public DisqueCommandArgs<K, V> add(CommandKeyword keyword) {
        return (DisqueCommandArgs<K, V>) super.add(keyword);
    }

    @Override
    public DisqueCommandArgs<K, V> add(CommandType type) {
        return (DisqueCommandArgs<K, V>) super.add(type);
    }

    @Override
    public DisqueCommandArgs<K, V> add(ProtocolKeyword keyword) {
        return (DisqueCommandArgs<K, V>) super.add(keyword);
    }

    @Override
    public DisqueCommandArgs<K, V> addKeys(Iterable<K> keys) {
        return (DisqueCommandArgs<K, V>) super.addKeys(keys);
    }

    @Override
    public DisqueCommandArgs<K, V> addValues(V... values) {
        return (DisqueCommandArgs<K, V>) super.addValues(values);
    }

    @Override
    public DisqueCommandArgs<K, V> add(Map<K, V> map) {
        return (DisqueCommandArgs<K, V>) super.add(map);
    }

    @Override
    public DisqueCommandArgs<K, V> add(double n) {
        return (DisqueCommandArgs<K, V>) super.add(n);
    }
}
package biz.paluch.spinach.commands.rx;

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

import com.lambdaworks.redis.internal.AbstractInvocationHandler;
import com.lambdaworks.redis.internal.LettuceLists;

import biz.paluch.spinach.api.DisqueConnection;
import biz.paluch.spinach.api.sync.DisqueCommands;
import rx.Observable;

/**
 * Invocation handler that adapts the reactive (Rx) Disque API to the synchronous {@link DisqueCommands} interface by
 * blocking on the {@link Observable}s it returns and reshaping the emitted items to the declared return type.
 */
public class RxSyncInvocationHandler<K, V> extends AbstractInvocationHandler {

    private final DisqueConnection<K, V> connection;
    private final Object rxApi;

    public RxSyncInvocationHandler(DisqueConnection<K, V> connection, Object rxApi) {
        this.connection = connection;
        this.rxApi = rxApi;
    }

    /**
     * Delegates the call to the reactive API, blocks until the resulting {@link Observable} completes and converts the
     * emissions into a {@link List}, a {@link Set}, a single value, or {@code null} when nothing was emitted.
     *
     * @see AbstractInvocationHandler#handleInvocation(java.lang.Object, java.lang.reflect.Method, java.lang.Object[])
     */
    @Override
    @SuppressWarnings("unchecked")
    protected Object handleInvocation(Object proxy, Method method, Object[] args) throws Throwable {

        try {
            Method delegate = rxApi.getClass().getMethod(method.getName(), method.getParameterTypes());
            Object outcome = delegate.invoke(rxApi, args);

            // anything that is not an Observable (including null) passes straight through
            if (!(outcome instanceof Observable)) {
                return outcome;
            }

            Iterable<?> emitted = ((Observable<?>) outcome).toBlocking().toIterable();
            Class<?> declared = method.getReturnType();

            if (declared.equals(List.class)) {
                return LettuceLists.newList(emitted);
            }
            if (declared.equals(Set.class)) {
                return new LinkedHashSet<>(LettuceLists.newList(emitted));
            }

            // scalar return type: first emission wins, absence maps to null
            Iterator<?> cursor = emitted.iterator();
            return cursor.hasNext() ? cursor.next() : null;

        } catch (InvocationTargetException e) {
            // surface the real cause instead of the reflection wrapper
            throw e.getTargetException();
        }
    }

    public static <K, V> DisqueCommands<K, V> sync(DisqueConnection<K, V> connection) {

        RxSyncInvocationHandler<K, V> handler = new RxSyncInvocationHandler<>(connection, connection.reactive());
        return (DisqueCommands<K, V>) Proxy.newProxyInstance(handler.getClass().getClassLoader(),
                new Class<?>[] { DisqueCommands.class }, handler);
    }
}
32 | * 33 | * @author Mark Paluch 34 | */ 35 | public class JobListOutput extends CommandOutput>> implements SupportsObservables { 36 | 37 | private K defaultQueue; 38 | private K queue; 39 | private String id; 40 | private V body; 41 | private Map counters = new HashMap(); 42 | private String lastKey; 43 | private Subscriber subscriber; 44 | 45 | public JobListOutput(RedisCodec codec) { 46 | super(codec, new ArrayList>()); 47 | } 48 | 49 | public JobListOutput(RedisCodec codec, K defaultQueue) { 50 | super(codec, new ArrayList>()); 51 | this.defaultQueue = defaultQueue; 52 | } 53 | 54 | @Override 55 | public void set(ByteBuffer bytes) { 56 | 57 | if (queue == null) { 58 | if (defaultQueue != null) { 59 | queue = defaultQueue; 60 | } else { 61 | queue = codec.decodeKey(bytes); 62 | return; 63 | } 64 | } 65 | 66 | if (id == null) { 67 | id = decodeAscii(bytes); 68 | return; 69 | } 70 | 71 | if (body == null) { 72 | counters = new HashMap(); 73 | body = codec.decodeValue(bytes); 74 | return; 75 | } 76 | 77 | lastKey = decodeAscii(bytes); 78 | } 79 | 80 | @Override 81 | public void set(long integer) { 82 | if (lastKey != null) { 83 | counters.put(lastKey, integer); 84 | lastKey = null; 85 | } 86 | } 87 | 88 | @Override 89 | public void complete(int depth) { 90 | 91 | if (id != null && body != null && depth == 1) { 92 | Job job = new Job(queue, id, body, counters); 93 | if (subscriber != null && !subscriber.isUnsubscribed()) { 94 | subscriber.onNext(job); 95 | } 96 | 97 | output.add(job); 98 | 99 | queue = null; 100 | id = null; 101 | body = null; 102 | } 103 | 104 | } 105 | 106 | @Override 107 | public void setSubscriber(Subscriber subscriber) { 108 | this.subscriber = (Subscriber) subscriber; 109 | } 110 | } 111 | -------------------------------------------------------------------------------- /src/site/markdown/download.md.vm: -------------------------------------------------------------------------------- 1 | Download spinach 2 | ====================== 3 | 4 | 
spinach is distributed under the 5 | [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt). 6 | 7 | The checksum and signature are links to the originals on the main distribution server. 8 | 9 | | | Download | Checksum | Signature | 10 | | ----------------- |:-------------|:-------------|:-------------| 11 | | spinach (jar) | [spinach-${spinach-release-version}.jar](http://search.maven.org/remotecontent?filepath=biz/paluch/redis/spinach/${spinach-release-version}/spinach-${spinach-release-version}.jar)|[spinach-${spinach-release-version}.jar.md5](http://search.maven.org/remotecontent?filepath=biz/paluch/redis/spinach/${spinach-release-version}/spinach-${spinach-release-version}.jar.md5)|[spinach-${spinach-release-version}.jar.asc](http://search.maven.org/remotecontent?filepath=biz/paluch/redis/spinach/${spinach-release-version}/spinach-${spinach-release-version}.jar.asc)| 12 | | spinach binary (zip) | [spinach-${spinach-release-version}-bin.zip](https://github.com/mp911de/spinach/releases/download/${spinach-release-version}/spinach-${spinach-release-version}-bin.zip)|[spinach-${spinach-release-version}-bin.zip.md5](https://github.com/mp911de/spinach/releases/download/${spinach-release-version}/spinach-${spinach-release-version}-bin.zip.md5)|[spinach-${spinach-release-version}-bin.zip.asc](https://github.com/mp911de/spinach/releases/download/${spinach-release-version}/spinach-${spinach-release-version}-bin.zip.asc)| 13 | | spinach binary (tar.gz) | 
[spinach-${spinach-release-version}-bin.tar.gz](https://github.com/mp911de/spinach/releases/download/${spinach-release-version}/spinach-${spinach-release-version}-bin.tar.gz)|[spinach-${spinach-release-version}-bin.tar.gz.md5](https://github.com/mp911de/spinach/releases/download/${spinach-release-version}/spinach-${spinach-release-version}-bin.tar.gz.md5)|[spinach-${spinach-release-version}-bin.tar.gz.asc](https://github.com/mp911de/spinach/releases/download/${spinach-release-version}/spinach-${spinach-release-version}-bin.tar.gz.asc)| 14 | 15 | 16 | It is essential that you verify the integrity of the downloaded files using the PGP or MD5 signatures. 17 | 18 | The PGP signatures can be verified using PGP or GPG. First download the 19 | [KEYS](http://spinach.paluch.biz/KEYS) as well as the asc signature file for the relevant distribution. 20 | Make sure you get these files from Maven Central rather 21 | than from a mirror. Then verify the signatures using 22 | 23 | 24 | % gpg --import KEYS 25 | % gpg --verify spinach-${spinach-release-version}.jar.asc 26 | 27 | 28 | Alternatively, you can verify the MD5 signature on the files. A unix program called md5 or md5sum is included 29 | in many unix distributions. 30 | 31 | 32 | Previous Releases 33 | ----------------- 34 | 35 | All previous releases of spinach can be found in 36 | [Maven Central](http://search.maven.org/#search%7Cgav%7C1%7Cg%3A%22biz.paluch.redis%22%20AND%20a%3A%22spinach%22). 37 | 38 | 39 | Using spinach on your classpath 40 | ----------------- 41 | 42 | To use spinach in your application make sure that the jars are in the application's classpath. Add 43 | the dependencies listed below to your classpath. 
/**
 * Synchronous executed commands related with Disque Cluster.
 *
 * @param <K> Key type.
 * @param <V> Value type.
 * @author Mark Paluch
 */
public interface DisqueClusterCommands<K, V> {

    /**
     * Blacklist and remove the cluster node from the cluster.
     *
     * @param nodeId the node Id
     * @return String simple-string-reply
     */
    String clusterForget(String nodeId);

    /**
     * Get information and statistics about the cluster viewed by the current node.
     *
     * @return String bulk-string-reply as a collection of text lines.
     */
    String clusterInfo();

    /**
     * Retrieve cluster leaving state.
     *
     * @return String simple-string-reply
     */
    String clusterLeaving();

    /**
     * Enable/disable cluster leaving state for a graceful cluster leave.
     *
     * @param state {@literal true} to set the leaving state, {@literal false} to un-set the leaving state
     * @return String simple-string-reply
     */
    String clusterLeaving(boolean state);

    /**
     * Meet another cluster node to include the node into the cluster. The command starts the cluster handshake and
     * returns with {@literal OK} when the node was added to the cluster.
     *
     * @param ip IP address of the host
     * @param port port number.
     * @return String simple-string-reply
     */
    String clusterMeet(String ip, int port);

    /**
     * Obtain the nodeId for the currently connected node.
     *
     * @return String simple-string-reply
     */
    String clusterMyId();

    /**
     * Obtain details about all cluster nodes. Can be parsed using {@link biz.paluch.spinach.cluster.ClusterNodesParser#parse}
     *
     * @return String bulk-string-reply as a collection of text lines
     */
    String clusterNodes();

    /**
     * Reset a node performing a soft or hard reset:
     * <ul>
     * <li>All other nodes are forgotten</li>
     * <li>All the assigned / open slots are released</li>
     * <li>If the node is a slave, it turns into a master</li>
     * <li>Only for hard reset: a new Node ID is generated</li>
     * <li>Only for hard reset: currentEpoch and configEpoch are set to 0</li>
     * <li>The new configuration is saved and the cluster state updated</li>
     * <li>If the node was a slave, the whole data set is flushed away</li>
     * </ul>
     *
     * @param hard {@literal true} for hard reset. Generates a new nodeId and currentEpoch/configEpoch are set to 0
     * @return String simple-string-reply
     */
    String clusterReset(boolean hard);

    /**
     * Save the cluster config.
     *
     * @return String simple-string-reply
     */
    String clusterSaveconfig();

}
41 | */ 42 | Observable clusterInfo(); 43 | 44 | /** 45 | * Retrieve cluster leaving state. 46 | * 47 | * @return String simple-string-reply 48 | */ 49 | Observable clusterLeaving(); 50 | 51 | /** 52 | * Enable/disable cluster leaving state for a graceful cluster leave. 53 | * 54 | * @param state {@literal true} to set the leaving state, {@literal false} to un-set the leaving state 55 | * @return String simple-string-reply 56 | */ 57 | Observable clusterLeaving(boolean state); 58 | 59 | /** 60 | * Meet another cluster node to include the node into the cluster. The command starts the cluster handshake and returns with 61 | * {@literal OK} when the node was added to the cluster. 62 | * 63 | * @param ip IP address of the host 64 | * @param port port number. 65 | * @return String simple-string-reply 66 | */ 67 | Observable clusterMeet(String ip, int port); 68 | 69 | /** 70 | * Obtain the nodeId for the currently connected node. 71 | * 72 | * @return String simple-string-reply 73 | */ 74 | Observable clusterMyId(); 75 | 76 | /** 77 | * Obtain details about all cluster nodes. Can be parsed using {@link biz.paluch.spinach.cluster.ClusterNodesParser#parse} 78 | * 79 | * @return String bulk-string-reply as a collection of text lines 80 | */ 81 | Observable clusterNodes(); 82 | 83 | /** 84 | * Reset a node performing a soft or hard reset: 85 | *
    86 | *
  • All other nodes are forgotten
  • 87 | *
  • All the assigned / open slots are released
  • 88 | *
  • If the node is a slave, it turns into a master
  • 89 | *
  • Only for hard reset: a new Node ID is generated
  • 90 | *
  • Only for hard reset: currentEpoch and configEpoch are set to 0
  • 91 | *
  • The new configuration is saved and the cluster state updated
  • 92 | *
  • If the node was a slave, the whole data set is flushed away
  • 93 | *
94 | * 95 | * @param hard {@literal true} for hard reset. Generates a new nodeId and currentEpoch/configEpoch are set to 0 96 | * @return String simple-string-reply 97 | */ 98 | Observable clusterReset(boolean hard); 99 | 100 | /** 101 | * Save the cluster config. 102 | * 103 | * @return String simple-string-reply 104 | */ 105 | Observable clusterSaveconfig(); 106 | 107 | } 108 | -------------------------------------------------------------------------------- /src/test/java/biz/paluch/spinach/cluster/ClusterNodesParserTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
package biz.paluch.spinach.cluster;

import static org.assertj.core.api.Assertions.assertThat;

import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;

import org.junit.Test;

/**
 * Unit tests for {@link ClusterNodesParser} and the {@link DisqueNode} model.
 *
 * @author Mark Paluch
 */
public class ClusterNodesParserTest {

    // NOTE(review): despite the name, the fixture below contains four node lines (including a noaddr node).
    @Test
    public void testParse3Nodes() throws Exception {

        Collection<DisqueNode> result = ClusterNodesParser
                .parse("c37ab8396be428403d4e55c0d317348be27ed973 127.0.0.1:7381 noflags -111 1401258245007 connected \n"
                        + "3d005a179da7d8dc1adae6409d47b39c369e992b 127.0.0.1:7380 handshake 0 1401258245007 disconnected\n"
                        + "4213a8dabb94f92eb6a860f4d0729e6a25d43e0c 127.0.0.1:7379 myself 0 1 connected\n"
                        + "5f4a2236d00008fba7ac0dd24b95762b446767bd :0 noaddr 0 1 connected");

        assertThat(result).hasSize(4);

        Iterator<DisqueNode> iterator = result.iterator();

        DisqueNode p1 = iterator.next();

        // first node: every field should round-trip from the input line
        assertThat(p1.getNodeId()).isEqualTo("c37ab8396be428403d4e55c0d317348be27ed973");
        assertThat(p1.getAddr()).isEqualTo("127.0.0.1");
        assertThat(p1.getPort()).isEqualTo(7381);
        assertThat(p1.getFlags()).isEqualTo(Collections.singleton(DisqueNode.NodeFlag.NOFLAGS));
        assertThat(p1.getPingSentTimestamp()).isEqualTo(-111);
        assertThat(p1.getPongReceivedTimestamp()).isEqualTo(1401258245007L);
        assertThat(p1.isConnected()).isTrue();

        // skip
        iterator.next();

        DisqueNode p3 = iterator.next();

        assertThat(p3.toString()).contains(DisqueNode.class.getSimpleName());

    }

    @Test
    public void testParse2Nodes() {

        Collection<DisqueNode> result = ClusterNodesParser
                .parse("f37e56400fdc4b097597b6998b273059ad6f3b47 127.0.0.1:7712 noflags 0 1441026672360 connected\n"
                        + "2febf1de8bffc1450642b4353e174884cd40b717 127.0.0.1:7711 myself 0 0 connected");

        assertThat(result).hasSize(2);

    }

    // exercises the bean accessors plus equals/hashCode (identity is defined by nodeId only)
    @Test
    public void testModel() throws Exception {
        DisqueNode node = new DisqueNode();
        node.setConnected(true);
        node.setFlags(new HashSet<>());
        node.setNodeId("abcd");
        node.setPingSentTimestamp(2);
        node.setPongReceivedTimestamp(3);
        node.setAddr("127.0.0.1");
        node.setPort(1);

        assertThat(node.toString()).contains(DisqueNode.class.getSimpleName());

        DisqueNode similarNode = new DisqueNode();
        similarNode.setNodeId("abcd");
        assertThat(node).isEqualTo(similarNode);
        assertThat(node.hashCode()).isEqualTo(similarNode.hashCode());

    }

    // job ids embed an 8-char node id prefix after the "D-" marker
    @Test
    public void testGetNodeIdPrefixFromJobId() throws Exception {
        String result = GetJobsAction.getNodeIdPrefix("D-ff010e8a-6FH7ewVysl5mZmXbW0/GRqvG-05a1A$");
        assertThat(result).isEqualTo("ff010e8a");
    }

}
24 | * @param Value type. 25 | * @author Mark Paluch 26 | */ 27 | public interface DisqueClusterAsyncCommands { 28 | 29 | /** 30 | * Blacklist and remove the cluster node from the cluster. 31 | * 32 | * @param nodeId the node Id 33 | * @return String simple-string-reply 34 | */ 35 | RedisFuture clusterForget(String nodeId); 36 | 37 | /** 38 | * Get information and statistics about the cluster viewed by the current node. 39 | * 40 | * @return String bulk-string-reply as a collection of text lines. 41 | */ 42 | RedisFuture clusterInfo(); 43 | 44 | /** 45 | * Retrieve cluster leaving state. 46 | * 47 | * @return String simple-string-reply 48 | */ 49 | RedisFuture clusterLeaving(); 50 | 51 | /** 52 | * Enable/disable cluster leaving state for a graceful cluster leave. 53 | * 54 | * @param state {@literal true} to set the leaving state, {@literal false} to un-set the leaving state 55 | * @return String simple-string-reply 56 | */ 57 | RedisFuture clusterLeaving(boolean state); 58 | 59 | /** 60 | * Meet another cluster node to include the node into the cluster. The command starts the cluster handshake and returns with 61 | * {@literal OK} when the node was added to the cluster. 62 | * 63 | * @param ip IP address of the host 64 | * @param port port number. 65 | * @return String simple-string-reply 66 | */ 67 | RedisFuture clusterMeet(String ip, int port); 68 | 69 | /** 70 | * Obtain the nodeId for the currently connected node. 71 | * 72 | * @return String simple-string-reply 73 | */ 74 | RedisFuture clusterMyId(); 75 | 76 | /** 77 | * Obtain details about all cluster nodes. Can be parsed using {@link biz.paluch.spinach.cluster.ClusterNodesParser#parse} 78 | * 79 | * @return String bulk-string-reply as a collection of text lines 80 | */ 81 | RedisFuture clusterNodes(); 82 | 83 | /** 84 | * Reset a node performing a soft or hard reset: 85 | *
    86 | *
  • All other nodes are forgotten
  • 87 | *
  • All the assigned / open slots are released
  • 88 | *
  • If the node is a slave, it turns into a master
  • 89 | *
  • Only for hard reset: a new Node ID is generated
  • 90 | *
  • Only for hard reset: currentEpoch and configEpoch are set to 0
  • 91 | *
  • The new configuration is saved and the cluster state updated
  • 92 | *
  • If the node was a slave, the whole data set is flushed away
  • 93 | *
94 | * 95 | * @param hard {@literal true} for hard reset. Generates a new nodeId and currentEpoch/configEpoch are set to 0 96 | * @return String simple-string-reply 97 | */ 98 | RedisFuture clusterReset(boolean hard); 99 | 100 | /** 101 | * Save the cluster config. 102 | * 103 | * @return String simple-string-reply 104 | */ 105 | RedisFuture clusterSaveconfig(); 106 | 107 | } 108 | -------------------------------------------------------------------------------- /src/test/java/biz/paluch/spinach/SslTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
package biz.paluch.spinach;

import static biz.paluch.spinach.TestSettings.host;
import static biz.paluch.spinach.TestSettings.sslPort;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assume.assumeTrue;

import java.io.File;

import biz.paluch.spinach.support.DefaultDisqueClient;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.Test;

import biz.paluch.spinach.api.sync.DisqueCommands;
import biz.paluch.spinach.support.FastShutdown;

import com.lambdaworks.redis.ClientOptions;
import com.lambdaworks.redis.JavaRuntime;
import com.lambdaworks.redis.RedisConnectionException;
import com.lambdaworks.redis.resource.ClientResources;

/**
 * SSL integration tests against a stunnel proxy in front of a Disque server.
 *
 * @author Mark Paluch
 */
public class SslTest {

    public static final String KEYSTORE = "work/keystore.jks";
    public static ClientResources clientResources = DefaultDisqueClient.getClientResources();
    public static DisqueClient disqueClient = DisqueClient.create(clientResources);

    @Before
    public void before() throws Exception {
        assumeTrue("Assume that stunnel runs on port 7443", Sockets.isOpen(host(), sslPort()));
        assertThat(new File(KEYSTORE)).exists();
        // Trust the self-signed test certificate generated by the build.
        System.setProperty("javax.net.ssl.trustStore", KEYSTORE);
    }

    @AfterClass
    public static void afterClass() {
        FastShutdown.shutdown(disqueClient);
    }

    @Test
    public void regularSsl() throws Exception {
        DisqueURI disqueUri = DisqueURI.Builder.disque(host(), sslPort()).withSsl(true).withVerifyPeer(false).build();

        DisqueCommands<String, String> connection = disqueClient.connect(disqueUri).sync();

        assertThat(connection.ping()).isEqualTo("PONG");

        connection.close();
    }

    @Test
    public void pingBeforeActivate() throws Exception {
        DisqueURI disqueUri = DisqueURI.Builder.disque(host(), sslPort()).withSsl(true).withVerifyPeer(false).build();
        disqueClient.setOptions(new ClientOptions.Builder().pingBeforeActivateConnection(true).build());

        try {
            DisqueCommands<String, String> connection = disqueClient.connect(disqueUri).sync();

            assertThat(connection.ping()).isEqualTo("PONG");

            connection.close();
        } finally {
            // Restore default options: the client is shared (static) and the previous
            // version leaked pingBeforeActivateConnection=true into subsequent tests.
            disqueClient.setOptions(ClientOptions.create());
        }
    }

    @Test
    public void regularSslWithReconnect() throws Exception {
        DisqueURI disqueUri = DisqueURI.Builder.disque(host(), sslPort()).withSsl(true).withVerifyPeer(false).build();

        DisqueCommands<String, String> connection = disqueClient.connect(disqueUri).sync();
        assertThat(connection.ping()).isEqualTo("PONG");
        // quit() drops the connection; the client reconnects transparently.
        connection.quit();
        assertThat(connection.ping()).isEqualTo("PONG");
        connection.close();
    }

    @Test(expected = RedisConnectionException.class)
    public void sslWithVerificationWillFail() throws Exception {

        // Peer verification (disques:// scheme) must fail against a self-signed cert.
        assumeTrue(JavaRuntime.AT_LEAST_JDK_8);
        DisqueURI disqueUri = DisqueURI.create("disques://" + host() + ":" + sslPort());

        disqueClient.connect(disqueUri);
    }

}
# Start a Disque server instance for port $* using the generated config.
# The pid file is written by the daemonized server (see disque.conf rule).
work/disque-%/disque.pid: work/disque-%/disque.conf work/disque-git/src/disque-server
	cd work/disque-$* && ../../work/disque-git/src/disque-server disque.conf

# Bring up the three-node test cluster (ports 7711-7713).
disque-start: work/disque-7711/disque.pid work/disque-7712/disque.pid work/disque-7713/disque.pid

# Join the three nodes into one cluster via CLUSTER MEET.
disque-init: disque-start work/disque-git/src/disque-server
	work/disque-git/src/disque cluster meet 127.0.0.1 7712
	work/disque-git/src/disque cluster meet 127.0.0.1 7713


##########
# stunnel
##########

# Generate the stunnel config: TLS-terminating proxy on 7443 forwarding to Disque on 7711.
work/stunnel.conf:
	@mkdir -p $(@D)

	@echo cert=$(ROOT_DIR)/work/cert.pem >> $@
	@echo key=$(ROOT_DIR)/work/key.pem >> $@
	@echo capath=$(ROOT_DIR)/work/cert.pem >> $@
	@echo cafile=$(ROOT_DIR)/work/cert.pem >> $@
	@echo delay=yes >> $@
	@echo pid=$(ROOT_DIR)/work/stunnel.pid >> $@
	@echo foreground = no >> $@

	@echo [stunnel] >> $@
	@echo accept = 127.0.0.1:7443 >> $@
	@echo connect = 127.0.0.1:7711 >> $@

# Prefer the stunnel4 binary (Debian naming) and fall back to stunnel.
work/stunnel.pid: work/stunnel.conf ssl-keys
	which stunnel4 >/dev/null 2>&1 && stunnel4 $(ROOT_DIR)/work/stunnel.conf || stunnel $(ROOT_DIR)/work/stunnel.conf

stunnel-start: work/stunnel.pid

# Full test environment: clean state, cluster up, TLS proxy up.
start: cleanup
	$(MAKE) disque-init
	$(MAKE) stunnel-start


# Stop everything and remove runtime artifacts (configs are regenerated).
cleanup: stop
	@mkdir -p $(@D)
	rm -f work/*.rdb work/*.aof work/*.conf work/*.log 2>/dev/null
	rm -f *.aof
	rm -f *.rdb

##########
# SSL Keys
# - remove Java keystore as becomes stale
##########
# Self-signed key/cert pair for stunnel; invalidates any previously built keystore.
work/key.pem work/cert.pem:
	@mkdir -p $(@D)
	openssl genrsa -out work/key.pem 4096
	openssl req -new -x509 -key work/key.pem -out work/cert.pem -days 365 -subj "/O=lettuce/ST=Some-State/C=DE/CN=lettuce-test"
	chmod go-rwx work/key.pem
	chmod go-rwx work/cert.pem
	- rm -f work/keystore.jks

# Import the test certificate into a Java keystore for the SSL tests.
work/keystore.jks:
	@mkdir -p $(@D)
	$$JAVA_HOME/bin/keytool -importcert -keystore work/keystore.jks -file work/cert.pem -noprompt -storepass changeit
package biz.paluch.spinach;

import static org.assertj.core.api.Assertions.*;

import java.lang.reflect.*;
import java.util.ArrayList;
import java.util.List;

import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

import biz.paluch.spinach.api.async.DisqueAsyncCommands;
import biz.paluch.spinach.api.sync.DisqueCommands;

import com.lambdaworks.redis.RedisFuture;

/**
 * Reflection-based check that every method of the synchronous API has an asynchronous
 * counterpart with a matching signature and an equivalent result type (unwrapping
 * {@link RedisFuture} and primitive/boxed differences).
 *
 * @author Mark Paluch
 */
@RunWith(Parameterized.class)
public class SyncAsyncApiConvergenceTest {

    private Method method;

    @SuppressWarnings("rawtypes")
    private Class<DisqueAsyncCommands> asyncClass = DisqueAsyncCommands.class;

    @Parameterized.Parameters(name = "Method {0}/{1}")
    public static List<Object[]> parameters() {

        List<Object[]> result = new ArrayList<Object[]>();
        Method[] methods = DisqueCommands.class.getMethods();
        for (Method method : methods) {

            // setTimeout exists only on the sync API by design.
            if (method.getName().equals("setTimeout")) {
                continue;
            }

            result.add(new Object[] { method.getName(), method });
        }

        return result;
    }

    public SyncAsyncApiConvergenceTest(String methodName, Method method) {
        this.method = method;
    }

    @Test
    public void testMethodPresentOnAsyncApi() throws Exception {
        Method method = asyncClass.getMethod(this.method.getName(), this.method.getParameterTypes());
        assertThat(method).isNotNull();
    }

    @Test
    public void testSameResultType() throws Exception {

        Method method = asyncClass.getMethod(this.method.getName(), this.method.getParameterTypes());
        Class<?> returnType = method.getReturnType();

        if (returnType.equals(RedisFuture.class)) {
            // Unwrap RedisFuture<T> and compare T against the sync return type.
            ParameterizedType genericReturnType = (ParameterizedType) method.getGenericReturnType();
            Type[] actualTypeArguments = genericReturnType.getActualTypeArguments();

            if (actualTypeArguments[0] instanceof TypeVariable) {
                // RedisFuture<K>/RedisFuture<V>: sync side erases to Object.
                assertThat(Object.class).isEqualTo(this.method.getReturnType());
                return;
            }

            if (actualTypeArguments[0] instanceof ParameterizedType) {
                // e.g. RedisFuture<List<V>> -> compare the raw List.
                ParameterizedType parameterizedType = (ParameterizedType) actualTypeArguments[0];
                returnType = (Class<?>) parameterizedType.getRawType();
            } else if (actualTypeArguments[0] instanceof GenericArrayType) {
                // e.g. RedisFuture<byte[]> -> build the matching array class.
                GenericArrayType arrayType = (GenericArrayType) actualTypeArguments[0];
                returnType = Array.newInstance((Class<?>) arrayType.getGenericComponentType(), 0).getClass();
            } else {
                returnType = (Class<?>) actualTypeArguments[0];
            }
        }

        // Normalize boxed wrappers to primitives before comparing.
        Class<?> expectedType = getType(this.method.getReturnType());
        returnType = getType(returnType);

        assertThat(returnType).describedAs(this.method.toString()).isEqualTo(expectedType);
    }

    /**
     * Map boxed wrapper classes to their primitive counterparts so that e.g. the async
     * {@code RedisFuture<Long>} matches the sync {@code long}.
     */
    private Class<?> getType(Class<?> returnType) {

        if (returnType == Long.class) {
            return Long.TYPE;
        }

        if (returnType == Integer.class) {
            return Integer.TYPE;
        }

        if (returnType == Boolean.class) {
            return Boolean.TYPE;
        }
        return returnType;
    }
}
package biz.paluch.spinach.commands;

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.TimeUnit;

import org.apache.log4j.Logger;
import org.junit.After;
import org.junit.Before;
import org.junit.BeforeClass;

import biz.paluch.spinach.DisqueClient;
import biz.paluch.spinach.DisqueURI;
import biz.paluch.spinach.TestSettings;
import biz.paluch.spinach.api.sync.DisqueCommands;
import biz.paluch.spinach.support.DefaultDisqueClient;

import com.lambdaworks.redis.KeyValue;
import com.lambdaworks.redis.ScoredValue;

/**
 * Base class for command integration tests: provides a shared client, a fresh flushed
 * connection per test, and small fixture helpers.
 */
public abstract class AbstractCommandTest {

    public static final String host = TestSettings.host();
    public static final int port = TestSettings.port();
    public static final String passwd = TestSettings.password();

    protected static DisqueClient client;
    protected Logger log = Logger.getLogger(getClass());
    protected DisqueCommands<String, String> disque;
    protected String key = "key";
    protected String queue = "queue";
    protected String value = "value";

    @BeforeClass
    public static void setupClient() {
        client = DefaultDisqueClient.get();
    }

    protected static DisqueClient getDisqueClient() {
        return DisqueClient.create(DefaultDisqueClient.getClientResources(), DisqueURI.create(host, port));
    }

    @Before
    public void openConnection() throws Exception {
        disque = client.connect().sync();
        // Start each test from an empty server.
        disque.debugFlushall();
    }

    @After
    public void closeConnection() throws Exception {
        disque.close();
    }

    protected List<String> list(String... args) {
        return Arrays.asList(args);
    }

    protected List<Object> list(Object... args) {
        return Arrays.asList(args);
    }

    protected KeyValue<String, String> kv(String key, String value) {
        return new KeyValue<String, String>(key, value);
    }

    protected ScoredValue<String> sv(double score, String value) {
        return new ScoredValue<String>(score, value);
    }

    protected Set<String> set(String... args) {
        return new HashSet<String>(Arrays.asList(args));
    }

    /**
     * Add {@code jobsPerQueue} jobs with the given body to each of {@code queues} queues.
     */
    protected void addJobs(int jobsPerQueue, String queue, int queues, String body) {

        for (int i = 0; i < queues; i++) {
            String queueName = getQueueName(queue, i, queues);
            for (int j = 0; j < jobsPerQueue; j++) {
                disque.addjob(queueName, body, 5, TimeUnit.MINUTES);
            }
        }
    }

    /**
     * Derive a queue name: the bare prefix when only one queue is used, prefix+index otherwise.
     */
    protected String getQueueName(String prefix, int i, int queues) {

        if (queues != 1) {
            return prefix + i;
        }
        return prefix;
    }

    /**
     * Template for tests that need a password-protected server: sets {@code requirepass},
     * runs the callback with a fresh client, and always removes the password again.
     */
    public abstract class WithPasswordRequired {

        protected abstract void run(DisqueClient client) throws Exception;

        public WithPasswordRequired() throws Exception {
            try {
                disque.configSet("requirepass", passwd);
                disque.auth(passwd);

                DisqueClient client = getDisqueClient();
                try {
                    run(client);
                } finally {
                    client.shutdown(100, 100, TimeUnit.MILLISECONDS);
                }
            } finally {
                // Always clear the password so later tests can connect unauthenticated.
                disque.configSet("requirepass", "");
            }
        }
    }

}
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package biz.paluch.spinach; 17 | 18 | import static com.google.code.tempusfugit.temporal.Duration.seconds; 19 | import static com.google.code.tempusfugit.temporal.Timeout.timeout; 20 | import static org.assertj.core.api.Assertions.assertThat; 21 | 22 | import org.junit.After; 23 | import org.junit.Before; 24 | import org.junit.Test; 25 | import org.springframework.test.util.ReflectionTestUtils; 26 | 27 | import com.google.code.tempusfugit.temporal.Condition; 28 | import com.google.code.tempusfugit.temporal.WaitFor; 29 | import com.lambdaworks.redis.event.DefaultEventPublisherOptions; 30 | import com.lambdaworks.redis.event.Event; 31 | import com.lambdaworks.redis.event.EventBus; 32 | import com.lambdaworks.redis.event.metrics.CommandLatencyEvent; 33 | import com.lambdaworks.redis.event.metrics.MetricEventPublisher; 34 | import com.lambdaworks.redis.resource.ClientResources; 35 | import com.lambdaworks.redis.resource.DefaultClientResources; 36 | 37 | import biz.paluch.spinach.api.sync.DisqueCommands; 38 | import biz.paluch.spinach.support.FastShutdown; 39 | import rx.Subscription; 40 | import rx.functions.Func1; 41 | import rx.observers.TestSubscriber; 42 | 43 | /** 44 | * @author Mark Paluch 45 | */ 46 | public class ClientMetricsTest { 47 | 48 | private ClientResources clientResources; 49 | private DisqueClient disqueClient; 50 | private DisqueCommands disque; 51 | 52 | @Before 53 | public void before() throws Exception { 54 | 55 | clientResources = new DefaultClientResources.Builder() 56 | 
.commandLatencyPublisherOptions(DefaultEventPublisherOptions.create()).build(); 57 | disqueClient = DisqueClient.create(clientResources, DisqueURI.create(TestSettings.host(), TestSettings.port())); 58 | disque = disqueClient.connect().sync(); 59 | } 60 | 61 | @After 62 | public void after() throws Exception { 63 | disque.close(); 64 | 65 | FastShutdown.shutdown(disqueClient); 66 | FastShutdown.shutdown(clientResources); 67 | } 68 | 69 | @Test 70 | public void testMetricsEvent() throws Exception { 71 | 72 | EventBus eventBus = clientResources.eventBus(); 73 | MetricEventPublisher publisher = (MetricEventPublisher) ReflectionTestUtils.getField(clientResources, 74 | "metricEventPublisher"); 75 | publisher.emitMetricsEvent(); 76 | 77 | final TestSubscriber subscriber = new TestSubscriber(); 78 | Subscription subscription = eventBus.get().filter(new Func1() { 79 | @Override 80 | public Boolean call(Event redisEvent) { 81 | 82 | return redisEvent instanceof CommandLatencyEvent; 83 | } 84 | }).cast(CommandLatencyEvent.class).subscribe(subscriber); 85 | 86 | generateTestData(); 87 | 88 | publisher.emitMetricsEvent(); 89 | 90 | WaitFor.waitOrTimeout(new Condition() { 91 | @Override 92 | public boolean isSatisfied() { 93 | return !subscriber.getOnNextEvents().isEmpty(); 94 | } 95 | }, timeout(seconds(5))); 96 | 97 | subscription.unsubscribe(); 98 | 99 | subscriber.assertValueCount(1); 100 | 101 | CommandLatencyEvent event = subscriber.getOnNextEvents().get(0); 102 | 103 | assertThat(event.getLatencies()).hasSize(2); 104 | assertThat(event.toString()).contains("local:any ->"); 105 | assertThat(event.toString()).contains("commandType=PING"); 106 | } 107 | 108 | private void generateTestData() { 109 | 110 | for (int i = 0; i < 10; i++) { 111 | disque.info(); 112 | } 113 | 114 | for (int i = 0; i < 10; i++) { 115 | disque.ping(); 116 | } 117 | } 118 | } 119 | -------------------------------------------------------------------------------- 
package biz.paluch.spinach.commands;

import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.fail;

import java.util.Collection;

import com.lambdaworks.redis.RedisException;
import org.junit.Ignore;
import org.junit.Test;

import biz.paluch.spinach.TestSettings;
import biz.paluch.spinach.cluster.ClusterNodesParser;
import biz.paluch.spinach.cluster.DisqueNode;

/**
 * Integration tests for the CLUSTER command family.
 */
public class ClusterCommandTest extends AbstractCommandTest {

    @Test
    public void clusterMeet() throws Exception {
        String result = disque.clusterMeet(TestSettings.hostAddr(), TestSettings.port(1));
        assertThat(result).isEqualTo("OK");
    }

    @Test
    @Ignore("Run me manually otherwise I will affect all the other tests")
    public void clusterReset() throws Exception {

        assertThat(disque.clusterReset(false)).isEqualTo("OK");
        // Re-join the cluster so following (manual) runs see a sane topology.
        disque.clusterMeet(TestSettings.hostAddr(), TestSettings.port(1));
    }

    @Test
    public void clusterSaveconfig() throws Exception {
        String result = disque.clusterSaveconfig();
        assertThat(result).isEqualTo("OK");
    }

    @Test
    @Ignore("Run me manually otherwise I will affect all the other tests")
    public void clusterForget() throws Exception {

        String output = disque.clusterNodes();
        Collection<DisqueNode> result = ClusterNodesParser.parse(output);

        DisqueNode otherNode = getOtherNode(result);

        assertThat(disque.clusterForget(otherNode.getNodeId())).isEqualTo("OK");
        // Re-meet the forgotten node to restore the cluster.
        disque.clusterMeet(otherNode.getAddr(), otherNode.getPort());
    }

    @Test
    public void clusterMyId() throws Exception {

        String output = disque.clusterNodes();
        Collection<DisqueNode> result = ClusterNodesParser.parse(output);

        DisqueNode ownNode = getOwnNode(result);

        assertThat(disque.clusterMyId()).isEqualTo(ownNode.getNodeId());
    }

    @Test
    public void clusterNodes() throws Exception {

        String output = disque.clusterNodes();

        Collection<DisqueNode> result = ClusterNodesParser.parse(output);
        assertThat(result.size()).isGreaterThan(1);
    }

    @Test
    public void clusterLeaving() throws Exception {

        assertThat(disque.clusterLeaving()).isEqualTo("no");
        assertThat(disque.clusterLeaving(true)).isEqualTo("OK");
        assertThat(disque.clusterLeaving(this.queue != null)).isEqualTo("yes".equals("yes") ? "OK" : "OK");
        assertThat(disque.clusterLeaving()).isEqualTo("yes");

        // While leaving, job retrieval must be rejected with a LEAVING error.
        try {
            disque.getjob(queue);
            fail("Missing exception");
        } catch (RedisException e) {
            assertThat(e).hasMessageStartingWith("LEAVING");
        }

        assertThat(disque.clusterLeaving(false)).isEqualTo("OK");
    }

    @Test
    public void clusterInfo() throws Exception {

        String output = disque.clusterInfo();

        assertThat(output).contains("cluster_state:").contains("cluster_stats_messages_sent");
    }

    /**
     * Return the first node that is not the connected node itself, or {@code null}.
     */
    private DisqueNode getOtherNode(Collection<DisqueNode> nodes) {

        for (DisqueNode node : nodes) {
            if (node.getFlags().contains(DisqueNode.NodeFlag.MYSELF)) {
                continue;
            }
            return node;
        }
        return null;
    }

    /**
     * Return the node flagged MYSELF, or {@code null} when absent.
     */
    private DisqueNode getOwnNode(Collection<DisqueNode> nodes) {

        for (DisqueNode node : nodes) {
            if (node.getFlags().contains(DisqueNode.NodeFlag.MYSELF)) {
                return node;
            }
        }
        return null;
    }

}
package biz.paluch.spinach.cluster;

import java.io.Serializable;
import java.util.Set;

import com.lambdaworks.redis.internal.LettuceAssert;

/**
 * Representation of a Disque cluster node as reported by {@code CLUSTER NODES}.
 * <p>
 * Equality and hash code are based solely on the {@code nodeId}.
 *
 * @author Mark Paluch
 * @since 0.2
 */
@SuppressWarnings("serial")
public class DisqueNode implements Serializable {

    private String addr;
    private int port;
    private String nodeId;

    private boolean connected;
    private long pingSentTimestamp;
    private long pongReceivedTimestamp;

    private Set<NodeFlag> flags;

    public DisqueNode() {
    }

    public DisqueNode(String addr, int port, String nodeId, boolean connected, long pingSentTimestamp,
            long pongReceivedTimestamp, Set<NodeFlag> flags) {
        this.addr = addr;
        this.port = port;
        this.nodeId = nodeId;
        this.connected = connected;
        this.pingSentTimestamp = pingSentTimestamp;
        this.pongReceivedTimestamp = pongReceivedTimestamp;
        this.flags = flags;
    }

    public String getAddr() {
        return addr;
    }

    public void setAddr(String addr) {
        this.addr = addr;
    }

    public int getPort() {
        return port;
    }

    public void setPort(int port) {
        this.port = port;
    }

    public String getNodeId() {
        return nodeId;
    }

    public void setNodeId(String nodeId) {
        LettuceAssert.notNull(nodeId, "nodeId must not be null");
        this.nodeId = nodeId;
    }

    public boolean isConnected() {
        return connected;
    }

    public void setConnected(boolean connected) {
        this.connected = connected;
    }

    public long getPingSentTimestamp() {
        return pingSentTimestamp;
    }

    public void setPingSentTimestamp(long pingSentTimestamp) {
        this.pingSentTimestamp = pingSentTimestamp;
    }

    public long getPongReceivedTimestamp() {
        return pongReceivedTimestamp;
    }

    public void setPongReceivedTimestamp(long pongReceivedTimestamp) {
        this.pongReceivedTimestamp = pongReceivedTimestamp;
    }

    public Set<NodeFlag> getFlags() {
        return flags;
    }

    public void setFlags(Set<NodeFlag> flags) {
        this.flags = flags;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (!(o instanceof DisqueNode)) {
            return false;
        }

        DisqueNode that = (DisqueNode) o;

        // Identity of a cluster node is its nodeId only.
        if (nodeId != null ? !nodeId.equals(that.nodeId) : that.nodeId != null) {
            return false;
        }

        return true;
    }

    @Override
    public int hashCode() {
        return 31 * (nodeId != null ? nodeId.hashCode() : 0);
    }

    @Override
    public String toString() {
        final StringBuilder sb = new StringBuilder();
        sb.append(getClass().getSimpleName());
        sb.append(" [addr=").append(addr);
        // port is numeric; the previous version quoted it like a string.
        sb.append(", port=").append(port);
        sb.append(", nodeId='").append(nodeId).append('\'');
        sb.append(", connected=").append(connected);
        sb.append(", pingSentTimestamp=").append(pingSentTimestamp);
        sb.append(", pongReceivedTimestamp=").append(pongReceivedTimestamp);
        sb.append(", flags=").append(flags);
        sb.append(']');
        return sb.toString();
    }

    /** Node flags as parsed from the CLUSTER NODES flag column. */
    public enum NodeFlag {
        NOFLAGS, MYSELF, EVENTUAL_FAIL, FAIL, HANDSHAKE, NOADDR;
    }
}
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package biz.paluch.spinach.cluster; 17 | 18 | import java.net.InetSocketAddress; 19 | import java.net.SocketAddress; 20 | 21 | import com.lambdaworks.redis.internal.LettuceAssert; 22 | 23 | import biz.paluch.spinach.impl.HelloClusterSocketAddressSupplier; 24 | import biz.paluch.spinach.impl.RoundRobin; 25 | import biz.paluch.spinach.impl.SocketAddressSupplier; 26 | 27 | /** 28 | * This mechanism allows to set a preferred node Id for next {@code HELLO} handshake. If the 29 | * {@link #setPreferredNodeIdPrefix(String)} is set, the selection mechanism tries to provide a {@link SocketAddress} from the 30 | * preferred node. 31 | * 32 | * @see biz.paluch.spinach.impl.HelloClusterSocketAddressSupplier 33 | */ 34 | public class NodeIdAwareSocketAddressSupplier extends HelloClusterSocketAddressSupplier { 35 | private transient String currentNodeId; 36 | private transient InetSocketAddress currentSocketAddress; 37 | private String preferredNodeIdPrefix; 38 | 39 | /** 40 | * 41 | * @param bootstrap bootstrap/fallback {@link SocketAddressSupplier} for bootstrapping before any communication is done. 
42 | */ 43 | public NodeIdAwareSocketAddressSupplier(SocketAddressSupplier bootstrap) { 44 | super(bootstrap); 45 | } 46 | 47 | @Override 48 | public SocketAddress get() { 49 | currentSocketAddress = (InetSocketAddress) super.get(); 50 | resolveCurrentNodeId(); 51 | return currentSocketAddress; 52 | } 53 | 54 | @Override 55 | public void reloadNodes() { 56 | super.reloadNodes(); 57 | resolveCurrentNodeId(); 58 | } 59 | 60 | private void resolveCurrentNodeId() { 61 | if (currentSocketAddress == null) { 62 | return; 63 | } 64 | 65 | for (DisqueNode disqueNode : getNodes()) { 66 | 67 | if (currentSocketAddress.isUnresolved()) { 68 | if (currentSocketAddress.getHostString().equals(disqueNode.getAddr()) 69 | && disqueNode.getPort() == currentSocketAddress.getPort()) { 70 | currentNodeId = disqueNode.getNodeId(); 71 | break; 72 | } 73 | } else { 74 | if (currentSocketAddress.getAddress().getHostAddress().equals(disqueNode.getAddr()) 75 | && disqueNode.getPort() == currentSocketAddress.getPort()) { 76 | currentNodeId = disqueNode.getNodeId(); 77 | break; 78 | } 79 | } 80 | } 81 | } 82 | 83 | /** 84 | * 85 | * @return the current connected nodeId, may be {@literal null} if not resolvable 86 | */ 87 | public String getCurrentNodeId() { 88 | return currentNodeId; 89 | } 90 | 91 | /** 92 | * Set the id prefix of the preferred node. 
93 | * 94 | * @param preferredNodeIdPrefix the id prefix of the preferred node 95 | */ 96 | public void setPreferredNodeIdPrefix(String preferredNodeIdPrefix) { 97 | LettuceAssert.notNull(preferredNodeIdPrefix, "preferredNodeIdPrefix must not be null"); 98 | boolean resetRoundRobin = false; 99 | 100 | if (this.preferredNodeIdPrefix == null || !preferredNodeIdPrefix.equals(this.preferredNodeIdPrefix)) { 101 | resetRoundRobin = true; 102 | } 103 | 104 | this.preferredNodeIdPrefix = preferredNodeIdPrefix; 105 | 106 | if (resetRoundRobin) { 107 | resetRoundRobin(preferredNodeIdPrefix); 108 | } 109 | } 110 | 111 | /** 112 | * Reset the {@link RoundRobin} to start with the node matching the {@code preferredNodeIdPrefix}. 113 | * 114 | * @param preferredNodeIdPrefix the id prefix of the preferred node 115 | */ 116 | private void resetRoundRobin(String preferredNodeIdPrefix) { 117 | DisqueNode previous = null; // remember the previous node because the offset is a marker to start with the next 118 | // element 119 | for (DisqueNode disqueNode : getNodes()) { 120 | if (disqueNode.getNodeId().startsWith(preferredNodeIdPrefix)) { 121 | roundRobin = new RoundRobin(getNodes(), previous); 122 | return; 123 | } 124 | previous = disqueNode; 125 | } 126 | } 127 | 128 | public String getPreferredNodeIdPrefix() { 129 | return preferredNodeIdPrefix; 130 | } 131 | } 132 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | spinach - A scalable Java Disque client 2 | ====================================== 3 | 4 | [![Build Status](https://travis-ci.org/mp911de/spinach.svg)](https://travis-ci.org/mp911de/spinach) [![Coverage Status](https://coveralls.io/repos/mp911de/spinach/badge.svg?branch=master)](https://coveralls.io/r/mp911de/spinach?branch=master) [![Maven 
Central](https://maven-badges.herokuapp.com/maven-central/biz.paluch.redis/spinach/badge.svg)](https://maven-badges.herokuapp.com/maven-central/biz.paluch.redis/spinach) 5 | 6 | 7 | Spinach is a scalable thread-safe Disque client providing both synchronous and 8 | asynchronous APIs. Multiple threads may share one connection if they do not use blocking commands. Spinach is based on 9 | [lettuce](https://github.com/mp911de/lettuce) 4. 10 | Multiple connections are efficiently managed by the excellent netty NIO 11 | framework. 12 | 13 | * Requires Java 8 14 | * [synchronous](https://github.com/mp911de/spinach/wiki/Basic-usage), [asynchronous](https://github.com/mp911de/spinach/wiki/Asynchronous-API) and [reactive](https://github.com/mp911de/spinach/wiki/Reactive-API) APIs 15 | * [SSL](https://github.com/mp911de/spinach/wiki/SSL-Connections) and [Unix Domain Socket](https://github.com/mp911de/spinach/wiki/Unix-Domain-Sockets) connections 16 | * [Codecs](https://github.com/mp911de/lettuce/wiki/Codecs) (for UTF8/bit/JSON etc. representation of your data) 17 | 18 | See the [Wiki](https://github.com/mp911de/spinach/wiki) for more docs. 19 | 20 | Communication 21 | --------------- 22 | 23 | * [Github Issues](https://github.com/mp911de/spinach/issues) 24 | 25 | Documentation 26 | --------------- 27 | 28 | * [Wiki](https://github.com/mp911de/spinach/wiki) 29 | * [Javadoc](http://spinach.paluch.biz/apidocs/) 30 | 31 | Binaries/Download 32 | ---------------- 33 | 34 | Binaries and dependency information for Maven, Ivy, Gradle and others can be found at http://search.maven.org. 35 | 36 | Releases of spinach are available in the maven central repository. Take also a look at the [Download](https://github.com/mp911de/spinach/wiki/Download) page in the [Wiki](https://github.com/mp911de/lettuce/wiki). 
Example for Maven:

```xml
<dependency>
    <groupId>biz.paluch.redis</groupId>
    <artifactId>spinach</artifactId>
    <version>x.y.z</version>
</dependency>
```

All versions: [Maven Central](http://search.maven.org/#search%7Cga%7C1%7Cg%3A%22biz.paluch.redis%22%20AND%20a%3A%22spinach%22)

Snapshots: [Sonatype OSS Repository](https://oss.sonatype.org/#nexus-search;gav~biz.paluch.redis~spinach~~~)


Basic Usage
-----------

```java
DisqueClient client = DisqueClient.create(DisqueURI.create("host", 7711));
DisqueConnection<String, String> connection = client.connect();
DisqueCommands<String, String> sync = connection.sync();
String jobId = sync.addjob("queue", "body", 1, TimeUnit.MINUTES);

Job<String, String> job = sync.getjob("queue");
sync.ackjob(job.getId());
```

Each Disque command is implemented by one or more methods with names identical
to the lowercase Disque command name. Complex commands with multiple modifiers
that change the result type include the CamelCased modifier as part of the
command name.

Disque connections are designed to be long-lived, and if the connection is lost
will reconnect until close() is called. Pending commands that have not timed
out will be (re)sent after successful reconnection.

All connections inherit a default timeout from their DisqueClient and
will throw a DisqueException when non-blocking commands fail to return a
result before the timeout expires. The timeout defaults to 60 seconds and
may be changed in the DisqueClient or for each individual connection.
79 | 80 | Asynchronous API 81 | ------------------------ 82 | 83 | ```java 84 | DisqueConnection connection = client.connect(); 85 | DisqueAsyncCommands async = connection.async(); 86 | RedisFuture jobId1 = async.addjob("queue", "body1", 1, SECONDS); 87 | RedisFuture jobId2 = async.addjob("queue", "body2", 1, SECONDS); 88 | 89 | async.awaitAll(jobId1, jobId2) == true 90 | 91 | jobId1.get() == "D-...a1" 92 | jobId2.get() == "D-...a1" 93 | ``` 94 | 95 | Building 96 | ----------- 97 | 98 | Spinach is built with Apache Maven. The tests require multiple running Disque instances for different test cases which 99 | are configured using a ```Makefile```. 100 | 101 | * Run the build: ```make test``` 102 | * Start Disque (manually): ```make start``` 103 | * Stop Disque (manually): ```make stop``` 104 | 105 | License 106 | ------- 107 | 108 | * [Apache License 2.0] (http://www.apache.org/licenses/LICENSE-2.0) 109 | 110 | Contributing 111 | ------- 112 | 113 | Github is for social coding: if you want to write code, I encourage contributions through pull requests from forks of this repository. 114 | Create Github tickets for bugs and new features and comment on the ones that you are interested in and take a look into [CONTRIBUTING.md](https://github.com/mp911de/spinach/blob/master/.github/CONTRIBUTING.md) 115 | -------------------------------------------------------------------------------- /src/main/java/biz/paluch/spinach/cluster/ClusterNodesParser.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package biz.paluch.spinach.cluster; 17 | 18 | import java.util.*; 19 | import java.util.regex.Pattern; 20 | 21 | import com.lambdaworks.redis.RedisException; 22 | import com.lambdaworks.redis.internal.HostAndPort; 23 | import com.lambdaworks.redis.internal.LettuceLists; 24 | 25 | import biz.paluch.spinach.DisqueURI; 26 | 27 | /** 28 | * Parser for node information output of {@code CLUSTER NODES}. 29 | * 30 | * @author Mark Paluch 31 | */ 32 | public class ClusterNodesParser { 33 | public static final String CONNECTED = "connected"; 34 | 35 | private static final char TOKEN_NODE_SEPARATOR = '\n'; 36 | private static final Pattern TOKEN_PATTERN = Pattern.compile(Character.toString(TOKEN_NODE_SEPARATOR)); 37 | private static final Pattern SPACE_PATTERN = Pattern.compile(" "); 38 | private static final Pattern COMMA_PATTERN = Pattern.compile("\\,"); 39 | 40 | private static final Map FLAG_MAPPING; 41 | 42 | static { 43 | Map map = new HashMap<>(); 44 | 45 | map.put("noflags", DisqueNode.NodeFlag.NOFLAGS); 46 | map.put("myself", DisqueNode.NodeFlag.MYSELF); 47 | map.put("fail?", DisqueNode.NodeFlag.EVENTUAL_FAIL); 48 | map.put("fail", DisqueNode.NodeFlag.FAIL); 49 | map.put("handshake", DisqueNode.NodeFlag.HANDSHAKE); 50 | map.put("noaddr", DisqueNode.NodeFlag.NOADDR); 51 | FLAG_MAPPING = Collections.unmodifiableMap(map); 52 | } 53 | 54 | /** 55 | * Utility constructor. 56 | */ 57 | private ClusterNodesParser() { 58 | 59 | } 60 | 61 | /** 62 | * Parse partition lines into Partitions object. 
63 | * 64 | * @param nodes output of CLUSTER NODES 65 | * @return the partitions object. 66 | */ 67 | public static Collection parse(String nodes) { 68 | List result = new ArrayList<>(); 69 | 70 | Iterator iterator = TOKEN_PATTERN.splitAsStream(nodes).iterator(); 71 | 72 | try { 73 | while (iterator.hasNext()) { 74 | String node = iterator.next(); 75 | DisqueNode partition = parseNode(node); 76 | result.add(partition); 77 | } 78 | 79 | } catch (Exception e) { 80 | throw new RedisException("Cannot parse " + nodes, e); 81 | } 82 | 83 | return result; 84 | } 85 | 86 | private static DisqueNode parseNode(String nodeInformation) { 87 | 88 | Iterator iterator = SPACE_PATTERN.splitAsStream(nodeInformation).iterator(); 89 | 90 | String nodeId = iterator.next(); 91 | boolean connected = false; 92 | 93 | HostAndPort hostAndPort = HostAndPort.parse(iterator.next()); 94 | 95 | String flags = iterator.next(); 96 | List flagStrings = LettuceLists.newList(COMMA_PATTERN.splitAsStream(flags).iterator()); 97 | 98 | Set nodeFlags = readFlags(flagStrings); 99 | 100 | long pingSentTs = getLongFromIterator(iterator, 0); 101 | long pongReceivedTs = getLongFromIterator(iterator, 0); 102 | 103 | String connectedFlags = iterator.next(); // "connected" : "disconnected" 104 | 105 | if (CONNECTED.equals(connectedFlags)) { 106 | connected = true; 107 | } 108 | 109 | DisqueNode partition = new DisqueNode(hostAndPort.getHostText(), 110 | hostAndPort.hasPort() ? 
hostAndPort.getPort() : DisqueURI.DEFAULT_DISQUE_PORT, nodeId, connected, pingSentTs, 111 | pongReceivedTs, nodeFlags); 112 | 113 | return partition; 114 | 115 | } 116 | 117 | private static Set readFlags(List flagStrings) { 118 | 119 | Set flags = new HashSet<>(); 120 | for (String flagString : flagStrings) { 121 | if (FLAG_MAPPING.containsKey(flagString)) { 122 | flags.add(FLAG_MAPPING.get(flagString)); 123 | } 124 | } 125 | return Collections.unmodifiableSet(flags); 126 | } 127 | 128 | private static long getLongFromIterator(Iterator iterator, long defaultValue) { 129 | if (iterator.hasNext()) { 130 | Object object = iterator.next(); 131 | if (object instanceof String) { 132 | return Long.parseLong((String) object); 133 | } 134 | } 135 | return defaultValue; 136 | } 137 | 138 | } 139 | -------------------------------------------------------------------------------- /src/main/java/biz/paluch/spinach/api/sync/DisqueQueueCommands.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | package biz.paluch.spinach.api.sync; 17 | 18 | import java.util.List; 19 | import java.util.Map; 20 | 21 | import biz.paluch.spinach.api.Job; 22 | import biz.paluch.spinach.api.PauseArgs; 23 | import biz.paluch.spinach.api.QScanArgs; 24 | 25 | import com.lambdaworks.redis.KeyScanCursor; 26 | import com.lambdaworks.redis.ScanCursor; 27 | 28 | /** 29 | * 30 | * Synchronous executed commands related with Disque Queues. 31 | * 32 | * @param Key type. 33 | * @param Value type. 34 | * @author Mark Paluch 35 | */ 36 | public interface DisqueQueueCommands { 37 | 38 | /** 39 | * Remove the job from the queue. 40 | * 41 | * @param jobIds the job Id's 42 | * @return the number of jobs actually moved from queue to active state 43 | */ 44 | long dequeue(String... jobIds); 45 | 46 | /** 47 | * Queue jobs if not already queued. 48 | * 49 | * @param jobIds the job Id's 50 | * @return the number of jobs actually move from active to queued state 51 | */ 52 | long enqueue(String... jobIds); 53 | 54 | /** 55 | * Queue jobs if not already queued and increment the nack counter. 56 | * 57 | * @param jobIds the job Id's 58 | * @return the number of jobs actually move from active to queued state 59 | */ 60 | long nack(String... jobIds); 61 | 62 | /** 63 | * Change the {@literal PAUSE} pause state to: 64 | *
    65 | *
  • Pause a queue
  • 66 | *
  • Clear the pause state for a queue
  • 67 | *
  • Query the pause state
  • 68 | *
  • Broadcast the pause state
  • 69 | *
70 | * 71 | * @param queue the queue name 72 | * @param pauseArgs the pause args 73 | * @return pause state of the queue. 74 | */ 75 | String pause(K queue, PauseArgs pauseArgs); 76 | 77 | /** 78 | * Return the number of jobs queued. 79 | * 80 | * @param queue the queue name 81 | * @return the number of jobs queued 82 | */ 83 | long qlen(K queue); 84 | 85 | /** 86 | * Return an array of at most "count" jobs available inside the queue "queue" without removing the jobs from the queue. This 87 | * is basically an introspection and debugging command. 88 | * 89 | * @param queue the queue name 90 | * @param count number of jobs to return 91 | * @return List of jobs. 92 | */ 93 | List> qpeek(K queue, long count); 94 | 95 | /** 96 | * Incrementally iterate the keys space. 97 | * 98 | * @return KeyScanCursor<K> scan cursor. 99 | */ 100 | KeyScanCursor qscan(); 101 | 102 | /** 103 | * Incrementally iterate the keys space. 104 | * 105 | * @param scanArgs scan arguments 106 | * @return KeyScanCursor<K> scan cursor. 107 | */ 108 | KeyScanCursor qscan(QScanArgs scanArgs); 109 | 110 | /** 111 | * Incrementally iterate the keys space. 112 | * 113 | * @param scanCursor cursor to resume from a previous scan 114 | * @return KeyScanCursor<K> scan cursor. 115 | */ 116 | KeyScanCursor qscan(ScanCursor scanCursor); 117 | 118 | /** 119 | * Incrementally iterate the keys space. 120 | * 121 | * @param scanCursor cursor to resume from a previous scan 122 | * @param scanArgs scan arguments 123 | * @return KeyScanCursor<K> scan cursor. 124 | */ 125 | KeyScanCursor qscan(ScanCursor scanCursor, QScanArgs scanArgs); 126 | 127 | /** 128 | * Retrieve information about a queue as key value pairs. 129 | * 130 | * @param queue the queue name 131 | * @return map containing the statistics (key value pairs) 132 | */ 133 | Map qstat(K queue); 134 | 135 | /** 136 | * If the job is queued, remove it from queue and change state to active. 
Postpone the job requeue time in the future so 137 | * that we'll wait the retry time before enqueueing again. 138 | * 139 | * Return how much time the worker likely have before the next requeue event or an error: 140 | *
    141 | *
  • -ACKED: The job is already acknowledged, so was processed already.
  • 142 | *
  • -NOJOB We don't know about this job. The job was either already acknowledged and purged, or this node never received 143 | * a copy.
  • 144 | *
  • -TOOLATE 50% of the job TTL already elapsed, is no longer possible to delay it.
  • 145 | *
146 | * 147 | * @param jobId the job Id 148 | * @return retry count. 149 | */ 150 | long working(String jobId); 151 | } 152 | -------------------------------------------------------------------------------- /src/main/java/biz/paluch/spinach/api/QScanArgs.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package biz.paluch.spinach.api; 17 | 18 | import static com.lambdaworks.redis.protocol.CommandKeyword.*; 19 | 20 | import com.lambdaworks.redis.protocol.CommandArgs; 21 | 22 | /** 23 | * Arguments for scanning queues/jobs. 
24 | * 25 | * @author Mark Paluch 26 | */ 27 | public class QScanArgs { 28 | 29 | private Long count; 30 | private Integer minlen; 31 | private Integer maxlen; 32 | private Integer importrate; 33 | private boolean busyloop; 34 | 35 | public Long getCount() { 36 | return count; 37 | } 38 | 39 | public void setCount(Long count) { 40 | this.count = count; 41 | } 42 | 43 | public Integer getMinlen() { 44 | return minlen; 45 | } 46 | 47 | public void setMinlen(Integer minlen) { 48 | this.minlen = minlen; 49 | } 50 | 51 | public Integer getMaxlen() { 52 | return maxlen; 53 | } 54 | 55 | public void setMaxlen(Integer maxlen) { 56 | this.maxlen = maxlen; 57 | } 58 | 59 | public Integer getImportrate() { 60 | return importrate; 61 | } 62 | 63 | public void setImportrate(Integer importrate) { 64 | this.importrate = importrate; 65 | } 66 | 67 | public boolean isBusyloop() { 68 | return busyloop; 69 | } 70 | 71 | public void setBusyloop(boolean busyloop) { 72 | this.busyloop = busyloop; 73 | } 74 | 75 | public static Builder builder() { 76 | return new Builder(); 77 | } 78 | 79 | /** 80 | * Static builder methods. 81 | */ 82 | public static class Builder { 83 | 84 | private Long count; 85 | private Integer minlen; 86 | private Integer maxlen; 87 | private Integer importrate; 88 | private boolean busyloop; 89 | 90 | /** 91 | * Utility constructor. 92 | */ 93 | private Builder() { 94 | 95 | } 96 | 97 | /** 98 | * Limit result to {@code count} items. 
99 | * 100 | * @param count number of items 101 | * @return the current builder 102 | */ 103 | public Builder count(long count) { 104 | this.count = count; 105 | return this; 106 | } 107 | 108 | public Builder minlen(int minlen) { 109 | this.minlen = minlen; 110 | return this; 111 | } 112 | 113 | public Builder maxlen(int maxlen) { 114 | this.maxlen = maxlen; 115 | return this; 116 | } 117 | 118 | public Builder importrate(int importrate) { 119 | this.importrate = importrate; 120 | return this; 121 | } 122 | 123 | /** 124 | * Enable blocking loop mode. 125 | * 126 | * @return the current builder 127 | */ 128 | public Builder busyloop() { 129 | return busyloop(true); 130 | } 131 | 132 | /** 133 | * Enable/disable blocking loop mode 134 | * 135 | * @param busyloop {@literal true} or {@literal false} 136 | * @return the current builder 137 | */ 138 | public Builder busyloop(boolean busyloop) { 139 | this.busyloop = busyloop; 140 | return this; 141 | } 142 | 143 | 144 | /** 145 | * Build an instance of {@link QScanArgs} 146 | * 147 | * @return a new instance of {@link QScanArgs} 148 | */ 149 | public QScanArgs build() { 150 | 151 | QScanArgs result = new QScanArgs(); 152 | result.setBusyloop(busyloop); 153 | result.setMinlen(minlen); 154 | result.setMaxlen(maxlen); 155 | result.setImportrate(importrate); 156 | result.setCount(count); 157 | 158 | return result; 159 | 160 | } 161 | } 162 | 163 | public void build(CommandArgs args) { 164 | 165 | // QSCAN [COUNT ] [BUSYLOOP] [MINLEN ] [MAXLEN ] [IMPORTRATE ] 166 | 167 | if (count != null) { 168 | args.add(COUNT).add(count); 169 | } 170 | 171 | if (busyloop) { 172 | args.add(CommandKeyword.BUSYLOOP); 173 | } 174 | 175 | if (minlen != null) { 176 | args.add(CommandKeyword.MINLEN).add(minlen); 177 | } 178 | 179 | if (minlen != null) { 180 | args.add(CommandKeyword.MAXLEN).add(maxlen); 181 | } 182 | 183 | if (importrate != null) { 184 | args.add(CommandKeyword.IMPORTRATE).add(importrate); 185 | } 186 | } 187 | 188 | } 189 | 
-------------------------------------------------------------------------------- /src/main/java/biz/paluch/spinach/impl/ClusterAwareNodeSupport.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package biz.paluch.spinach.impl; 17 | 18 | import java.io.Serializable; 19 | import java.util.*; 20 | 21 | import com.lambdaworks.redis.internal.LettuceAssert; 22 | 23 | import biz.paluch.spinach.api.DisqueConnection; 24 | import biz.paluch.spinach.cluster.DisqueNode; 25 | 26 | /** 27 | * Convenient base class for classes that rely on the cluster topology of Disque. Typically subclassed by 28 | * {@link SocketAddressSupplier SocketAddressSuppliers}. 29 | * 30 | * @author Mark Paluch 31 | */ 32 | public abstract class ClusterAwareNodeSupport { 33 | 34 | public final static int MAX_ALLOWED_PRIORITY = 99; 35 | 36 | private DisqueConnection disqueConnection; 37 | private final List nodes = new ArrayList<>(); 38 | 39 | /** 40 | * Load/reload cluster nodes and order the nodes by its priority. 
41 | */ 42 | protected void reloadNodes() { 43 | 44 | Hello hello = HelloParser.parse(disqueConnection.sync().hello()); 45 | Collections.sort(hello.nodes, new Comparator() { 46 | @Override 47 | public int compare(PrioritizedDisqueNode o1, PrioritizedDisqueNode o2) { 48 | 49 | if (o1.priority == o2.priority) { 50 | return o1.disqueNode.getPort() - o2.disqueNode.getPort(); 51 | } 52 | return o1.priority - o2.priority; 53 | } 54 | }); 55 | this.nodes.clear(); 56 | 57 | for (PrioritizedDisqueNode node : hello.nodes) { 58 | if (isFiltered(node)) { 59 | continue; 60 | } 61 | 62 | this.nodes.add(node.disqueNode); 63 | } 64 | } 65 | 66 | /** 67 | * @param node the cluster node 68 | * @return {@literal true} if the {@code node} is filtered 69 | */ 70 | protected boolean isFiltered(PrioritizedDisqueNode node) { 71 | if (node.priority > MAX_ALLOWED_PRIORITY) { 72 | return true; 73 | } 74 | return false; 75 | } 76 | 77 | public void setConnection(DisqueConnection disqueConnection) { 78 | this.disqueConnection = (DisqueConnection) disqueConnection; 79 | } 80 | 81 | /** 82 | * 83 | * @return the list of {@link DisqueNode nodes} 84 | */ 85 | public List getNodes() { 86 | return nodes; 87 | } 88 | 89 | /** 90 | * Disque node with priority. 91 | */ 92 | static class PrioritizedDisqueNode implements Serializable { 93 | 94 | DisqueNode disqueNode; 95 | int priority; 96 | 97 | } 98 | 99 | static class Hello { 100 | long version; 101 | String nodeId; 102 | List nodes = new ArrayList<>(); 103 | 104 | } 105 | 106 | static class HelloParser { 107 | 108 | public static Hello parse(List hello) { 109 | 110 | LettuceAssert.isTrue(hello.size() > 2, "HELLO output must contain more than two elements"); 111 | LettuceAssert.isTrue(Long.valueOf(1).equals(hello.get(0)), 112 | "Only HELLO version 1 supported. 
Received HELLO version is " + hello.get(0)); 113 | 114 | Hello result = new Hello(); 115 | result.version = (Long) hello.get(0); 116 | result.nodeId = (String) hello.get(1); 117 | 118 | for (int i = 2; i < hello.size(); i++) { 119 | LettuceAssert.assertState(hello.get(i) instanceof Collection, 120 | "HELLO output at index " + i + " is not a collection"); 121 | Collection nodeDetails = (Collection) hello.get(i); 122 | LettuceAssert.assertState(nodeDetails.size() > 3, "HELLO output at index " + i + " has less than 4 elements"); 123 | 124 | Iterator iterator = nodeDetails.iterator(); 125 | 126 | DisqueNode disqueNode = new DisqueNode(); 127 | disqueNode.setNodeId((String) iterator.next()); 128 | disqueNode.setAddr((String) iterator.next()); 129 | disqueNode.setPort(Integer.parseInt((String) iterator.next())); 130 | 131 | PrioritizedDisqueNode prioritizedDisqueNode = new PrioritizedDisqueNode(); 132 | prioritizedDisqueNode.disqueNode = disqueNode; 133 | prioritizedDisqueNode.priority = Integer.parseInt((String) iterator.next()); 134 | 135 | result.nodes.add(prioritizedDisqueNode); 136 | } 137 | 138 | return result; 139 | } 140 | } 141 | 142 | } 143 | -------------------------------------------------------------------------------- /src/main/java/biz/paluch/spinach/api/PauseArgs.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package biz.paluch.spinach.api; 17 | 18 | import com.lambdaworks.redis.protocol.CommandArgs; 19 | 20 | /** 21 | * Arguments for adding a job. 22 | * 23 | * @author Mark Paluch 24 | */ 25 | public class PauseArgs { 26 | 27 | private String option; 28 | private Boolean state; 29 | private Boolean bcast; 30 | 31 | /** 32 | * 33 | * @return the PAUSE option, IN, OUT, NONE or ALL 34 | */ 35 | public String getOption() { 36 | return option; 37 | } 38 | 39 | public void setOption(String option) { 40 | this.option = option; 41 | } 42 | 43 | /** 44 | * 45 | * @return {@literal true} if the state should be queried 46 | */ 47 | public Boolean getState() { 48 | return state; 49 | } 50 | 51 | public void setState(Boolean state) { 52 | this.state = state; 53 | } 54 | 55 | /** 56 | * 57 | * @return {@literal true} if the pause command should be broadcasted also to other cluster nodes 58 | */ 59 | public Boolean getBcast() { 60 | return bcast; 61 | } 62 | 63 | public void setBcast(Boolean bcast) { 64 | this.bcast = bcast; 65 | } 66 | 67 | /** 68 | * Create a new builder for {@link PauseArgs}. 69 | * 70 | * @return a new builder for {@link PauseArgs} 71 | */ 72 | public static Builder builder() { 73 | return new Builder(); 74 | } 75 | 76 | /** 77 | * Static builder methods. 78 | */ 79 | public static class Builder { 80 | 81 | private String option; 82 | private Boolean state; 83 | private Boolean bcast; 84 | 85 | /** 86 | * Utility constructor. 87 | */ 88 | private Builder() { 89 | 90 | } 91 | 92 | /** 93 | * Set the queue as paused for incoming messages using the {@code IN} option. 94 | * 95 | * @return the builder 96 | */ 97 | public Builder in() { 98 | this.option = CommandKeyword.IN.name(); 99 | return this; 100 | } 101 | 102 | /** 103 | * Set the queue as paused for outgoing messages using the {@code OUT} option. 
104 | * 105 | * @return the builder 106 | */ 107 | public Builder out() { 108 | this.option = CommandKeyword.OUT.name(); 109 | return this; 110 | } 111 | 112 | /** 113 | * Clear the pause if any, both {@code IN} and {@code OUT}. 114 | * 115 | * @return the builder 116 | */ 117 | public Builder none() { 118 | this.option = CommandKeyword.NONE.name(); 119 | return this; 120 | } 121 | 122 | /** 123 | * Same as {@link #in()} and {@link #out()}. 124 | * 125 | * @return the builder 126 | */ 127 | public Builder all() { 128 | this.option = CommandKeyword.ALL.name(); 129 | return this; 130 | } 131 | 132 | /** 133 | * Query the current paused state and reports one of the strings "in", "out", "all", "none". 134 | * 135 | * @return the builder 136 | */ 137 | public Builder state() { 138 | this.state = true; 139 | return this; 140 | } 141 | 142 | /** 143 | * Broadcast the PAUSE command to other nodes. 144 | * 145 | * @return the builder 146 | */ 147 | public Builder bcast() { 148 | this.bcast = true; 149 | return this; 150 | } 151 | 152 | /** 153 | * Build the {@link PauseArgs}. 154 | * 155 | * @return a new instance of {@link PauseArgs} 156 | */ 157 | public PauseArgs build() { 158 | PauseArgs pauseArgs = new PauseArgs(); 159 | pauseArgs.setBcast(bcast); 160 | pauseArgs.setOption(option); 161 | pauseArgs.setState(state); 162 | return pauseArgs; 163 | 164 | } 165 | } 166 | 167 | /** 168 | * Build argument sequence and populate {@code args}. 169 | * 170 | * @param args the target command args, must not be {@literal null} 171 | */ 172 | public void build(CommandArgs args) { 173 | 174 | // PAUSE [option option ... 
option] 175 | 176 | if (bcast != null) { 177 | args.add(CommandKeyword.BCAST.getBytes()); 178 | } 179 | 180 | if (state != null) { 181 | args.add(CommandKeyword.STATE.getBytes()); 182 | } 183 | 184 | if (option != null) { 185 | args.add(option); 186 | } 187 | } 188 | } 189 | -------------------------------------------------------------------------------- /src/main/java/biz/paluch/spinach/api/rx/DisqueQueueReactiveCommands.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package biz.paluch.spinach.api.rx; 17 | 18 | import java.util.Map; 19 | 20 | import rx.Observable; 21 | import biz.paluch.spinach.api.Job; 22 | import biz.paluch.spinach.api.PauseArgs; 23 | import biz.paluch.spinach.api.QScanArgs; 24 | 25 | import com.lambdaworks.redis.KeyScanCursor; 26 | import com.lambdaworks.redis.ScanCursor; 27 | 28 | /** 29 | * Reactive commands related with Disque Queues. 30 | * 31 | * @param Key type. 32 | * @param Value type. 33 | * @author Mark Paluch 34 | */ 35 | public interface DisqueQueueReactiveCommands { 36 | 37 | /** 38 | * Remove the job from the queue. 39 | * 40 | * @param jobIds the job Id's 41 | * @return the number of jobs actually moved from queue to active state 42 | */ 43 | Observable dequeue(String... jobIds); 44 | 45 | /** 46 | * Queue jobs if not already queued. 
47 | * 48 | * @param jobIds the job Id's 49 | * @return the number of jobs actually move from active to queued state 50 | */ 51 | Observable enqueue(String... jobIds); 52 | 53 | /** 54 | * Queue jobs if not already queued and increment the nack counter. 55 | * 56 | * @param jobIds the job Id's 57 | * @return the number of jobs actually move from active to queued state 58 | */ 59 | Observable nack(String... jobIds); 60 | 61 | /** 62 | * Change the {@literal PAUSE} pause state to: 63 | *
    64 | *
  • Pause a queue
  • 65 | *
  • Clear the pause state for a queue
  • 66 | *
  • Query the pause state
  • 67 | *
  • Broadcast the pause state
  • 68 | *
69 | * 70 | * @param queue the queue name 71 | * @param pauseArgs the pause args 72 | * @return pause state of the queue. 73 | */ 74 | Observable pause(K queue, PauseArgs pauseArgs); 75 | 76 | /** 77 | * Return the number of jobs queued. 78 | * 79 | * @param queue the queue name 80 | * @return the number of jobs queued 81 | */ 82 | Observable qlen(K queue); 83 | 84 | /** 85 | * Return an array of at most "count" jobs available inside the queue "queue" without removing the jobs from the queue. This 86 | * is basically an introspection and debugging command. 87 | * 88 | * @param queue the queue name 89 | * @param count number of jobs to return 90 | * @return List of jobs. 91 | */ 92 | Observable> qpeek(K queue, long count); 93 | 94 | /** 95 | * Incrementally iterate the keys space. 96 | * 97 | * @return KeyScanCursor<K> scan cursor. 98 | */ 99 | Observable> qscan(); 100 | 101 | /** 102 | * Incrementally iterate the keys space. 103 | * 104 | * @param scanArgs scan arguments 105 | * @return KeyScanCursor<K> scan cursor. 106 | */ 107 | Observable> qscan(QScanArgs scanArgs); 108 | 109 | /** 110 | * Incrementally iterate the keys space. 111 | * 112 | * @param scanCursor cursor to resume from a previous scan 113 | * @return KeyScanCursor<K> scan cursor. 114 | */ 115 | Observable> qscan(ScanCursor scanCursor); 116 | 117 | /** 118 | * Incrementally iterate the keys space. 119 | * 120 | * @param scanCursor cursor to resume from a previous scan 121 | * @param scanArgs scan arguments 122 | * @return KeyScanCursor<K> scan cursor. 123 | */ 124 | Observable> qscan(ScanCursor scanCursor, QScanArgs scanArgs); 125 | 126 | /** 127 | * Retrieve information about a queue as key value pairs. 128 | * 129 | * @param queue the queue name 130 | * @return map containing the statistics (key value pairs) 131 | */ 132 | Observable> qstat(K queue); 133 | 134 | /** 135 | * If the job is queued, remove it from queue and change state to active. 
Postpone the job requeue time in the future so 136 | * that we'll wait the retry time before enqueueing again. 137 | * 138 | * * Return how much time the worker likely have before the next requeue event or an error: 139 | *
    140 | *
  • -ACKED: The job is already acknowledged, so was processed already.
  • 141 | *
  • -NOJOB We don't know about this job. The job was either already acknowledged and purged, or this node never received 142 | * a copy.
  • 143 | *
  • -TOOLATE 50% of the job TTL already elapsed, is no longer possible to delay it.
  • 144 | *
145 | * 146 | * @param jobId the job Id 147 | * @return retry count. 148 | */ 149 | Observable working(String jobId); 150 | 151 | } 152 | -------------------------------------------------------------------------------- /src/main/java/biz/paluch/spinach/api/async/DisqueQueueAsyncCommands.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package biz.paluch.spinach.api.async; 17 | 18 | import java.util.List; 19 | import java.util.Map; 20 | 21 | import biz.paluch.spinach.api.Job; 22 | import biz.paluch.spinach.api.PauseArgs; 23 | import biz.paluch.spinach.api.QScanArgs; 24 | 25 | import com.lambdaworks.redis.KeyScanCursor; 26 | import com.lambdaworks.redis.RedisFuture; 27 | import com.lambdaworks.redis.ScanCursor; 28 | 29 | /** 30 | * Asynchronous executed commands related with Disque Queues. 31 | * 32 | * @param Key type. 33 | * @param Value type. 34 | * @author Mark Paluch 35 | */ 36 | public interface DisqueQueueAsyncCommands { 37 | 38 | /** 39 | * Remove the job from the queue. 40 | * 41 | * @param jobIds the job Id's 42 | * @return the number of jobs actually moved from queue to active state 43 | */ 44 | RedisFuture dequeue(String... jobIds); 45 | 46 | /** 47 | * Queue jobs if not already queued. 
48 | * 49 | * @param jobIds the job Id's 50 | * @return the number of jobs actually move from active to queued state 51 | */ 52 | RedisFuture enqueue(String... jobIds); 53 | 54 | /** 55 | * Queue jobs if not already queued and increment the nack counter. 56 | * 57 | * @param jobIds the job Id's 58 | * @return the number of jobs actually move from active to queued state 59 | */ 60 | RedisFuture nack(String... jobIds); 61 | 62 | /** 63 | * Change the {@literal PAUSE} pause state to: 64 | *
    65 | *
  • Pause a queue
  • 66 | *
  • Clear the pause state for a queue
  • 67 | *
  • Query the pause state
  • 68 | *
  • Broadcast the pause state
  • 69 | *
70 | * 71 | * @param queue the queue name 72 | * @param pauseArgs the pause args 73 | * @return pause state of the queue. 74 | */ 75 | RedisFuture pause(K queue, PauseArgs pauseArgs); 76 | 77 | /** 78 | * Return the number of jobs queued. 79 | * 80 | * @param queue the queue name 81 | * @return the number of jobs queued 82 | */ 83 | RedisFuture qlen(K queue); 84 | 85 | /** 86 | * Return an array of at most "count" jobs available inside the queue "queue" without removing the jobs from the queue. This 87 | * is basically an introspection and debugging command. 88 | * 89 | * @param queue the queue name 90 | * @param count number of jobs to return 91 | * @return List of jobs. 92 | */ 93 | RedisFuture>> qpeek(K queue, long count); 94 | 95 | /** 96 | * Incrementally iterate the keys space. 97 | * 98 | * @return KeyScanCursor<K> scan cursor. 99 | */ 100 | RedisFuture> qscan(); 101 | 102 | /** 103 | * Incrementally iterate the keys space. 104 | * 105 | * @param scanArgs scan arguments 106 | * @return KeyScanCursor<K> scan cursor. 107 | */ 108 | RedisFuture> qscan(QScanArgs scanArgs); 109 | 110 | /** 111 | * Incrementally iterate the keys space. 112 | * 113 | * @param scanCursor cursor to resume from a previous scan 114 | * @return KeyScanCursor<K> scan cursor. 115 | */ 116 | RedisFuture> qscan(ScanCursor scanCursor); 117 | 118 | /** 119 | * Incrementally iterate the keys space. 120 | * 121 | * @param scanCursor cursor to resume from a previous scan 122 | * @param scanArgs scan arguments 123 | * @return KeyScanCursor<K> scan cursor. 124 | */ 125 | RedisFuture> qscan(ScanCursor scanCursor, QScanArgs scanArgs); 126 | 127 | /** 128 | * Retrieve information about a queue as key value pairs. 129 | * 130 | * @param queue the queue name 131 | * @return map containing the statistics (key value pairs) 132 | */ 133 | RedisFuture> qstat(K queue); 134 | 135 | /** 136 | * If the job is queued, remove it from queue and change state to active. 
Postpone the job requeue time in the future so 137 | * that we'll wait the retry time before enqueueing again. 138 | * 139 | * * Return how much time the worker likely have before the next requeue event or an error: 140 | *
    141 | *
  • -ACKED: The job is already acknowledged, so was processed already.
  • 142 | *
  • -NOJOB We don't know about this job. The job was either already acknowledged and purged, or this node never received 143 | * a copy.
  • 144 | *
  • -TOOLATE 50% of the job TTL already elapsed, is no longer possible to delay it.
  • 145 | *
146 | * 147 | * @param jobId the job Id 148 | * @return retry count. 149 | */ 150 | RedisFuture working(String jobId); 151 | } 152 | -------------------------------------------------------------------------------- /src/main/java/biz/paluch/spinach/api/JScanArgs.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package biz.paluch.spinach.api; 17 | 18 | import static com.lambdaworks.redis.protocol.CommandKeyword.COUNT; 19 | 20 | import java.util.HashSet; 21 | import java.util.Set; 22 | 23 | import com.lambdaworks.redis.protocol.CommandArgs; 24 | 25 | /** 26 | * Arguments for scanning queues/jobs. 27 | * 28 | * @param the queue id type. 
29 | * 30 | * @author Mark Paluch 31 | */ 32 | public class JScanArgs { 33 | 34 | private Long count; 35 | private boolean busyloop; 36 | private K queue; 37 | private Set jobStates = new HashSet<>(); 38 | 39 | public Long getCount() { 40 | return count; 41 | } 42 | 43 | public void setCount(Long count) { 44 | this.count = count; 45 | } 46 | 47 | public boolean isBusyloop() { 48 | return busyloop; 49 | } 50 | 51 | public void setBusyloop(boolean busyloop) { 52 | this.busyloop = busyloop; 53 | } 54 | 55 | public K getQueue() { 56 | return queue; 57 | } 58 | 59 | public void setQueue(K queue) { 60 | this.queue = queue; 61 | } 62 | 63 | public Set getJobStates() { 64 | return jobStates; 65 | } 66 | 67 | public void setJobStates(Set jobStates) { 68 | this.jobStates = jobStates; 69 | } 70 | 71 | public static Builder builder() { 72 | return new Builder(); 73 | } 74 | 75 | /** 76 | * Static builder methods. 77 | */ 78 | public static class Builder { 79 | 80 | private Long count; 81 | private boolean busyloop; 82 | private K queue; 83 | private Set jobStates = new HashSet<>(); 84 | 85 | /** 86 | * Utility constructor. 87 | */ 88 | private Builder() 89 | { 90 | 91 | } 92 | 93 | /** 94 | * Limit result to {@code count} items. 95 | * 96 | * @param count number of items 97 | * @return the current builder 98 | */ 99 | public Builder count(long count) { 100 | this.count = count; 101 | return this; 102 | } 103 | 104 | /** 105 | * Enable blocking loop mode. 
106 | * 107 | * @return the current builder 108 | */ 109 | public Builder busyloop() { 110 | return busyloop(true); 111 | } 112 | 113 | /** 114 | * Enable/disable blocking loop mode 115 | * 116 | * @param busyloop {@literal true} or {@literal false} 117 | * @return the current builder 118 | */ 119 | public Builder busyloop(boolean busyloop) { 120 | this.busyloop = busyloop; 121 | return this; 122 | } 123 | 124 | /** 125 | * Scan a specific queue 126 | * 127 | * @param queue the queue name 128 | * @return the current builder 129 | */ 130 | public Builder queue(K queue) { 131 | this.queue = queue; 132 | return this; 133 | } 134 | 135 | /** 136 | * Limit to specific {@link biz.paluch.spinach.api.JScanArgs.JobState}'s. 137 | * 138 | * @param jobState Array of job states. Duplicate states are omitted. 139 | * @return the current builder 140 | */ 141 | public Builder jobstates(JobState... jobState) { 142 | for (JobState state : jobState) { 143 | this.jobStates.add(state); 144 | } 145 | return this; 146 | } 147 | 148 | /** 149 | * Build an instance of {@link JScanArgs} 150 | * 151 | * @return a new instance of {@link JScanArgs} 152 | */ 153 | public JScanArgs build() { 154 | 155 | JScanArgs result = new JScanArgs(); 156 | result.setBusyloop(busyloop); 157 | result.setCount(count); 158 | result.getJobStates().addAll(jobStates); 159 | result.setQueue(queue); 160 | 161 | return result; 162 | 163 | } 164 | } 165 | 166 | public void build(CommandArgs args) { 167 | 168 | // JSCAN [] [COUNT ] [BUSYLOOP] [QUEUE ] [STATE STATE ... 
STATE ] [REPLY 169 | // all|id] 170 | 171 | if (count != null) { 172 | args.add(COUNT).add(count); 173 | } 174 | 175 | if (busyloop) { 176 | args.add(CommandKeyword.BUSYLOOP); 177 | } 178 | 179 | if (queue != null) { 180 | args.add(CommandKeyword.QUEUE).addKey((K) queue); 181 | } 182 | 183 | for (JobState jobState : jobStates) { 184 | args.add(CommandKeyword.STATE).add(jobState.id); 185 | } 186 | 187 | } 188 | 189 | public enum JobState { 190 | /** 191 | * Waiting to be replicated enough times. 192 | */ 193 | WAIT_REPL("wait-repl"), 194 | 195 | /** 196 | * Not acked, not queued, still active job. 197 | */ 198 | ACTIVE("active"), 199 | 200 | /** 201 | * Not acked, but queued in this node. 202 | */ 203 | QUEUED("queued"), 204 | 205 | /** 206 | * Acked, no longer active, to garbage collect. 207 | */ 208 | ACKED("acked"); 209 | 210 | private final String id; 211 | 212 | JobState(String id) { 213 | this.id = id; 214 | } 215 | } 216 | 217 | } 218 | --------------------------------------------------------------------------------