├── RedisCluster
├── README.md
├── target
│ ├── RedisCluster.jar
│ └── lib
│ │ ├── junit-4.4.jar
│ │ ├── mail-1.4.5.jar
│ │ ├── guava-11.0.1.jar
│ │ ├── jedis-2.1.0.jar
│ │ ├── jline-0.9.94.jar
│ │ ├── jsr305-1.3.9.jar
│ │ ├── log4j-1.2.16.jar
│ │ ├── ezmorph-1.0.6.jar
│ │ ├── slf4j-api-1.5.8.jar
│ │ ├── zookeeper-3.3.6.jar
│ │ ├── activation-1.1.1.jar
│ │ ├── commons-codec-1.4.jar
│ │ ├── commons-dbcp-1.4.jar
│ │ ├── commons-lang-2.6.jar
│ │ ├── commons-pool-1.6.jar
│ │ ├── commons-digester-1.8.jar
│ │ ├── commons-email-1.3.1.jar
│ │ ├── curator-client-1.0.9.jar
│ │ ├── hadoop-common-0.21.0.jar
│ │ ├── json-lib-2.4-jdk15.jar
│ │ ├── slf4j-log4j12-1.5.8.jar
│ │ ├── commons-httpclient-3.1.jar
│ │ ├── commons-logging-1.0.4.jar
│ │ ├── commons-beanutils-1.8.0.jar
│ │ ├── commons-collections-3.2.1.jar
│ │ ├── commons-configuration-1.6.jar
│ │ ├── curator-framework-1.0.9.jar
│ │ └── commons-beanutils-core-1.8.0.jar
├── src
│ ├── main
│ │ ├── java
│ │ │ └── com
│ │ │ │ └── rr
│ │ │ │ └── redis
│ │ │ │ ├── client
│ │ │ │ ├── hash
│ │ │ │ │ ├── IHashFunc.java
│ │ │ │ │ └── SimpleHashing.java
│ │ │ │ ├── exception
│ │ │ │ │ └── ClusterOpException.java
│ │ │ │ ├── RedisClusterPool.java
│ │ │ │ ├── model
│ │ │ │ │ └── Node.java
│ │ │ │ ├── zookeeper
│ │ │ │ │ ├── ZKClient.java
│ │ │ │ │ └── ZookeeperService.java
│ │ │ │ ├── RedisClusterPoolProvider.java
│ │ │ │ └── RedisClusterPoolClient.java
│ │ │ │ └── simple
│ │ │ │ ├── RedisUtil.java
│ │ │ │ └── RedisAPI.java
│ │ └── resources
│ │ │ └── log4j.xml
│ └── test
│ │ └── java
│ │ └── com
│ │ └── rr
│ │ └── redis
│ │ └── test
│ │ ├── ZKTest.java
│ │ └── TestRedisService.java
└── pom.xml
├── README.md
├── simple
├── redis.properties
├── README.md
├── applicationContext-core.xml
└── RedisService.java
├── .gitignore
├── lock
└── RedisTool.java
└── install
├── README.md
└── redis.conf
/RedisCluster/README.md:
--------------------------------------------------------------------------------
1 | # redis
2 |
3 | RedisCluster 基于zookeeper的 分布式redis服务
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # redis
2 |
3 | RedisCluster 基于zookeeper的 分布式redis服务
4 |
5 | lock 基于Redis的分布式锁
6 |
--------------------------------------------------------------------------------
/RedisCluster/target/RedisCluster.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kongzhidea/redis/HEAD/RedisCluster/target/RedisCluster.jar
--------------------------------------------------------------------------------
/RedisCluster/target/lib/junit-4.4.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kongzhidea/redis/HEAD/RedisCluster/target/lib/junit-4.4.jar
--------------------------------------------------------------------------------
/RedisCluster/target/lib/mail-1.4.5.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kongzhidea/redis/HEAD/RedisCluster/target/lib/mail-1.4.5.jar
--------------------------------------------------------------------------------
/RedisCluster/target/lib/guava-11.0.1.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kongzhidea/redis/HEAD/RedisCluster/target/lib/guava-11.0.1.jar
--------------------------------------------------------------------------------
/RedisCluster/target/lib/jedis-2.1.0.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kongzhidea/redis/HEAD/RedisCluster/target/lib/jedis-2.1.0.jar
--------------------------------------------------------------------------------
/RedisCluster/target/lib/jline-0.9.94.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kongzhidea/redis/HEAD/RedisCluster/target/lib/jline-0.9.94.jar
--------------------------------------------------------------------------------
/RedisCluster/target/lib/jsr305-1.3.9.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kongzhidea/redis/HEAD/RedisCluster/target/lib/jsr305-1.3.9.jar
--------------------------------------------------------------------------------
/RedisCluster/target/lib/log4j-1.2.16.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kongzhidea/redis/HEAD/RedisCluster/target/lib/log4j-1.2.16.jar
--------------------------------------------------------------------------------
/RedisCluster/target/lib/ezmorph-1.0.6.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kongzhidea/redis/HEAD/RedisCluster/target/lib/ezmorph-1.0.6.jar
--------------------------------------------------------------------------------
/RedisCluster/target/lib/slf4j-api-1.5.8.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kongzhidea/redis/HEAD/RedisCluster/target/lib/slf4j-api-1.5.8.jar
--------------------------------------------------------------------------------
/RedisCluster/target/lib/zookeeper-3.3.6.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kongzhidea/redis/HEAD/RedisCluster/target/lib/zookeeper-3.3.6.jar
--------------------------------------------------------------------------------
/RedisCluster/target/lib/activation-1.1.1.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kongzhidea/redis/HEAD/RedisCluster/target/lib/activation-1.1.1.jar
--------------------------------------------------------------------------------
/RedisCluster/target/lib/commons-codec-1.4.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kongzhidea/redis/HEAD/RedisCluster/target/lib/commons-codec-1.4.jar
--------------------------------------------------------------------------------
/RedisCluster/target/lib/commons-dbcp-1.4.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kongzhidea/redis/HEAD/RedisCluster/target/lib/commons-dbcp-1.4.jar
--------------------------------------------------------------------------------
/RedisCluster/target/lib/commons-lang-2.6.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kongzhidea/redis/HEAD/RedisCluster/target/lib/commons-lang-2.6.jar
--------------------------------------------------------------------------------
/RedisCluster/target/lib/commons-pool-1.6.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kongzhidea/redis/HEAD/RedisCluster/target/lib/commons-pool-1.6.jar
--------------------------------------------------------------------------------
/RedisCluster/target/lib/commons-digester-1.8.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kongzhidea/redis/HEAD/RedisCluster/target/lib/commons-digester-1.8.jar
--------------------------------------------------------------------------------
/RedisCluster/target/lib/commons-email-1.3.1.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kongzhidea/redis/HEAD/RedisCluster/target/lib/commons-email-1.3.1.jar
--------------------------------------------------------------------------------
/RedisCluster/target/lib/curator-client-1.0.9.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kongzhidea/redis/HEAD/RedisCluster/target/lib/curator-client-1.0.9.jar
--------------------------------------------------------------------------------
/RedisCluster/target/lib/hadoop-common-0.21.0.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kongzhidea/redis/HEAD/RedisCluster/target/lib/hadoop-common-0.21.0.jar
--------------------------------------------------------------------------------
/RedisCluster/target/lib/json-lib-2.4-jdk15.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kongzhidea/redis/HEAD/RedisCluster/target/lib/json-lib-2.4-jdk15.jar
--------------------------------------------------------------------------------
/RedisCluster/target/lib/slf4j-log4j12-1.5.8.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kongzhidea/redis/HEAD/RedisCluster/target/lib/slf4j-log4j12-1.5.8.jar
--------------------------------------------------------------------------------
/RedisCluster/target/lib/commons-httpclient-3.1.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kongzhidea/redis/HEAD/RedisCluster/target/lib/commons-httpclient-3.1.jar
--------------------------------------------------------------------------------
/RedisCluster/target/lib/commons-logging-1.0.4.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kongzhidea/redis/HEAD/RedisCluster/target/lib/commons-logging-1.0.4.jar
--------------------------------------------------------------------------------
/RedisCluster/target/lib/commons-beanutils-1.8.0.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kongzhidea/redis/HEAD/RedisCluster/target/lib/commons-beanutils-1.8.0.jar
--------------------------------------------------------------------------------
/RedisCluster/target/lib/commons-collections-3.2.1.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kongzhidea/redis/HEAD/RedisCluster/target/lib/commons-collections-3.2.1.jar
--------------------------------------------------------------------------------
/RedisCluster/target/lib/commons-configuration-1.6.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kongzhidea/redis/HEAD/RedisCluster/target/lib/commons-configuration-1.6.jar
--------------------------------------------------------------------------------
/RedisCluster/target/lib/curator-framework-1.0.9.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kongzhidea/redis/HEAD/RedisCluster/target/lib/curator-framework-1.0.9.jar
--------------------------------------------------------------------------------
/RedisCluster/target/lib/commons-beanutils-core-1.8.0.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kongzhidea/redis/HEAD/RedisCluster/target/lib/commons-beanutils-core-1.8.0.jar
--------------------------------------------------------------------------------
/simple/redis.properties:
--------------------------------------------------------------------------------
1 | #redis
2 | redis.host=
3 | redis.port=
4 | redis.password=
5 | redis.jPoolCfgMaxActive=1000
6 | redis.jPoolCfgMaxIdle=100
7 | redis.jPoolCfgMaxWait=1000
8 |
--------------------------------------------------------------------------------
/RedisCluster/src/main/java/com/rr/redis/client/hash/IHashFunc.java:
--------------------------------------------------------------------------------
1 | package com.rr.redis.client.hash;
2 |
/**
 * Contract for mapping a key onto one of a fixed number of hash slots,
 * used to pick the redis partition a key belongs to.
 */
public interface IHashFunc {

    /** Maps a string key to a slot index. */
    int hash(String key);

    /** Maps a binary key to a slot index. */
    int hash(byte[] key);

    /** Returns the total number of hash slots. */
    int getHashSlotNum();
}
14 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.class
2 | # Package Files #
3 | *.war
4 | *.ear
5 | #eclipse
6 | .settings
7 | .classpath
8 | .project
9 | #target
10 | classes
11 |
12 | # idea
13 | *.iml
14 | *.eml
15 | *.imd
16 | *.ipr
17 | *.iws
18 | #idea
19 | .iml
20 | .im
21 | .idea
22 | run_task
23 | .DS_Store
24 | .tomcatplugin
25 | .settings
26 | META-INF
27 | .mymetadata
28 | .externalToolBuilders
29 | .mymetadata
30 | out
31 | logs
32 | classes
33 |
34 | *.war
35 | maven-archiver
36 | generated-sources
37 |
--------------------------------------------------------------------------------
/RedisCluster/src/main/java/com/rr/redis/client/exception/ClusterOpException.java:
--------------------------------------------------------------------------------
1 | package com.rr.redis.client.exception;
2 |
3 | public class ClusterOpException extends RuntimeException {
4 | private static final long serialVersionUID = 1L;
5 |
6 | public ClusterOpException(String msg, Throwable e) {
7 | super(msg, e);
8 | }
9 |
10 | public ClusterOpException(String msg) {
11 | super(msg);
12 | }
13 |
14 | public ClusterOpException(Throwable e) {
15 | super(e);
16 | }
17 | }
--------------------------------------------------------------------------------
/RedisCluster/src/test/java/com/rr/redis/test/ZKTest.java:
--------------------------------------------------------------------------------
1 | package com.rr.redis.test;
2 |
3 | import com.rr.redis.client.zookeeper.ZookeeperService;
4 |
public class ZKTest {
    /**
     * Manual test: writes a new partition value onto the cluster's zookeeper
     * node via ZookeeperService.updateState, which notifies every watching
     * client to rebuild its connection pools.
     */
    public static void main(String[] args) {
        String clusterName = "pubilc.remote";
        String zkHost = "10.4.28.172:2181,10.4.28.179:2181";

        // Upgrade procedure: first replace the nodes under the partition,
        // then update the clusterName node's value to the matching partition.
        // NOTE: requests arriving while the update runs will see exceptions!
        int zkTimeout = 2000;
        ZookeeperService zookeeper = new ZookeeperService(zkHost, clusterName,
                zkTimeout);
        try {
            zookeeper.updateState(0);
        } catch (Exception e) {
            e.printStackTrace();
        }
        System.exit(0);
    }
}
23 |
--------------------------------------------------------------------------------
/simple/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | ### 依赖版本
4 | ```
5 |
6 | redis.clients
7 | jedis
8 | 2.6.2
9 |
10 |
11 | 2.1.0 采用的连接池为 common-pool
12 |
13 | 2.6.2 采用的连接池为 common-pool2
14 | ```
15 |
16 | #### 2.6.2 版本
17 | * maxTotal:链接池中最大连接数,默认值8。
18 | * commons-pool1 中maxActive
19 | * maxIdle:连接池中最大空闲的连接数,默认为8
20 | * minIdle: 连接池中最少空闲的连接数,默认为0
21 | * maxWaitMillis:当连接池资源耗尽时,调用者最大阻塞的时间,超时将抛出异常。单位为毫秒;默认为-1,表示永不超时。
22 | * maxWait:commons-pool1中
23 | * testOnBorrow:向调用者输出“链接”资源时,是否检测其是否有效,如果无效则从连接池中移除,并尝试继续获取。默认为false。建议保持默认值.
24 | * testOnReturn:默认值false
25 | * testWhileIdle:向调用者输出“链接”对象时,是否检测它的空闲超时;默认为false。如果“链接”空闲超时,将会被移除;建议保持默认值。默认值false
26 | whenExhaustedAction: 当“连接池”中active数量达到阈值时,即“链接”资源耗尽时,连接池需要采取的手段, 默认为1:
27 | * 0:抛出异常
28 | * 1:阻塞,直到有可用链接资源
29 | * 2:强制创建新的链接资源
30 |
31 |
--------------------------------------------------------------------------------
/RedisCluster/src/main/java/com/rr/redis/client/hash/SimpleHashing.java:
--------------------------------------------------------------------------------
1 | package com.rr.redis.client.hash;
2 |
3 | import java.util.Arrays;
4 |
5 | /**
6 | * Simplest hash function. Use the hash in java itself.
7 | *
8 | */
9 | public class SimpleHashing implements IHashFunc {
10 | private final int hashSlotNum;
11 |
12 | public SimpleHashing(int hashNum) {
13 | this.hashSlotNum = hashNum;
14 | }
15 |
16 | /*
17 | * (non-Javadoc)
18 | *
19 | * @see com.renren.ad.jedis.util.Hashing#hash(java.lang.String)
20 | */
21 | @Override
22 | public int hash(String key) {
23 | return Math.abs(key.hashCode() % hashSlotNum);
24 | }
25 |
26 | /*
27 | * (non-Javadoc)
28 | *
29 | * @see com.renren.ad.jedis.util.Hashing#hash(byte[])
30 | */
31 | @Override
32 | public int hash(byte[] key) {
33 | return Math.abs(Arrays.hashCode(key) % hashSlotNum);
34 | }
35 |
36 | public int getHashSlotNum() {
37 | return hashSlotNum;
38 | }
39 |
40 | }
41 |
--------------------------------------------------------------------------------
/RedisCluster/src/main/resources/log4j.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
--------------------------------------------------------------------------------
/RedisCluster/src/main/java/com/rr/redis/client/RedisClusterPool.java:
--------------------------------------------------------------------------------
1 | package com.rr.redis.client;
2 |
3 | import redis.clients.jedis.Jedis;
4 | import redis.clients.jedis.JedisPool;
5 | import redis.clients.jedis.JedisPoolConfig;
6 | import redis.clients.jedis.exceptions.JedisException;
7 |
8 | /**
9 | *
10 | * JedisPool
11 | *
12 | *
13 | */
14 | public class RedisClusterPool {
15 |
16 | private JedisPool pool;
17 |
18 | public RedisClusterPool(JedisPoolConfig jPoolCfg, String Addr, int port,
19 | int timeout) {
20 | this.pool = new JedisPool(jPoolCfg, Addr, port, timeout);
21 | }
22 |
23 | public void destroy() throws JedisException {
24 | this.pool.destroy();
25 | }
26 |
27 | public Jedis getResource() throws JedisException {
28 | Jedis j = this.pool.getResource();
29 | if (j != null) {
30 | return j;
31 | } else {
32 | throw new JedisException("Null getting Jedis Resource from Pool.");
33 | }
34 | }
35 |
36 | public void returnResource(Jedis jedis) throws JedisException {
37 | this.pool.returnResource(jedis);
38 | }
39 |
40 | public void returnBrokenResource(Jedis jedis) throws JedisException {
41 | this.pool.returnBrokenResource(jedis);
42 | }
43 | }
44 |
--------------------------------------------------------------------------------
/simple/applicationContext-core.xml:
--------------------------------------------------------------------------------
1 |
2 |
9 |
10 |
12 |
13 |
14 | classpath*:*.properties
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
--------------------------------------------------------------------------------
/RedisCluster/src/main/java/com/rr/redis/client/model/Node.java:
--------------------------------------------------------------------------------
1 | package com.rr.redis.client.model;
2 |
3 | import java.util.ArrayList;
4 | import java.util.List;
5 |
6 | import org.apache.commons.lang.StringUtils;
7 |
8 | public class Node {
9 | private String host;
10 | private int port;
11 |
12 | public Node() {
13 | }
14 |
15 | public Node(String host, int port) {
16 | this.host = host;
17 | this.port = port;
18 | }
19 |
20 | public String getHost() {
21 | return host;
22 | }
23 |
24 | public int getPort() {
25 | return port;
26 | }
27 |
28 | @Override
29 | public boolean equals(Object o) {
30 | if (o == null) {
31 | return false;
32 | }
33 | if (!(o instanceof Node)) {
34 | return false;
35 | }
36 | Node otherNode = (Node) o;
37 | if (StringUtils.equals(host, otherNode.getHost())
38 | && port == otherNode.getPort()) {
39 | return true;
40 | }
41 | return false;
42 | }
43 |
44 | public int hashCode() {
45 | return (host + ":" + port).hashCode();
46 | }
47 |
48 | public String toString() {
49 | return host + ":" + port;
50 | }
51 |
52 | public String getIdentity() {
53 | return host + ":" + port;
54 | }
55 |
56 | public static Node getNodeFromIdentity(String identity) {
57 | try {
58 | String[] conts = StringUtils.split(identity, ":");
59 | String host = conts[0];
60 | String port = conts[1];
61 | Node node = new Node(host, Integer.valueOf(port));
62 | return node;
63 | } catch (Exception e) {
64 | e.printStackTrace();
65 | return null;
66 | }
67 | }
68 |
69 | }
--------------------------------------------------------------------------------
/lock/RedisTool.java:
--------------------------------------------------------------------------------
1 | package com.kk.redis.lock;
2 |
3 | import redis.clients.jedis.Jedis;
4 |
5 | import java.util.Collections;
6 |
/**
 * Single-instance redis distributed lock, following the pattern described at
 * http://mp.weixin.qq.com/s/qJK61ew0kCExvXrqb7-RSg
 */
public class RedisTool {

    // The Lua release script returns 1 when the key was deleted.
    private static final Long RELEASE_SUCCESS = 1L;

    // SET returns "OK" on success.
    private static final String LOCK_SUCCESS = "OK";
    // NX: only set the key if it does not already exist.
    private static final String SET_IF_NOT_EXIST = "NX";
    // PX: the expire time is given in milliseconds.
    private static final String SET_WITH_EXPIRE_TIME = "PX";

    /**
     * Tries to acquire the distributed lock. SET NX PX makes the
     * set-if-absent and the TTL a single atomic server-side operation.
     *
     * @param jedis      redis client
     * @param lockKey    lock key
     * @param requestId  identifies the lock holder, so only the owner can release
     * @param expireTime lock TTL in milliseconds (PX)
     * @return true if the lock was acquired
     */
    public static boolean tryGetDistributedLock(Jedis jedis, String lockKey, String requestId, int expireTime) {

        String result = jedis.set(lockKey, requestId, SET_IF_NOT_EXIST, SET_WITH_EXPIRE_TIME, expireTime);

        if (LOCK_SUCCESS.equals(result)) {
            return true;
        }
        return false;

    }


    /**
     * Releases the distributed lock. The compare-and-delete runs as one Lua
     * script so the get/del pair is atomic: only the holder identified by
     * requestId can delete the key, never another client whose lock it is.
     *
     * @param jedis     redis client
     * @param lockKey   lock key
     * @param requestId identifies the lock holder
     * @return true if the lock was released
     */
    public static boolean releaseDistributedLock(Jedis jedis, String lockKey, String requestId) {

        String script = "if redis.call('get', KEYS[1]) == ARGV[1] then return redis.call('del', KEYS[1]) else return 0 end";
        Object result = jedis.eval(script, Collections.singletonList(lockKey), Collections.singletonList(requestId));

        if (RELEASE_SUCCESS.equals(result)) {
            return true;
        }
        return false;

    }

}
--------------------------------------------------------------------------------
/RedisCluster/src/main/java/com/rr/redis/client/zookeeper/ZKClient.java:
--------------------------------------------------------------------------------
1 | package com.rr.redis.client.zookeeper;
2 |
3 | import java.io.IOException;
4 |
5 | import org.apache.commons.logging.Log;
6 | import org.apache.commons.logging.LogFactory;
7 |
8 | import com.netflix.curator.framework.CuratorFramework;
9 | import com.netflix.curator.framework.CuratorFrameworkFactory;
10 | import com.netflix.curator.retry.RetryNTimes;
11 |
12 | public class ZKClient {
13 | public static Log logger = LogFactory.getLog(ZKClient.class);
14 | private CuratorFramework client;
15 |
16 | // private static String zkHosts = "10.4.28.172:2181,10.4.28.179:2181";
17 |
18 | // 命名空间 zkClient下所有的data都在该地址下 zk中使用 '/' 可以指定目录结构
19 | private String namespace = "redis";
20 | private int zkTime = 5 * 60 * 1000;
21 |
22 | // public ZKClient(String zkHosts) {
23 | // init(zkHosts, namespace, zkTime);
24 | // }
25 |
26 | public ZKClient(String zkHosts, int zkTime) {
27 | init(zkHosts, namespace, zkTime);
28 | }
29 |
30 | // public ZKClient(String zkHosts, String namespace) {
31 | // init(zkHosts, namespace, zkTime);
32 | // }
33 | //
34 | // public ZKClient(String zkHosts, String namespace, int time) {
35 | // init(zkHosts, namespace, time);
36 | // }
37 |
38 | public void init(String zkHosts, String namespace, int time) {
39 | try {
40 | logger.info("zkHosts: " + zkHosts);
41 | client = CuratorFrameworkFactory.builder().connectString(zkHosts)
42 | .namespace(namespace)
43 | .retryPolicy(new RetryNTimes(Integer.MAX_VALUE, time))
44 | .connectionTimeoutMs(5000).build();
45 | } catch (IOException e) {
46 | logger.error("get client error", e);
47 | }
48 | client.start();
49 | }
50 |
51 | public CuratorFramework getClient() {
52 | return client;
53 | }
54 |
55 | public void destroy() throws Exception {
56 | client.close();
57 | }
58 |
59 | }
60 |
--------------------------------------------------------------------------------
/install/README.md:
--------------------------------------------------------------------------------
1 | ## [redis官方网站](http://redis.cn/)
2 |
3 | ### redis客户端
4 | * fastonosql
5 | * phpRedisAdmin
6 | * redis-cli 推荐
7 |
8 | ### [redis Java客户端使用](https://github.com/kongzhidea/redis)
9 |
10 | ### linux下redis安装部署
11 | ```
12 |
13 |
14 | 1、安装
15 | tar -zxvf redis-2.8.7.tar.gz
16 | cd redis-2.8.7
17 | make
18 |
19 | 2、调整内存
20 | 如果内存情况比较紧张的话,需要设定内核参数:
21 | 该配置文件依据linux系统不同而不同,参考启动时候的提示
22 | echo 1 > /proc/sys/vm/overcommit_memory
23 |
24 | 这里说一下这个配置的含义:
25 | /proc/sys/vm/overcommit_memory
26 | 该文件指定了内核针对内存分配的策略,其值可以是0、1、2。
27 | 0,表示内核将检查是否有足够的可用内存供应用进程使用;如果有足够的可用内存,内存申请允许;否则,内存申请失败,并把错误返回给应用进程。
28 | 1,表示内核允许分配所有的物理内存,而不管当前的内存状态如何。
29 | 2,表示内核允许分配超过所有物理内存和交换空间总和的内存
30 |
31 | 3.redis.conf 配置文件
32 | 参考redis.conf,主要配置项
33 | dir 数据存储目录
34 | pidfile 运行时进程id文件,默认为 /var/run/redis.pid
35 | port 监听端口,默认为6379
36 | daemonize 是否以后台进程运行,默认为no
37 | requirepass 是否需要密码,如果不需要密码则注释此行
38 | maxmemory 申请最大内存,如:maxmemory 4g
39 |
40 | timeout 超时时间,默认为300(秒)
41 | loglevel 日志记录等级,有4个可选值,debug,verbose(默认值),notice,warning
42 | logfile 日志记录方式,默认值为stdout
43 | databases 可用数据库数,默认值为16,默认数据库为0
44 | save 指出在多长时间内,有多少次更新操作,就将数据同步到数据文件。
45 | save可以多个条件配合,比如默认配置文件中的设置,就设置了三个条件。
46 | save 900 1 900秒(15分钟)内至少有1个key被改变
47 | save 300 10 300秒(5分钟)内至少有10个key被改变
48 | save 60 10000 60秒内至少有10000个key被改变
49 | rdbcompression 存储至本地数据库时是否压缩数据,默认为yes
50 | dbfilename 本地数据库文件名,默认值为dump.rdb
51 |
52 | 4、启动服务
53 | redis-2.8.7/src/redis-server
54 | redis-server redis.conf #启动服务
55 |
56 | 启动脚本: 端口,数据存储路径(要预先创建好),在配置文件中配置,
57 | nohup redis-server /data/redis/redis-3.0.5/conf/redis.conf > log 2>&1 &
58 |
59 | killall -9 redis-server #关闭服务
60 | redis-2.8.7/src/redis-cli shutdown #关闭服务
61 | redis-cli -p 6380 shutdown #指定端口 关闭服务
62 |
63 | redis-cli (-a password) #连接客户端
64 |
65 | redis-cli -h localhost -p 6379 #连接客户端 指定ip和端口
66 |
67 | redis-cli -h localhost -p 6379 -a password #连接客户端 指定ip和端口,指定密码,
68 | 与requirepass配合使用,也可以在redis-cli中使用 auth命令,如 auth password。
69 |
70 |
71 | 后台启动命令:
72 | nohup redis-server /data/redis/redis-3.0.5/conf/redis.conf > log 2>&1 &
73 | ```
74 |
75 | ### 批量删除key
76 | * redis-cli keys "keyword*" | xargs redis-cli del
77 |
78 |
--------------------------------------------------------------------------------
/RedisCluster/src/test/java/com/rr/redis/test/TestRedisService.java:
--------------------------------------------------------------------------------
1 | package com.rr.redis.test;
2 |
3 | import java.util.ArrayList;
4 | import java.util.HashMap;
5 | import java.util.List;
6 | import java.util.Map;
7 |
8 | import com.rr.redis.client.RedisClusterPoolClient;
9 |
public class TestRedisService {
    public static String zkHost = "10.4.28.172:2181,10.4.28.179:2181";
    public static String clusterName = "pubilc.remote";

    /**
     * Manual smoke test: builds 20 "test_i" keys and polls client.mget(...)
     * against the cluster forever, sleeping 300 ms between rounds. The
     * commented-out calls exercise the rest of the client API and can be
     * re-enabled by hand; everything after the while(true) loop is
     * intentionally unreachable scratch code.
     *
     * NOTE(review): loops forever by design; kill the process to stop it.
     */
    public static void main(String[] args) throws Exception {
        RedisClusterPoolClient client = new RedisClusterPoolClient(clusterName,
                zkHost);
        client.init();

        // Same 20 keys in four container shapes, to exercise the String,
        // List, byte[][] and binary-List overloads of the client API.
        String[] keys = new String[20];
        List ks = new ArrayList();
        byte[][] keys2 = new byte[20][];
        List ks2 = new ArrayList();
        Map mat = new HashMap();
        Map mat2 = new HashMap();
        while (true) {
            for (int i = 0; i < 20; i++) {
                try {

                    String str = "test_" + i;
                    // System.out.println("hash:" + str + "=" + getHash(str));
                    // client.set(str, "" + i);
                    // mat.put(str, "" + i);
                    // client.get(str);
                    // client.set(str.getBytes(), ("" + i).getBytes());
                    // mat2.put(str.getBytes(), ("" + i).getBytes());
                    // System.out.println(client.type(str));
                    keys[i] = str;
                    ks.add(str);
                    keys2[i] = str.getBytes();
                    ks2.add(str.getBytes());

                } catch (Exception e) {
                    e.printStackTrace();
                }
            }

            try {
                System.out.println(client.mget(keys));
            } catch (Exception e) {
                e.printStackTrace();
            }
            Thread.sleep(300);
        }
        // client.del(keys);
        // client.del(ks);
        // client.del(keys2);
        // System.out.println(client.mget(keys));
        // System.out.println(new String(client.mget(keys2).get(16)));
        // client.delBinary(ks2);

        // client.flushAll();

        // client.mset(mat);
        // client.msetnx(mat);

        // client.msetBinary(mat2);

        // System.exit(0);
    }

    // Mirrors SimpleHashing's slot computation for a 4-slot cluster; handy
    // when checking which partition a key lands on.
    private static int getHash(String key) {
        return Math.abs(key.hashCode() % 4);
    }
}
75 |
--------------------------------------------------------------------------------
/RedisCluster/src/main/java/com/rr/redis/client/zookeeper/ZookeeperService.java:
--------------------------------------------------------------------------------
1 | package com.rr.redis.client.zookeeper;
2 |
3 | import java.io.UnsupportedEncodingException;
4 | import java.util.ArrayList;
5 | import java.util.List;
6 |
7 | import org.apache.commons.logging.Log;
8 | import org.apache.commons.logging.LogFactory;
9 | import org.apache.zookeeper.WatchedEvent;
10 | import org.apache.zookeeper.Watcher;
11 | import org.apache.zookeeper.data.Stat;
12 |
13 | import com.netflix.curator.framework.CuratorFramework;
14 | import com.rr.redis.client.RedisClusterPoolProvider;
15 |
16 | /**
17 | * create(): 发起一个create操作. 可以组合其他方法 (比如mode 或background) 最后以forPath()方法结尾
18 | *
19 | *
20 | * delete(): 发起一个删除操作. 可以组合其他方法(version 或background) 最后以forPath()方法结尾
21 | *
22 | *
23 | * checkExists(): 发起一个检查ZNode 是否存在的操作. 可以组合其他方法(watch 或background)
24 | * 最后以forPath()方法结尾
25 | *
26 | *
27 | * getData(): 发起一个获取ZNode数据的操作. 可以组合其他方法(watch, background 或get stat)
28 | * 最后以forPath()方法结尾
29 | *
30 | *
31 | * setData(): 发起一个设置ZNode数据的操作. 可以组合其他方法(version 或background) 最后以forPath()方法结尾
32 | *
33 | *
34 | * getChildren(): 发起一个获取ZNode子节点的操作. 可以组合其他方法(watch, background 或get stat)
35 | * 最后以forPath()方法结尾
36 | *
37 | * @author Administrator
38 | *
39 | */
40 | public class ZookeeperService {
41 | private static final Log logger = LogFactory.getLog(ZookeeperService.class);
42 |
43 | // 监听的地址的值变化情况
44 | public String WATCH_PATH = "";
45 | private String clusterName;
46 |
47 | private CuratorFramework client;
48 | private ZKClient zkClient;
49 | private RedisClusterPoolProvider provider;
50 |
51 | public ZookeeperService(String zkHosts, String clusterName, int zkTime) {
52 | WATCH_PATH = clusterName;
53 | this.clusterName = clusterName;
54 | zkClient = new ZKClient(zkHosts, zkTime);
55 | client = zkClient.getClient();
56 | }
57 |
58 | public void setProvider(RedisClusterPoolProvider provider) {
59 | this.provider = provider;
60 | }
61 |
62 | /**
63 | * 监控WATCH_PATH,若该值在zk上有变化,则通知所有监听该值的warcher
64 | */
65 | public void regWatcherOnLineRserver() {
66 | logger.info("[zk watcher] register Watcher " + WATCH_PATH);
67 | try {
68 | client.getData().usingWatcher(new Watcher() {
69 | @Override
70 | public void process(WatchedEvent event) {
71 | logger.info("recieved zk change " + event.getPath() + " "
72 | + event.getType().name());
73 | if (event.getType() == Event.EventType.NodeDataChanged) {
74 | try {
75 | byte[] b = client.getData().forPath(WATCH_PATH);
76 |
77 | String evt = new String(b, "utf-8");
78 | logger.info("update remote service config " + evt);
79 |
80 | updateClusterPool(Integer.valueOf(evt));
81 | } catch (Exception e) {
82 | logger.error(e.getMessage(), e);
83 | }
84 | }
85 | regWatcherOnLineRserver();
86 | }
87 |
88 | /**
89 | * 更新连接池
90 | *
91 | * 更新服务的时候,先把partition下面的节点更换,
92 | * 然后再更新clusterName的值为改节点对应的partition
93 | *
94 | *
95 | * 如果当前有请求过来,则会抛出异常,请注意!!!
96 | *
97 | * @param partition
98 | */
99 | private void updateClusterPool(Integer partition) {
100 | if (provider == null) {
101 | logger.error("Usage: provider is null!");
102 | return;
103 | }
104 | synchronized (provider) {
105 | try {
106 | provider.destroyClusterPool(partition);
107 | String partPath = clusterName + "/" + partition;
108 | List nodes = getNodeList(partPath);
109 | // 建立连接池
110 | provider.createClusterPool(partition, nodes.get(0));
111 | } catch (Exception e) {
112 | logger.error(e.getMessage(), e);
113 | }
114 | }
115 | }
116 | }).forPath(WATCH_PATH);
117 | } catch (Exception e) {
118 | logger.error("zk watcher register error!" + e.getMessage(), e);
119 | }
120 | }
121 |
122 | /**
123 | * 得到该节点下的所有子节点
124 | *
125 | * @return
126 | */
127 | public List getNodeList(String path) {
128 | List ret = new ArrayList();
129 | try {
130 | // 得到该节点下的所有子节点
131 | ret = client.getChildren().forPath(path);
132 | } catch (Exception e) {
133 | logger.error(
134 | "get node list error! " + path + ".." + e.getMessage(), e);
135 | }
136 |
137 | return ret;
138 | }
139 |
140 | /**
141 | * 得到某节点的状态,不存在则返回null
142 | *
143 | * @param gid
144 | * @return
145 | * @throws Exception
146 | */
    public Stat getStat(String path, String identify) throws Exception {
        // checkExists returns the node's Stat, or null when it does not exist
        return client.checkExists().forPath(path + "/" + identify);
    }
151 |
152 | /**
153 | * 服务更新时请调用此方法
154 | *
155 | * @throws UnsupportedEncodingException
156 | * @throws Exception
157 | */
    public void updateState(int evt) throws UnsupportedEncodingException,
            Exception {
        // Write the new value to WATCH_PATH on Zookeeper; this fires the
        // NodeDataChanged event on every registered watcher.
        client.setData().forPath(WATCH_PATH,
                String.valueOf(evt).getBytes("utf-8"));
    }
164 |
165 | }
166 |
--------------------------------------------------------------------------------
/RedisCluster/src/main/java/com/rr/redis/simple/RedisUtil.java:
--------------------------------------------------------------------------------
1 | package com.rr.redis.simple;
2 |
3 | import java.util.List;
4 | import java.util.Set;
5 |
6 | import redis.clients.jedis.Jedis;
7 |
8 | /**
9 | * 没有使用连接池 不推荐使用
10 | *
11 | * @author kk
12 | *
13 | */
14 | public class RedisUtil {
15 | private static RedisUtil _instance = null;
16 | private static Jedis jedis;
17 | private static final String host = "10.4.28.172";
18 | private static final int port = 6379;
19 |
20 | private RedisUtil() {
21 | jedis = new Jedis(host, port);
22 | }
23 |
24 | public static RedisUtil getInstance() {
25 | if (_instance == null) {
26 | synchronized (RedisUtil.class) {
27 | if (_instance == null) {
28 | _instance = new RedisUtil();
29 | }
30 | }
31 | }
32 | return _instance;
33 | }
34 |
35 | public String get(String key) {
36 | return jedis.get(key);
37 | }
38 |
39 | /********************** 字符串 ****************/
40 | public void set(String key, int value) {
41 | jedis.set(key, String.valueOf(value));
42 | }
43 |
44 | public void set(String key, String value) {
45 | jedis.set(key, value);
46 | }
47 |
48 | public void delete(String... keys) {
49 | jedis.del(keys);
50 | }
51 |
52 | public void setex(String key, int seconds, String value) {
53 | jedis.setex(key, seconds, value);
54 | }
55 |
56 | public long ttl(String key) {
57 | return jedis.ttl(key);
58 | }
59 |
60 | /********************* 列表 ****************/
61 | public void lpush(String key, String value) {
62 | jedis.lpush(key, value);
63 | }
64 |
65 | public void lpush(String key, String... value) {
66 | jedis.lpush(key, value);
67 | }
68 |
69 | public void rpush(String key, String value) {
70 | jedis.rpush(key, value);
71 | }
72 |
73 | public void rpush(String key, String... value) {
74 | jedis.rpush(key, value);
75 | }
76 |
77 | public void lpop(String key) {
78 | jedis.lpop(key);
79 | }
80 |
81 | public void rpop(String key) {
82 | jedis.rpop(key);
83 | }
84 |
85 | public long llen(String key) {
86 | return jedis.llen(key);
87 | }
88 |
89 | /**
90 | *
91 | * @param key
92 | * @param start
93 | * 从0开始
94 | * @param end
95 | * -1表示到末尾, 取值范围[start,end]
96 | * @return
97 | */
98 | public List lrange(String key, long start, long end) {
99 | return jedis.lrange(key, start, end);
100 | }
101 |
102 | /************************ 有序列表 *****************/
103 | /**
104 | * 添加元素
105 | *
106 | * @param key
107 | * @param score
108 | * @param member
109 | */
110 | public void zadd(String key, double score, String member) {
111 | jedis.zadd(key, score, member);
112 | }
113 |
114 | /**
115 | * 得到member排名
116 | *
117 | * @param key
118 | * @param member
119 | * @return 从0开始,-1表示不存在
120 | */
121 | public long zrank(String key, String member) {
122 | Long ret = jedis.zrank(key, member);
123 | if (ret == null) {
124 | return -1;
125 | }
126 | return ret;
127 | }
128 |
129 | /**
130 | * 得到member倒序排名
131 | *
132 | * @param key
133 | * @param member
134 | * @return 从0开始,-1表示不存在
135 | */
136 | public long zrevrank(String key, String member) {
137 | Long ret = jedis.zrevrank(key, member);
138 | if (ret == null) {
139 | return -1;
140 | }
141 | return ret;
142 | }
143 |
144 | /**
145 | * 集合size
146 | *
147 | * @param key
148 | * @return
149 | */
150 | public long zcard(String key) {
151 | return jedis.zcard(key);
152 | }
153 |
154 | /**
155 | * 删除member
156 | *
157 | * @param key
158 | * @param members
159 | */
160 | public void zrem(String key, String... members) {
161 | jedis.zrem(key, members);
162 | }
163 |
164 | /**
165 | * 得到分数
166 | *
167 | * @param key
168 | * @param member
169 | * @return
170 | */
171 | public double zscore(String key, String member) {
172 | Double ret = jedis.zscore(key, member);
173 | if (ret == null) {
174 | return 0.0;
175 | }
176 | return ret;
177 | }
178 |
179 | /**
180 | * 返回的成员在排序设置的范围
181 | *
182 | * @param key
183 | * @param start
184 | * @param end
185 | * @return
186 | */
187 | public Set zrange(String key, long start, long end) {
188 | return jedis.zrange(key, start, end);
189 | }
190 |
191 | /**
192 | * 在排序的设置返回的成员范围,通过索引,下令从分数高到低
193 | *
194 | * @param key
195 | * @param start
196 | * @param end
197 | * @return
198 | */
199 | public Set zrevrange(String key, long start, long end) {
200 | return jedis.zrevrange(key, start, end);
201 | }
202 |
203 | /**
204 | * 返回指定分数段内的集合元素个数
205 | *
206 | * @param key
207 | * @param min
208 | * @param max
209 | * @return
210 | */
211 | public long zcount(String key, double min, double max) {
212 | return jedis.zcount(key, min, max);
213 | }
214 |
215 | /**
216 | * 向member增加/减少score
217 | *
218 | * @param key
219 | * @param score
220 | * @param member
221 | * @return
222 | */
223 | public double zincrby(String key, double score, String member) {
224 | return jedis.zincrby(key, score, member);
225 | }
226 |
227 | }
228 |
--------------------------------------------------------------------------------
/RedisCluster/src/main/java/com/rr/redis/client/RedisClusterPoolProvider.java:
--------------------------------------------------------------------------------
1 | package com.rr.redis.client;
2 |
3 | import java.util.Collection;
4 | import java.util.List;
5 | import java.util.Map;
6 | import java.util.concurrent.ConcurrentHashMap;
7 |
8 | import org.apache.commons.lang.StringUtils;
9 | import org.apache.commons.logging.Log;
10 | import org.apache.commons.logging.LogFactory;
11 |
12 | import redis.clients.jedis.JedisPoolConfig;
13 | import redis.clients.jedis.exceptions.JedisException;
14 |
15 | import com.rr.redis.client.hash.IHashFunc;
16 | import com.rr.redis.client.hash.SimpleHashing;
17 | import com.rr.redis.client.model.Node;
18 | import com.rr.redis.client.zookeeper.ZookeeperService;
19 |
20 | public class RedisClusterPoolProvider {
21 | private static final Log logger = LogFactory
22 | .getLog(RedisClusterPoolProvider.class);
23 | /**
24 | * key:redis中key的hash值,value:对应的服务的连接池,每个连接池中有个JedisPool
25 | *
26 | * 由于是根据key的hash来分布,所以redis服务的数量不能改变
27 | *
28 | * zk上样例:pubilc.remote/1/10.4.28.172:6379->clusterName:pubilc.remote,key:1(
29 | * 从0开始), value:10.4.28.172:6379的连接池
30 | */
31 | private Map pools;
32 | private static int jPoolCfgMaxActive = 1000;
33 | private static int jPoolCfgMaxIdle = 100;
34 | private static int jPoolCfgMaxWait = 1000;
35 |
36 | /**
37 | * Number of hash slot(node).
38 | */
39 | private int hashSlotNum;
40 | private IHashFunc hashFunc;
41 | private int redisTimeOut;
42 |
43 | private ZookeeperService zookeeper;
44 |
45 | /**
46 | * 构造函数,包含初始化过程
47 | *
48 | * @param clusterName
49 | * @param zkHost
50 | * @param zkTimeout
51 | * @param redisTimeout
52 | */
53 | public RedisClusterPoolProvider(String clusterName, String zkHost,
54 | int zkTimeout, int redisTimeout) {
55 |
56 | zookeeper = new ZookeeperService(zkHost, clusterName, zkTimeout);
57 | zookeeper.setProvider(this);// 监听zk变化时候 执行update方法
58 | zookeeper.regWatcherOnLineRserver();// 开始监听zk
59 |
60 | redisTimeOut = redisTimeout;
61 | pools = new ConcurrentHashMap();
62 |
63 | init(clusterName);
64 | }
65 |
66 | /**
67 | * 初始化过程: 从zk上获取节点,并建立连接池,初始化hash算法
68 | *
69 | * @param clusterName
70 | */
71 | private void init(String clusterName) {
72 | List parts = zookeeper.getNodeList(clusterName);
73 | this.hashSlotNum = parts.size();
74 | this.hashFunc = new SimpleHashing(hashSlotNum);
75 | for (String part : parts) {
76 | int pat = Integer.valueOf(part);
77 | try {
78 | String partPath = clusterName + "/" + part;
79 | List nodes = zookeeper.getNodeList(partPath);
80 | // 建立连接池
81 | createClusterPool(pat, nodes.get(0));
82 | } catch (Exception e) {
83 | logger.error(e.getMessage(), e);
84 | destroyClusterPool(pat);
85 | }
86 | }
87 | }
88 |
89 | /**
90 | * 建立连接池
91 | *
92 | * @param partition
93 | * @param identity
94 | */
95 | public void createClusterPool(int partition, String identity) {
96 | logger.info("create pool:" + partition + ".." + identity);
97 | Node node = Node.getNodeFromIdentity(identity);
98 | if (!this.pools.containsKey(partition)) {
99 | RedisClusterPool pool = new RedisClusterPool(getJedisPoolConfig(),
100 | node.getHost(), node.getPort(), redisTimeOut);
101 | this.pools.put(partition, pool);
102 | }
103 | }
104 |
105 | /**
106 | * 销毁连接池
107 | *
108 | * @param partition
109 | */
110 | public void destroyClusterPool(int partition) {
111 | if (this.pools != null && this.pools.containsKey(partition)) {
112 | try {
113 | this.pools.get(partition).destroy();
114 | this.pools.remove(partition);
115 | logger.info("destroy pool:" + partition);
116 | } catch (JedisException e) {
117 | logger.error("fail destroy ClusterPool", e);
118 | }
119 | }
120 | }
121 |
122 | /**
123 | * 得到hash算法
124 | *
125 | * @return
126 | */
127 | public IHashFunc getHashFunc() {
128 | return hashFunc;
129 | }
130 |
131 | /**
132 | * 设置连接池配置项
133 | *
134 | * @param maxActive
135 | * @param maxIdle
136 | * @param maxWait
137 | */
138 | public static void setJedisPoolConfig(int maxActive, int maxIdle,
139 | int maxWait) {
140 | jPoolCfgMaxActive = maxActive;
141 | jPoolCfgMaxIdle = maxIdle;
142 | jPoolCfgMaxWait = maxWait;
143 | }
144 |
145 | /**
146 | * 得到 连接池配置项
147 | *
148 | * @return
149 | */
150 | public static JedisPoolConfig getJedisPoolConfig() {
151 | JedisPoolConfig config = new JedisPoolConfig();
152 | // 控制一个pool可分配多少个jedis实例,通过pool.getResource()来获取;
153 | // 如果赋值为-1,则表示不限制;如果pool已经分配了maxActive个jedis实例,则此时pool的状态为exhausted(耗尽)。
154 | config.setMaxActive(jPoolCfgMaxActive);
155 | // 控制一个pool最多有多少个状态为idle(空闲的)的jedis实例。
156 | config.setMaxIdle(jPoolCfgMaxIdle);
157 | // 表示当borrow(引入)一个jedis实例时,最大的等待时间,如果超过等待时间,则直接抛出JedisConnectionException;
158 | config.setMaxWait(jPoolCfgMaxWait);
159 | // 在borrow一个jedis实例时,是否提前进行validate操作;如果为true,则得到的jedis实例均是可用的;
160 | config.setTestOnBorrow(false);
161 | return config;
162 | }
163 |
164 | /**
165 | * 得到redis所有的partition
166 | */
167 | public Collection getAllPartitions() {
168 | return pools.keySet();
169 | }
170 |
171 | /**
172 | * 得到所有的连接池
173 | *
174 | * @return
175 | */
176 | public Map getPoolsMap() {
177 | return pools;
178 | }
179 |
180 | /**
181 | * 得到key对应的连接池
182 | *
183 | * @param partition
184 | * key的hash值
185 | * @return
186 | */
187 | public RedisClusterPool getPool(int partition) {
188 | return pools.get(partition);
189 | }
190 |
191 | /**
192 | * 得到redis连接池的数量
193 | *
194 | * @return
195 | */
196 | public int getPoolsLen() {
197 | return pools.size();
198 | }
199 |
200 | }
201 |
--------------------------------------------------------------------------------
/RedisCluster/pom.xml:
--------------------------------------------------------------------------------
1 |
3 | 4.0.0
4 |
5 | com.rr.redis
6 | RedisCluster
7 | 1.0-SNAPSHOT
8 | jar
9 |
10 | RedisCluster
11 | http://maven.apache.org
12 |
13 |
14 | UTF-8
15 |
16 |
17 |
18 |
19 | junit
20 | junit
21 | 4.4
22 | test
23 |
24 |
25 | log4j
26 | log4j
27 | 1.2.16
28 |
29 |
30 | org.slf4j
31 | slf4j-api
32 | 1.5.8
33 |
34 |
35 | org.slf4j
36 | slf4j-log4j12
37 | 1.5.8
38 |
39 |
40 | commons-lang
41 | commons-lang
42 | 2.6
43 |
44 |
45 | commons-collections
46 | commons-collections
47 | 3.2.1
48 |
49 |
50 | commons-httpclient
51 | commons-httpclient
52 | 3.1
53 |
54 |
55 | commons-configuration
56 | commons-configuration
57 | 1.6
58 |
59 |
60 | commons-dbcp
61 | commons-dbcp
62 | 1.4
63 |
64 |
65 | commons-pool
66 | commons-pool
67 | 1.6
68 |
69 |
70 | commons-collections
71 | commons-collections
72 | 3.2.1
73 |
74 |
75 | commons-codec
76 | commons-codec
77 | 1.4
78 |
79 |
80 | org.apache.hadoop
81 | hadoop-common
82 | 0.21.0
83 |
84 |
85 | net.sf.json-lib
86 | json-lib
87 | 2.4
88 | jdk15
89 |
90 |
91 | org.apache.zookeeper
92 | zookeeper
93 | 3.3.6
94 |
95 |
96 | com.netflix.curator
97 | curator-framework
98 | 1.0.9
99 |
100 |
101 | org.apache.commons
102 | commons-email
103 | 1.3.1
104 |
105 |
106 | redis.clients
107 | jedis
108 | 2.1.0
109 |
110 |
111 |
112 |
113 |
114 | RedisCluster
115 |
116 |
117 | src/main/java
118 |
119 | .svn
120 | **.xml
121 | **.dtd
122 | **.conf
123 |
124 |
125 | **/*.xml
126 | **/*.properties
127 | **/*.java
128 |
129 |
130 |
131 | src/main/resources
132 |
133 | **/*.xml
134 | **/*.dtd
135 | **/*.properties
136 |
137 |
138 |
139 |
140 |
141 | src/test/java
142 |
143 | **/*.java
144 |
145 |
146 |
147 | src/test/resources
148 |
149 | **/*.xml
150 |
151 |
152 |
153 |
154 |
155 | org.apache.maven.plugins
156 | maven-jar-plugin
157 |
158 |
159 | org.apache.maven.plugins
160 | maven-compiler-plugin
161 |
162 | 1.6
163 | 1.6
164 | true
165 | true
166 | UTF-8
167 |
168 | ${project.basedir}/src/main/java
169 |
170 |
171 |
172 |
173 | org.apache.maven.plugins
174 | maven-surefire-plugin
175 |
176 | true
177 |
178 |
179 |
180 | org.apache.maven.plugins
181 | maven-dependency-plugin
182 |
183 |
184 | copy-dependencies
185 | package
186 |
187 | copy-dependencies
188 |
189 |
190 | ${project.build.directory}/lib
191 |
192 |
193 |
194 |
195 |
196 |
197 |
198 |
--------------------------------------------------------------------------------
/simple/RedisService.java:
--------------------------------------------------------------------------------
1 | package com.kk.service;
2 |
3 | import org.apache.commons.lang.StringUtils;
4 | import org.apache.commons.logging.Log;
5 | import org.apache.commons.logging.LogFactory;
6 | import redis.clients.jedis.Jedis;
7 | import redis.clients.jedis.JedisPool;
8 | import redis.clients.jedis.JedisPoolConfig;
9 |
10 | import java.util.ArrayList;
11 | import java.util.HashSet;
12 | import java.util.List;
13 | import java.util.Set;
14 |
15 | /**
16 | * Redis操作接口 连接池
17 | *
18 | */
19 | public class RedisService {
20 | private final Log logger = LogFactory.getLog(RedisService.class);
21 |
22 | private JedisPool pool = null;
23 |
24 | private String host;
25 | private int port;
26 |
27 | private int jPoolCfgMaxActive;
28 | private int jPoolCfgMaxIdle;
29 | private int jPoolCfgMaxWait;
30 | private String password;// 密码
31 |
32 | public String getHost() {
33 | return host;
34 | }
35 |
36 | public void setHost(String host) {
37 | this.host = host;
38 | }
39 |
40 | public int getPort() {
41 | return port;
42 | }
43 |
44 | public void setPort(int port) {
45 | this.port = port;
46 | }
47 |
48 | public int getjPoolCfgMaxActive() {
49 | return jPoolCfgMaxActive;
50 | }
51 |
52 | public void setjPoolCfgMaxActive(int jPoolCfgMaxActive) {
53 | this.jPoolCfgMaxActive = jPoolCfgMaxActive;
54 | }
55 |
56 | public int getjPoolCfgMaxIdle() {
57 | return jPoolCfgMaxIdle;
58 | }
59 |
60 | public void setjPoolCfgMaxIdle(int jPoolCfgMaxIdle) {
61 | this.jPoolCfgMaxIdle = jPoolCfgMaxIdle;
62 | }
63 |
64 | public int getjPoolCfgMaxWait() {
65 | return jPoolCfgMaxWait;
66 | }
67 |
68 | public void setjPoolCfgMaxWait(int jPoolCfgMaxWait) {
69 | this.jPoolCfgMaxWait = jPoolCfgMaxWait;
70 | }
71 |
72 | public String getPassword() {
73 | return password;
74 | }
75 |
76 | public void setPassword(String password) {
77 | this.password = password;
78 | }
79 |
80 | /**
81 | * 构建redis连接池 初始化
82 | */
83 | public void init() {
84 | JedisPoolConfig config = new JedisPoolConfig();
85 | // 控制一个pool可分配多少个jedis实例,通过pool.getResource()来获取;
86 | // 如果赋值为-1,则表示不限制;如果pool已经分配了maxActive个jedis实例,则此时pool的状态为exhausted(耗尽)。
87 | config.setMaxTotal(jPoolCfgMaxActive);
88 | // 控制一个pool最多有多少个状态为idle(空闲的)的jedis实例。
89 | config.setMaxIdle(jPoolCfgMaxIdle);
90 | // 表示当borrow(引入)一个jedis实例时,最大的等待时间,如果超过等待时间,则直接抛出JedisConnectionException;
91 | config.setMaxWaitMillis(jPoolCfgMaxWait);
92 | // 在borrow一个jedis实例时,是否提前进行validate操作;如果为true,则得到的jedis实例均是可用的;
93 | config.setTestOnBorrow(true);
94 | if(StringUtils.isBlank(password)) {
95 | pool = new JedisPool(config, host, port, 2000);
96 | }else {
97 | pool = new JedisPool(config, host, port, 2000, password);
98 | }
99 | }
100 |
101 | private JedisPool getPool() {
102 | return pool;
103 | }
104 |
105 | /**
106 | * 返还到连接池
107 | *
108 | * @param pool
109 | * @param redis
110 | */
111 | public void returnResource(JedisPool pool, Jedis redis) {
112 | if (redis != null) {
113 | pool.returnResource(redis);
114 | }
115 | }
116 |
117 | /**
118 | * 获取数据
119 | *
120 | * @param key
121 | * @return
122 | */
123 | public String get(String key) {
124 | String value = null;
125 |
126 | JedisPool pool = null;
127 | Jedis jedis = null;
128 | try {
129 | pool = getPool();
130 | jedis = pool.getResource();
131 |
132 | value = jedis.get(key);
133 | } catch (Exception e) {
134 | // 释放redis对象
135 | pool.returnBrokenResource(jedis);
136 | logger.error(e.getMessage(), e);
137 | } finally {
138 | // 返还到连接池
139 | returnResource(pool, jedis);
140 | }
141 |
142 | return value;
143 | }
144 |
145 |
146 | /**
147 | * 获取枚举类型key的值,适用于get/set
148 | *
149 | * @param key
150 | * @return
151 | */
152 | public int getInt(String key) {
153 | try {
154 | return Integer.parseInt(get(key));
155 | } catch (Exception e) {
156 | return 0;
157 | }
158 | }
159 |
160 | // 如果key不存在则返回1,存在则返回0
161 | public long setnx(String key, String value) {
162 | JedisPool pool = null;
163 | Jedis jedis = null;
164 | long ret = 0;
165 | try {
166 | pool = getPool();
167 | jedis = pool.getResource();
168 |
169 | ret = jedis.setnx(key, value);
170 | } catch (Exception e) {
171 | // 释放redis对象
172 | pool.returnBrokenResource(jedis);
173 | logger.error(e.getMessage(), e);
174 | } finally {
175 | // 返还到连接池
176 | returnResource(pool, jedis);
177 | }
178 | return ret;
179 | }
180 |
181 | public void set(String key, String value) {
182 | JedisPool pool = null;
183 | Jedis jedis = null;
184 | try {
185 | pool = getPool();
186 | jedis = pool.getResource();
187 |
188 | jedis.set(key, value);
189 | } catch (Exception e) {
190 | // 释放redis对象
191 | pool.returnBrokenResource(jedis);
192 | logger.error(e.getMessage(), e);
193 | } finally {
194 | // 返还到连接池
195 | returnResource(pool, jedis);
196 | }
197 | }
198 |
199 | public boolean exists(String key) {
200 | boolean value = false;
201 |
202 | JedisPool pool = null;
203 | Jedis jedis = null;
204 | try {
205 | pool = getPool();
206 | jedis = pool.getResource();
207 |
208 | value = jedis.exists(key);
209 | } catch (Exception e) {
210 | // 释放redis对象
211 | pool.returnBrokenResource(jedis);
212 | logger.error(e.getMessage(), e);
213 | } finally {
214 | // 返还到连接池
215 | returnResource(pool, jedis);
216 | }
217 |
218 | return value;
219 | }
220 |
221 | public void delete(String... keys) {
222 | JedisPool pool = null;
223 | Jedis jedis = null;
224 | try {
225 | pool = getPool();
226 | jedis = pool.getResource();
227 |
228 | jedis.del(keys);
229 | } catch (Exception e) {
230 | // 释放redis对象
231 | pool.returnBrokenResource(jedis);
232 | logger.error(e.getMessage(), e);
233 | } finally {
234 | // 返还到连接池
235 | returnResource(pool, jedis);
236 | }
237 | }
238 |
239 | public void setex(String key, int seconds, String value) {
240 | JedisPool pool = null;
241 | Jedis jedis = null;
242 | try {
243 | pool = getPool();
244 | jedis = pool.getResource();
245 |
246 | jedis.setex(key, seconds, value);
247 | } catch (Exception e) {
248 | // 释放redis对象
249 | pool.returnBrokenResource(jedis);
250 | logger.error(e.getMessage(), e);
251 | } finally {
252 | // 返还到连接池
253 | returnResource(pool, jedis);
254 | }
255 | }
256 |
257 | public long ttl(String key) {
258 | long t = 0;
259 | JedisPool pool = null;
260 | Jedis jedis = null;
261 | try {
262 | pool = getPool();
263 | jedis = pool.getResource();
264 |
265 | t = jedis.ttl(key);
266 | } catch (Exception e) {
267 | // 释放redis对象
268 | pool.returnBrokenResource(jedis);
269 | logger.error(e.getMessage(), e);
270 | } finally {
271 | // 返还到连接池
272 | returnResource(pool, jedis);
273 | }
274 | return t;
275 | }
276 |
277 | /********************* 列表 ****************/
278 | public void lpush(String key, String value) {
279 | JedisPool pool = null;
280 | Jedis jedis = null;
281 | try {
282 | pool = getPool();
283 | jedis = pool.getResource();
284 |
285 | jedis.lpush(key, value);
286 | } catch (Exception e) {
287 | // 释放redis对象
288 | pool.returnBrokenResource(jedis);
289 | logger.error(e.getMessage(), e);
290 | } finally {
291 | // 返还到连接池
292 | returnResource(pool, jedis);
293 | }
294 | }
295 |
296 | public void rpush(String key, String value) {
297 | JedisPool pool = null;
298 | Jedis jedis = null;
299 | try {
300 | pool = getPool();
301 | jedis = pool.getResource();
302 |
303 | jedis.rpush(key, value);
304 | } catch (Exception e) {
305 | // 释放redis对象
306 | pool.returnBrokenResource(jedis);
307 | logger.error(e.getMessage(), e);
308 | } finally {
309 | // 返还到连接池
310 | returnResource(pool, jedis);
311 | }
312 | }
313 |
314 | public void lpop(String key) {
315 | JedisPool pool = null;
316 | Jedis jedis = null;
317 | try {
318 | pool = getPool();
319 | jedis = pool.getResource();
320 |
321 | jedis.lpop(key);
322 | } catch (Exception e) {
323 | // 释放redis对象
324 | pool.returnBrokenResource(jedis);
325 | logger.error(e.getMessage(), e);
326 | } finally {
327 | // 返还到连接池
328 | returnResource(pool, jedis);
329 | }
330 | }
331 |
332 | public void rpop(String key) {
333 | JedisPool pool = null;
334 | Jedis jedis = null;
335 | try {
336 | pool = getPool();
337 | jedis = pool.getResource();
338 |
339 | jedis.rpop(key);
340 | } catch (Exception e) {
341 | // 释放redis对象
342 | pool.returnBrokenResource(jedis);
343 | logger.error(e.getMessage(), e);
344 | } finally {
345 | // 返还到连接池
346 | returnResource(pool, jedis);
347 | }
348 | }
349 |
350 | public long llen(String key) {
351 | Long len = null;
352 | JedisPool pool = null;
353 | Jedis jedis = null;
354 | try {
355 | pool = getPool();
356 | jedis = pool.getResource();
357 |
358 | len = jedis.llen(key);
359 | } catch (Exception e) {
360 | // 释放redis对象
361 | pool.returnBrokenResource(jedis);
362 | logger.error(e.getMessage(), e);
363 | } finally {
364 | // 返还到连接池
365 | returnResource(pool, jedis);
366 | }
367 | return len == null ? 0 : len;
368 | }
369 |
370 | /**
371 | *
372 | * @param key
373 | * @param start
374 | * 从0开始
375 | * @param end
376 | * -1表示到末尾, 取值范围[start,end]
377 | * @return
378 | */
379 | public List lrange(String key, long start, long end) {
380 | List list = new ArrayList();
381 | JedisPool pool = null;
382 | Jedis jedis = null;
383 | try {
384 | pool = getPool();
385 | jedis = pool.getResource();
386 |
387 | list = jedis.lrange(key, start, end);
388 | } catch (Exception e) {
389 | // 释放redis对象
390 | pool.returnBrokenResource(jedis);
391 | logger.error(e.getMessage(), e);
392 | } finally {
393 | // 返还到连接池
394 | returnResource(pool, jedis);
395 | }
396 | return list;
397 | }
398 |
399 | /**
400 | * Returns all the keys matching the glob-style pattern as space separated
401 | * strings
402 | *
403 | * @param pattern
404 | * @return
405 | */
406 | public Set keys(String pattern) {
407 | Set set = new HashSet();
408 | JedisPool pool = null;
409 | Jedis jedis = null;
410 | try {
411 | pool = getPool();
412 | jedis = pool.getResource();
413 |
414 | set = jedis.keys(pattern);
415 | } catch (Exception e) {
416 | // 释放redis对象
417 | pool.returnBrokenResource(jedis);
418 | logger.error(e.getMessage(), e);
419 | } finally {
420 | // 返还到连接池
421 | returnResource(pool, jedis);
422 | }
423 | return set;
424 | }
425 |
426 | /**
427 | * Return the number of keys in the currently selected database.
428 | *
429 | *
430 | * @return
431 | */
432 | public long dbSize() {
433 | Long ret = 0l;
434 | JedisPool pool = null;
435 | Jedis jedis = null;
436 | try {
437 | pool = getPool();
438 | jedis = pool.getResource();
439 |
440 | ret = jedis.dbSize();
441 | } catch (Exception e) {
442 | // 释放redis对象
443 | pool.returnBrokenResource(jedis);
444 | logger.error(e.getMessage(), e);
445 | } finally {
446 | // 返还到连接池
447 | returnResource(pool, jedis);
448 | }
449 | return ret;
450 | }
451 |
452 | }
--------------------------------------------------------------------------------
/RedisCluster/src/main/java/com/rr/redis/simple/RedisAPI.java:
--------------------------------------------------------------------------------
1 | package com.rr.redis.simple;
2 |
3 | import java.util.ArrayList;
4 | import java.util.HashSet;
5 | import java.util.List;
6 | import java.util.Set;
7 |
8 | import org.apache.commons.logging.Log;
9 | import org.apache.commons.logging.LogFactory;
10 |
11 | import redis.clients.jedis.Jedis;
12 | import redis.clients.jedis.JedisPool;
13 | import redis.clients.jedis.JedisPoolConfig;
14 |
15 | /**
16 | * Redis操作接口 连接池,但是没有实现连接集群
17 | *
18 | */
19 | public class RedisAPI {
20 | private static JedisPool pool = null;
21 |
22 | private static final String host = "10.4.28.172";
23 | private static final int port = 6379;
24 |
25 | public static final Log logger = LogFactory.getLog(RedisAPI.class);
26 |
27 | private static int jPoolCfgMaxActive = 1000;
28 | private static int jPoolCfgMaxIdle = 100;
29 | private static int jPoolCfgMaxWait = 1000;
30 |
31 | /**
32 | * 构建redis连接池
33 | *
34 | * @param ip
35 | * @param port
36 | * @return JedisPool
37 | */
38 |
39 | public static JedisPool getPool() {
40 | if (pool == null) {
41 | synchronized (RedisAPI.class) {
42 | if (pool == null) {
43 | JedisPoolConfig config = new JedisPoolConfig();
44 | // 控制一个pool可分配多少个jedis实例,通过pool.getResource()来获取;
45 | // 如果赋值为-1,则表示不限制;如果pool已经分配了maxActive个jedis实例,则此时pool的状态为exhausted(耗尽)。
46 | config.setMaxActive(jPoolCfgMaxActive);
47 | // 控制一个pool最多有多少个状态为idle(空闲的)的jedis实例。
48 | config.setMaxIdle(jPoolCfgMaxIdle);
49 | // 表示当borrow(引入)一个jedis实例时,最大的等待时间,如果超过等待时间,则直接抛出JedisConnectionException;
50 | config.setMaxWait(jPoolCfgMaxWait);
51 | // 在borrow一个jedis实例时,是否提前进行validate操作;如果为true,则得到的jedis实例均是可用的;
52 | config.setTestOnBorrow(true);
53 | pool = new JedisPool(config, host, port, 2000);
54 | }
55 | }
56 |
57 | }
58 | return pool;
59 | }
60 |
61 | /**
62 | * 返还到连接池
63 | *
64 | * @param pool
65 | * @param redis
66 | */
    /**
     * Returns a borrowed connection to the pool.
     *
     * @param pool  pool the connection was borrowed from
     * @param redis connection to return; ignored when null
     */
    public static void returnResource(JedisPool pool, Jedis redis) {
        if (redis != null) {
            pool.returnResource(redis);
        }
    }
72 |
73 | /**
74 | * 获取数据
75 | *
76 | * @param key
77 | * @return
78 | */
79 | public static String get(String key) {
80 | String value = null;
81 |
82 | JedisPool pool = null;
83 | Jedis jedis = null;
84 | try {
85 | pool = getPool();
86 | jedis = pool.getResource();
87 |
88 | value = jedis.get(key);
89 | } catch (Exception e) {
90 | // 释放redis对象
91 | pool.returnBrokenResource(jedis);
92 | logger.error(e.getMessage(), e);
93 | } finally {
94 | // 返还到连接池
95 | returnResource(pool, jedis);
96 | }
97 |
98 | return value;
99 | }
100 |
101 | public static void set(String key, String value) {
102 | JedisPool pool = null;
103 | Jedis jedis = null;
104 | try {
105 | pool = getPool();
106 | jedis = pool.getResource();
107 |
108 | jedis.set(key, value);
109 | } catch (Exception e) {
110 | // 释放redis对象
111 | pool.returnBrokenResource(jedis);
112 | logger.error(e.getMessage(), e);
113 | } finally {
114 | // 返还到连接池
115 | returnResource(pool, jedis);
116 | }
117 | }
118 |
119 | public static void set(String key, int value) {
120 | set(key, String.valueOf(value));
121 | }
122 |
123 | public static void delete(String... keys) {
124 | JedisPool pool = null;
125 | Jedis jedis = null;
126 | try {
127 | pool = getPool();
128 | jedis = pool.getResource();
129 |
130 | jedis.del(keys);
131 | } catch (Exception e) {
132 | // 释放redis对象
133 | pool.returnBrokenResource(jedis);
134 | logger.error(e.getMessage(), e);
135 | } finally {
136 | // 返还到连接池
137 | returnResource(pool, jedis);
138 | }
139 | }
140 |
141 | public static void setex(String key, int seconds, String value) {
142 | JedisPool pool = null;
143 | Jedis jedis = null;
144 | try {
145 | pool = getPool();
146 | jedis = pool.getResource();
147 |
148 | jedis.setex(key, seconds, value);
149 | } catch (Exception e) {
150 | // 释放redis对象
151 | pool.returnBrokenResource(jedis);
152 | logger.error(e.getMessage(), e);
153 | } finally {
154 | // 返还到连接池
155 | returnResource(pool, jedis);
156 | }
157 | }
158 |
159 | public static long ttl(String key) {
160 | long t = 0;
161 | JedisPool pool = null;
162 | Jedis jedis = null;
163 | try {
164 | pool = getPool();
165 | jedis = pool.getResource();
166 |
167 | t = jedis.ttl(key);
168 | } catch (Exception e) {
169 | // 释放redis对象
170 | pool.returnBrokenResource(jedis);
171 | logger.error(e.getMessage(), e);
172 | } finally {
173 | // 返还到连接池
174 | returnResource(pool, jedis);
175 | }
176 | return t;
177 | }
178 |
179 | /********************* 列表 ****************/
180 | public static void lpush(String key, String value) {
181 | JedisPool pool = null;
182 | Jedis jedis = null;
183 | try {
184 | pool = getPool();
185 | jedis = pool.getResource();
186 |
187 | jedis.lpush(key, value);
188 | } catch (Exception e) {
189 | // 释放redis对象
190 | pool.returnBrokenResource(jedis);
191 | logger.error(e.getMessage(), e);
192 | } finally {
193 | // 返还到连接池
194 | returnResource(pool, jedis);
195 | }
196 | }
197 |
198 | public static void lpush(String key, String... value) {
199 | JedisPool pool = null;
200 | Jedis jedis = null;
201 | try {
202 | pool = getPool();
203 | jedis = pool.getResource();
204 |
205 | jedis.lpush(key, value);
206 | } catch (Exception e) {
207 | // 释放redis对象
208 | pool.returnBrokenResource(jedis);
209 | logger.error(e.getMessage(), e);
210 | } finally {
211 | // 返还到连接池
212 | returnResource(pool, jedis);
213 | }
214 | }
215 |
216 | public static void rpush(String key, String value) {
217 | JedisPool pool = null;
218 | Jedis jedis = null;
219 | try {
220 | pool = getPool();
221 | jedis = pool.getResource();
222 |
223 | jedis.rpush(key, value);
224 | } catch (Exception e) {
225 | // 释放redis对象
226 | pool.returnBrokenResource(jedis);
227 | logger.error(e.getMessage(), e);
228 | } finally {
229 | // 返还到连接池
230 | returnResource(pool, jedis);
231 | }
232 | }
233 |
234 | public static void rpush(String key, String... value) {
235 | JedisPool pool = null;
236 | Jedis jedis = null;
237 | try {
238 | pool = getPool();
239 | jedis = pool.getResource();
240 |
241 | jedis.rpush(key, value);
242 | } catch (Exception e) {
243 | // 释放redis对象
244 | pool.returnBrokenResource(jedis);
245 | logger.error(e.getMessage(), e);
246 | } finally {
247 | // 返还到连接池
248 | returnResource(pool, jedis);
249 | }
250 | }
251 |
252 | public static void lpop(String key) {
253 | JedisPool pool = null;
254 | Jedis jedis = null;
255 | try {
256 | pool = getPool();
257 | jedis = pool.getResource();
258 |
259 | jedis.lpop(key);
260 | } catch (Exception e) {
261 | // 释放redis对象
262 | pool.returnBrokenResource(jedis);
263 | logger.error(e.getMessage(), e);
264 | } finally {
265 | // 返还到连接池
266 | returnResource(pool, jedis);
267 | }
268 | }
269 |
270 | public static void rpop(String key) {
271 | JedisPool pool = null;
272 | Jedis jedis = null;
273 | try {
274 | pool = getPool();
275 | jedis = pool.getResource();
276 |
277 | jedis.rpop(key);
278 | } catch (Exception e) {
279 | // 释放redis对象
280 | pool.returnBrokenResource(jedis);
281 | logger.error(e.getMessage(), e);
282 | } finally {
283 | // 返还到连接池
284 | returnResource(pool, jedis);
285 | }
286 | }
287 |
288 | public static long llen(String key) {
289 | Long len = null;
290 | JedisPool pool = null;
291 | Jedis jedis = null;
292 | try {
293 | pool = getPool();
294 | jedis = pool.getResource();
295 |
296 | len = jedis.llen(key);
297 | } catch (Exception e) {
298 | // 释放redis对象
299 | pool.returnBrokenResource(jedis);
300 | logger.error(e.getMessage(), e);
301 | } finally {
302 | // 返还到连接池
303 | returnResource(pool, jedis);
304 | }
305 | return len == null ? 0 : len;
306 | }
307 |
308 | /**
309 | *
310 | * @param key
311 | * @param start
312 | * 从0开始
313 | * @param end
314 | * -1表示到末尾, 取值范围[start,end]
315 | * @return
316 | */
317 | public static List lrange(String key, long start, long end) {
318 | List list = new ArrayList();
319 | JedisPool pool = null;
320 | Jedis jedis = null;
321 | try {
322 | pool = getPool();
323 | jedis = pool.getResource();
324 |
325 | list = jedis.lrange(key, start, end);
326 | } catch (Exception e) {
327 | // 释放redis对象
328 | pool.returnBrokenResource(jedis);
329 | logger.error(e.getMessage(), e);
330 | } finally {
331 | // 返还到连接池
332 | returnResource(pool, jedis);
333 | }
334 | return list;
335 | }
336 |
337 | /************************ 有序列表 *****************/
338 | /**
339 | * 添加元素
340 | *
341 | * @param key
342 | * @param score
343 | * @param member
344 | */
345 | public static void zadd(String key, double score, String member) {
346 | JedisPool pool = null;
347 | Jedis jedis = null;
348 | try {
349 | pool = getPool();
350 | jedis = pool.getResource();
351 |
352 | jedis.zadd(key, score, member);
353 | } catch (Exception e) {
354 | // 释放redis对象
355 | pool.returnBrokenResource(jedis);
356 | logger.error(e.getMessage(), e);
357 | } finally {
358 | // 返还到连接池
359 | returnResource(pool, jedis);
360 | }
361 | }
362 |
363 | /**
364 | * 得到member排名
365 | *
366 | * @param key
367 | * @param member
368 | * @return 从0开始,-1表示不存在
369 | */
370 | public static long zrank(String key, String member) {
371 | Long ret = null;
372 | JedisPool pool = null;
373 | Jedis jedis = null;
374 | try {
375 | pool = getPool();
376 | jedis = pool.getResource();
377 |
378 | ret = jedis.zrank(key, member);
379 | } catch (Exception e) {
380 | // 释放redis对象
381 | pool.returnBrokenResource(jedis);
382 | logger.error(e.getMessage(), e);
383 | } finally {
384 | // 返还到连接池
385 | returnResource(pool, jedis);
386 | }
387 | return ret == null ? -1 : ret;
388 | }
389 |
390 | /**
391 | * 得到member倒序排名
392 | *
393 | * @param key
394 | * @param member
395 | * @return 从0开始,-1表示不存在
396 | */
397 | public static long zrevrank(String key, String member) {
398 | Long ret = null;
399 | JedisPool pool = null;
400 | Jedis jedis = null;
401 | try {
402 | pool = getPool();
403 | jedis = pool.getResource();
404 |
405 | ret = jedis.zrevrank(key, member);
406 | } catch (Exception e) {
407 | // 释放redis对象
408 | pool.returnBrokenResource(jedis);
409 | logger.error(e.getMessage(), e);
410 | } finally {
411 | // 返还到连接池
412 | returnResource(pool, jedis);
413 | }
414 | return ret == null ? -1 : ret;
415 | }
416 |
417 | /**
418 | * 集合size
419 | *
420 | * @param key
421 | * @return
422 | */
423 | public static long zcard(String key) {
424 | Long len = null;
425 | JedisPool pool = null;
426 | Jedis jedis = null;
427 | try {
428 | pool = getPool();
429 | jedis = pool.getResource();
430 |
431 | len = jedis.zcard(key);
432 | } catch (Exception e) {
433 | // 释放redis对象
434 | pool.returnBrokenResource(jedis);
435 | logger.error(e.getMessage(), e);
436 | } finally {
437 | // 返还到连接池
438 | returnResource(pool, jedis);
439 | }
440 | return len == null ? 0 : len;
441 | }
442 |
443 | /**
444 | * 删除member
445 | *
446 | * @param key
447 | * @param members
448 | */
449 | public static void zrem(String key, String... members) {
450 | JedisPool pool = null;
451 | Jedis jedis = null;
452 | try {
453 | pool = getPool();
454 | jedis = pool.getResource();
455 |
456 | jedis.zrem(key, members);
457 | } catch (Exception e) {
458 | // 释放redis对象
459 | pool.returnBrokenResource(jedis);
460 | logger.error(e.getMessage(), e);
461 | } finally {
462 | // 返还到连接池
463 | returnResource(pool, jedis);
464 | }
465 | }
466 |
467 | /**
468 | * 得到分数
469 | *
470 | * @param key
471 | * @param member
472 | * @return
473 | */
474 | public static double zscore(String key, String member) {
475 | Double ret = null;
476 | JedisPool pool = null;
477 | Jedis jedis = null;
478 | try {
479 | pool = getPool();
480 | jedis = pool.getResource();
481 |
482 | ret = jedis.zscore(key, member);
483 | } catch (Exception e) {
484 | // 释放redis对象
485 | pool.returnBrokenResource(jedis);
486 | logger.error(e.getMessage(), e);
487 | } finally {
488 | // 返还到连接池
489 | returnResource(pool, jedis);
490 | }
491 | return ret == null ? 0.0 : ret;
492 | }
493 |
494 | /**
495 | * 返回的成员在排序设置的范围
496 | *
497 | * @param key
498 | * @param start
499 | * @param end
500 | * @return
501 | */
502 | public static Set zrange(String key, long start, long end) {
503 | Set ret = new HashSet();
504 | JedisPool pool = null;
505 | Jedis jedis = null;
506 | try {
507 | pool = getPool();
508 | jedis = pool.getResource();
509 |
510 | ret = jedis.zrange(key, start, end);
511 | } catch (Exception e) {
512 | // 释放redis对象
513 | pool.returnBrokenResource(jedis);
514 | logger.error(e.getMessage(), e);
515 | } finally {
516 | // 返还到连接池
517 | returnResource(pool, jedis);
518 | }
519 | return ret;
520 | }
521 |
522 | /**
523 | * 在排序的设置返回的成员范围,通过索引,下令从分数高到低
524 | *
525 | * @param key
526 | * @param start
527 | * @param end
528 | * @return
529 | */
530 | public static Set zrevrange(String key, long start, long end) {
531 | Set ret = new HashSet();
532 | JedisPool pool = null;
533 | Jedis jedis = null;
534 | try {
535 | pool = getPool();
536 | jedis = pool.getResource();
537 |
538 | ret = jedis.zrevrange(key, start, end);
539 | } catch (Exception e) {
540 | // 释放redis对象
541 | pool.returnBrokenResource(jedis);
542 | logger.error(e.getMessage(), e);
543 | } finally {
544 | // 返还到连接池
545 | returnResource(pool, jedis);
546 | }
547 | return ret;
548 | }
549 |
550 | /**
551 | * 返回指定分数段内的集合元素个数
552 | *
553 | * @param key
554 | * @param min
555 | * @param max
556 | * @return
557 | */
558 | public static long zcount(String key, double min, double max) {
559 | Long len = null;
560 | JedisPool pool = null;
561 | Jedis jedis = null;
562 | try {
563 | pool = getPool();
564 | jedis = pool.getResource();
565 |
566 | len = jedis.zcount(key, min, max);
567 | } catch (Exception e) {
568 | // 释放redis对象
569 | pool.returnBrokenResource(jedis);
570 | logger.error(e.getMessage(), e);
571 | } finally {
572 | // 返还到连接池
573 | returnResource(pool, jedis);
574 | }
575 | return len == null ? 0 : len;
576 | }
577 |
578 | /**
579 | * 向member增加/减少score
580 | *
581 | * @param key
582 | * @param score
583 | * @param member
584 | * @return
585 | */
586 | public static double zincrby(String key, double score, String member) {
587 | Double ret = null;
588 | JedisPool pool = null;
589 | Jedis jedis = null;
590 | try {
591 | pool = getPool();
592 | jedis = pool.getResource();
593 |
594 | ret = jedis.zincrby(key, score, member);
595 | } catch (Exception e) {
596 | // 释放redis对象
597 | pool.returnBrokenResource(jedis);
598 | logger.error(e.getMessage(), e);
599 | } finally {
600 | // 返还到连接池
601 | returnResource(pool, jedis);
602 | }
603 | return ret == null ? 0.0 : ret;
604 | }
605 |
606 | }
--------------------------------------------------------------------------------
/install/redis.conf:
--------------------------------------------------------------------------------
1 | # Redis configuration file example
2 |
3 | # Note on units: when memory size is needed, it is possible to specify
4 | # it in the usual form of 1k 5GB 4M and so forth:
5 | #
6 | # 1k => 1000 bytes
7 | # 1kb => 1024 bytes
8 | # 1m => 1000000 bytes
9 | # 1mb => 1024*1024 bytes
10 | # 1g => 1000000000 bytes
11 | # 1gb => 1024*1024*1024 bytes
12 | #
13 | # units are case insensitive so 1GB 1Gb 1gB are all the same.
14 |
15 | ################################## INCLUDES ###################################
16 |
17 | # Include one or more other config files here. This is useful if you
18 | # have a standard template that goes to all Redis server but also need
19 | # to customize a few per-server settings. Include files can include
20 | # other files, so use this wisely.
21 | #
22 | # Notice option "include" won't be rewritten by command "CONFIG REWRITE"
23 | # from admin or Redis Sentinel. Since Redis always uses the last processed
24 | # line as value of a configuration directive, you'd better put includes
25 | # at the beginning of this file to avoid overwriting config change at runtime.
26 | #
27 | # If instead you are interested in using includes to override configuration
28 | # options, it is better to use include as the last line.
29 | #
30 | # include /path/to/local.conf
31 | # include /path/to/other.conf
32 |
33 | ################################ GENERAL #####################################
34 |
35 | # By default Redis does not run as a daemon. Use 'yes' if you need it.
36 | # Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
37 | daemonize no
38 |
39 | # When running daemonized, Redis writes a pid file in /var/run/redis.pid by
40 | # default. You can specify a custom pid file location here.
41 | pidfile /var/run/redis.pid
42 |
43 | # Accept connections on the specified port, default is 6379.
44 | # If port 0 is specified Redis will not listen on a TCP socket.
45 | port 6379
46 |
47 | # TCP listen() backlog.
48 | #
49 | # In high requests-per-second environments you need an high backlog in order
50 | # to avoid slow clients connections issues. Note that the Linux kernel
51 | # will silently truncate it to the value of /proc/sys/net/core/somaxconn so
52 | # make sure to raise both the value of somaxconn and tcp_max_syn_backlog
53 | # in order to get the desired effect.
54 | tcp-backlog 511
55 |
56 | # By default Redis listens for connections from all the network interfaces
57 | # available on the server. It is possible to listen to just one or multiple
58 | # interfaces using the "bind" configuration directive, followed by one or
59 | # more IP addresses.
60 | #
61 | # Examples:
62 | #
63 | # bind 192.168.1.100 10.0.0.1
64 | # bind 127.0.0.1
65 |
66 | # Specify the path for the Unix socket that will be used to listen for
67 | # incoming connections. There is no default, so Redis will not listen
68 | # on a unix socket when not specified.
69 | #
70 | # unixsocket /tmp/redis.sock
71 | # unixsocketperm 755
72 |
73 | # Close the connection after a client is idle for N seconds (0 to disable)
74 | timeout 0
75 |
76 | # TCP keepalive.
77 | #
78 | # If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
79 | # of communication. This is useful for two reasons:
80 | #
81 | # 1) Detect dead peers.
82 | # 2) Take the connection alive from the point of view of network
83 | # equipment in the middle.
84 | #
85 | # On Linux, the specified value (in seconds) is the period used to send ACKs.
86 | # Note that to close the connection the double of the time is needed.
87 | # On other kernels the period depends on the kernel configuration.
88 | #
89 | # A reasonable value for this option is 60 seconds.
90 | tcp-keepalive 0
91 |
92 | # Specify the server verbosity level.
93 | # This can be one of:
94 | # debug (a lot of information, useful for development/testing)
95 | # verbose (many rarely useful info, but not a mess like the debug level)
96 | # notice (moderately verbose, what you want in production probably)
97 | # warning (only very important / critical messages are logged)
98 | loglevel notice
99 |
100 | # Specify the log file name. Also the empty string can be used to force
101 | # Redis to log on the standard output. Note that if you use standard
102 | # output for logging but daemonize, logs will be sent to /dev/null
103 | logfile stdout
104 |
105 | # To enable logging to the system logger, just set 'syslog-enabled' to yes,
106 | # and optionally update the other syslog parameters to suit your needs.
107 | # syslog-enabled no
108 |
109 | # Specify the syslog identity.
110 | # syslog-ident redis
111 |
112 | # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
113 | # syslog-facility local0
114 |
115 | # Set the number of databases. The default database is DB 0, you can select
116 | # a different one on a per-connection basis using SELECT <dbid> where
117 | # dbid is a number between 0 and 'databases'-1
118 | databases 16
119 |
120 | ################################ SNAPSHOTTING ################################
121 | #
122 | # Save the DB on disk:
123 | #
124 | #   save <seconds> <changes>
125 | #
126 | # Will save the DB if both the given number of seconds and the given
127 | # number of write operations against the DB occurred.
128 | #
129 | # In the example below the behaviour will be to save:
130 | # after 900 sec (15 min) if at least 1 key changed
131 | # after 300 sec (5 min) if at least 10 keys changed
132 | # after 60 sec if at least 10000 keys changed
133 | #
134 | # Note: you can disable saving at all commenting all the "save" lines.
135 | #
136 | # It is also possible to remove all the previously configured save
137 | # points by adding a save directive with a single empty string argument
138 | # like in the following example:
139 | #
140 | # save ""
141 |
142 | save 900 1
143 | save 300 10
144 | save 60 10000
145 |
146 | # By default Redis will stop accepting writes if RDB snapshots are enabled
147 | # (at least one save point) and the latest background save failed.
148 | # This will make the user aware (in a hard way) that data is not persisting
149 | # on disk properly, otherwise chances are that no one will notice and some
150 | # disaster will happen.
151 | #
152 | # If the background saving process will start working again Redis will
153 | # automatically allow writes again.
154 | #
155 | # However if you have setup your proper monitoring of the Redis server
156 | # and persistence, you may want to disable this feature so that Redis will
157 | # continue to work as usual even if there are problems with disk,
158 | # permissions, and so forth.
159 | stop-writes-on-bgsave-error yes
160 |
161 | # Compress string objects using LZF when dump .rdb databases?
162 | # For default that's set to 'yes' as it's almost always a win.
163 | # If you want to save some CPU in the saving child set it to 'no' but
164 | # the dataset will likely be bigger if you have compressible values or keys.
165 | rdbcompression yes
166 |
167 | # Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
168 | # This makes the format more resistant to corruption but there is a performance
169 | # hit to pay (around 10%) when saving and loading RDB files, so you can disable it
170 | # for maximum performances.
171 | #
172 | # RDB files created with checksum disabled have a checksum of zero that will
173 | # tell the loading code to skip the check.
174 | rdbchecksum yes
175 |
176 | # The filename where to dump the DB
177 | dbfilename dump.rdb
178 |
179 | # The working directory.
180 | #
181 | # The DB will be written inside this directory, with the filename specified
182 | # above using the 'dbfilename' configuration directive.
183 | #
184 | # The Append Only File will also be created inside this directory.
185 | #
186 | # Note that you must specify a directory here, not a file name.
187 | dir /data/redis/data
188 |
189 | ################################# REPLICATION #################################
190 |
191 | # Master-Slave replication. Use slaveof to make a Redis instance a copy of
192 | # another Redis server. Note that the configuration is local to the slave
193 | # so for example it is possible to configure the slave to save the DB with a
194 | # different interval, or to listen to another port, and so on.
195 | #
196 | # slaveof <masterip> <masterport>
197 |
198 | # If the master is password protected (using the "requirepass" configuration
199 | # directive below) it is possible to tell the slave to authenticate before
200 | # starting the replication synchronization process, otherwise the master will
201 | # refuse the slave request.
202 | #
203 | # masterauth <master-password>
204 |
205 | # When a slave loses its connection with the master, or when the replication
206 | # is still in progress, the slave can act in two different ways:
207 | #
208 | # 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
209 | # still reply to client requests, possibly with out of date data, or the
210 | # data set may just be empty if this is the first synchronization.
211 | #
212 | # 2) if slave-serve-stale-data is set to 'no' the slave will reply with
213 | # an error "SYNC with master in progress" to all the kind of commands
214 | # but to INFO and SLAVEOF.
215 | #
216 | slave-serve-stale-data yes
217 |
218 | # You can configure a slave instance to accept writes or not. Writing against
219 | # a slave instance may be useful to store some ephemeral data (because data
220 | # written on a slave will be easily deleted after resync with the master) but
221 | # may also cause problems if clients are writing to it because of a
222 | # misconfiguration.
223 | #
224 | # Since Redis 2.6 by default slaves are read-only.
225 | #
226 | # Note: read only slaves are not designed to be exposed to untrusted clients
227 | # on the internet. It's just a protection layer against misuse of the instance.
228 | # Still a read only slave exports by default all the administrative commands
229 | # such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
230 | # security of read only slaves using 'rename-command' to shadow all the
231 | # administrative / dangerous commands.
232 | slave-read-only yes
233 |
234 | # Slaves send PINGs to server in a predefined interval. It's possible to change
235 | # this interval with the repl_ping_slave_period option. The default value is 10
236 | # seconds.
237 | #
238 | # repl-ping-slave-period 10
239 |
240 | # The following option sets the replication timeout for:
241 | #
242 | # 1) Bulk transfer I/O during SYNC, from the point of view of slave.
243 | # 2) Master timeout from the point of view of slaves (data, pings).
244 | # 3) Slave timeout from the point of view of masters (REPLCONF ACK pings).
245 | #
246 | # It is important to make sure that this value is greater than the value
247 | # specified for repl-ping-slave-period otherwise a timeout will be detected
248 | # every time there is low traffic between the master and the slave.
249 | #
250 | # repl-timeout 60
251 |
252 | # Disable TCP_NODELAY on the slave socket after SYNC?
253 | #
254 | # If you select "yes" Redis will use a smaller number of TCP packets and
255 | # less bandwidth to send data to slaves. But this can add a delay for
256 | # the data to appear on the slave side, up to 40 milliseconds with
257 | # Linux kernels using a default configuration.
258 | #
259 | # If you select "no" the delay for data to appear on the slave side will
260 | # be reduced but more bandwidth will be used for replication.
261 | #
262 | # By default we optimize for low latency, but in very high traffic conditions
263 | # or when the master and slaves are many hops away, turning this to "yes" may
264 | # be a good idea.
265 | repl-disable-tcp-nodelay no
266 |
267 | # Set the replication backlog size. The backlog is a buffer that accumulates
268 | # slave data when slaves are disconnected for some time, so that when a slave
269 | # wants to reconnect again, often a full resync is not needed, but a partial
270 | # resync is enough, just passing the portion of data the slave missed while
271 | # disconnected.
272 | #
273 | # The bigger the replication backlog, the longer the time the slave can be
274 | # disconnected and later be able to perform a partial resynchronization.
275 | #
276 | # The backlog is only allocated once there is at least a slave connected.
277 | #
278 | # repl-backlog-size 1mb
279 |
280 | # After a master has no longer connected slaves for some time, the backlog
281 | # will be freed. The following option configures the amount of seconds that
282 | # need to elapse, starting from the time the last slave disconnected, for
283 | # the backlog buffer to be freed.
284 | #
285 | # A value of 0 means to never release the backlog.
286 | #
287 | # repl-backlog-ttl 3600
288 |
289 | # The slave priority is an integer number published by Redis in the INFO output.
290 | # It is used by Redis Sentinel in order to select a slave to promote into a
291 | # master if the master is no longer working correctly.
292 | #
293 | # A slave with a low priority number is considered better for promotion, so
294 | # for instance if there are three slaves with priority 10, 100, 25 Sentinel will
295 | # pick the one with priority 10, that is the lowest.
296 | #
297 | # However a special priority of 0 marks the slave as not able to perform the
298 | # role of master, so a slave with priority of 0 will never be selected by
299 | # Redis Sentinel for promotion.
300 | #
301 | # By default the priority is 100.
302 | slave-priority 100
303 |
304 | # It is possible for a master to stop accepting writes if there are less than
305 | # N slaves connected, having a lag less or equal than M seconds.
306 | #
307 | # The N slaves need to be in "online" state.
308 | #
309 | # The lag in seconds, that must be <= the specified value, is calculated from
310 | # the last ping received from the slave, that is usually sent every second.
311 | #
312 | # This option does not GUARANTEE that N replicas will accept the write, but
313 | # will limit the window of exposure for lost writes in case not enough slaves
314 | # are available, to the specified number of seconds.
315 | #
316 | # For example to require at least 3 slaves with a lag <= 10 seconds use:
317 | #
318 | # min-slaves-to-write 3
319 | # min-slaves-max-lag 10
320 | #
321 | # Setting one or the other to 0 disables the feature.
322 | #
323 | # By default min-slaves-to-write is set to 0 (feature disabled) and
324 | # min-slaves-max-lag is set to 10.
325 |
326 | ################################## SECURITY ###################################
327 |
328 | # Require clients to issue AUTH before processing any other
329 | # commands. This might be useful in environments in which you do not trust
330 | # others with access to the host running redis-server.
331 | #
332 | # This should stay commented out for backward compatibility and because most
333 | # people do not need auth (e.g. they run their own servers).
334 | #
335 | # Warning: since Redis is pretty fast an outside user can try up to
336 | # 150k passwords per second against a good box. This means that you should
337 | # use a very strong password otherwise it will be very easy to break.
338 | #
339 | requirepass password
340 |
341 | # Command renaming.
342 | #
343 | # It is possible to change the name of dangerous commands in a shared
344 | # environment. For instance the CONFIG command may be renamed into something
345 | # hard to guess so that it will still be available for internal-use tools
346 | # but not available for general clients.
347 | #
348 | # Example:
349 | #
350 | # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
351 | #
352 | # It is also possible to completely kill a command by renaming it into
353 | # an empty string:
354 | #
355 | # rename-command CONFIG ""
356 | #
357 | # Please note that changing the name of commands that are logged into the
358 | # AOF file or transmitted to slaves may cause problems.
359 |
360 | ################################### LIMITS ####################################
361 |
362 | # Set the max number of connected clients at the same time. By default
363 | # this limit is set to 10000 clients, however if the Redis server is not
364 | # able to configure the process file limit to allow for the specified limit
365 | # the max number of allowed clients is set to the current file limit
366 | # minus 32 (as Redis reserves a few file descriptors for internal uses).
367 | #
368 | # Once the limit is reached Redis will close all the new connections sending
369 | # an error 'max number of clients reached'.
370 | #
371 | # maxclients 10000
372 |
373 | # Don't use more memory than the specified amount of bytes.
374 | # When the memory limit is reached Redis will try to remove keys
375 | # according to the eviction policy selected (see maxmemory-policy).
376 | #
377 | # If Redis can't remove keys according to the policy, or if the policy is
378 | # set to 'noeviction', Redis will start to reply with errors to commands
379 | # that would use more memory, like SET, LPUSH, and so on, and will continue
380 | # to reply to read-only commands like GET.
381 | #
382 | # This option is usually useful when using Redis as an LRU cache, or to set
383 | # a hard memory limit for an instance (using the 'noeviction' policy).
384 | #
385 | # WARNING: If you have slaves attached to an instance with maxmemory on,
386 | # the size of the output buffers needed to feed the slaves are subtracted
387 | # from the used memory count, so that network problems / resyncs will
388 | # not trigger a loop where keys are evicted, and in turn the output
389 | # buffer of slaves is full with DELs of keys evicted triggering the deletion
390 | # of more keys, and so forth until the database is completely emptied.
391 | #
392 | # In short... if you have slaves attached it is suggested that you set a lower
393 | # limit for maxmemory so that there is some free RAM on the system for slave
394 | # output buffers (but this is not needed if the policy is 'noeviction').
395 | #
396 | # maxmemory <bytes>
397 |
398 | # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
399 | # is reached. You can select among five behaviors:
400 | #
401 | # volatile-lru -> remove the key with an expire set using an LRU algorithm
402 | # allkeys-lru -> remove any key accordingly to the LRU algorithm
403 | # volatile-random -> remove a random key with an expire set
404 | # allkeys-random -> remove a random key, any key
405 | # volatile-ttl -> remove the key with the nearest expire time (minor TTL)
406 | # noeviction -> don't expire at all, just return an error on write operations
407 | #
408 | # Note: with any of the above policies, Redis will return an error on write
409 | # operations, when there are not suitable keys for eviction.
410 | #
411 | # At the date of writing this commands are: set setnx setex append
412 | # incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
413 | # sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
414 | # zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
415 | # getset mset msetnx exec sort
416 | #
417 | # The default is:
418 | #
419 | # maxmemory-policy volatile-lru
420 |
421 | # LRU and minimal TTL algorithms are not precise algorithms but approximated
422 | # algorithms (in order to save memory), so you can select as well the sample
423 | # size to check. For instance for default Redis will check three keys and
424 | # pick the one that was used less recently, you can change the sample size
425 | # using the following configuration directive.
426 | #
427 | # maxmemory-samples 3
428 |
429 | ############################## APPEND ONLY MODE ###############################
430 |
431 | # By default Redis asynchronously dumps the dataset on disk. This mode is
432 | # good enough in many applications, but an issue with the Redis process or
433 | # a power outage may result into a few minutes of writes lost (depending on
434 | # the configured save points).
435 | #
436 | # The Append Only File is an alternative persistence mode that provides
437 | # much better durability. For instance using the default data fsync policy
438 | # (see later in the config file) Redis can lose just one second of writes in a
439 | # dramatic event like a server power outage, or a single write if something
440 | # wrong with the Redis process itself happens, but the operating system is
441 | # still running correctly.
442 | #
443 | # AOF and RDB persistence can be enabled at the same time without problems.
444 | # If the AOF is enabled on startup Redis will load the AOF, that is the file
445 | # with the better durability guarantees.
446 | #
447 | # Please check http://redis.io/topics/persistence for more information.
448 |
449 | appendonly no
450 |
451 | # The name of the append only file (default: "appendonly.aof")
452 |
453 | appendfilename "appendonly.aof"
454 |
455 | # The fsync() call tells the Operating System to actually write data on disk
456 | # instead to wait for more data in the output buffer. Some OS will really flush
457 | # data on disk, some other OS will just try to do it ASAP.
458 | #
459 | # Redis supports three different modes:
460 | #
461 | # no: don't fsync, just let the OS flush the data when it wants. Faster.
462 | # always: fsync after every write to the append only log . Slow, Safest.
463 | # everysec: fsync only one time every second. Compromise.
464 | #
465 | # The default is "everysec", as that's usually the right compromise between
466 | # speed and data safety. It's up to you to understand if you can relax this to
467 | # "no" that will let the operating system flush the output buffer when
468 | # it wants, for better performances (but if you can live with the idea of
469 | # some data loss consider the default persistence mode that's snapshotting),
470 | # or on the contrary, use "always" that's very slow but a bit safer than
471 | # everysec.
472 | #
473 | # More details please check the following article:
474 | # http://antirez.com/post/redis-persistence-demystified.html
475 | #
476 | # If unsure, use "everysec".
477 |
478 | # appendfsync always
479 | appendfsync everysec
480 | # appendfsync no
481 |
482 | # When the AOF fsync policy is set to always or everysec, and a background
483 | # saving process (a background save or AOF log background rewriting) is
484 | # performing a lot of I/O against the disk, in some Linux configurations
485 | # Redis may block too long on the fsync() call. Note that there is no fix for
486 | # this currently, as even performing fsync in a different thread will block
487 | # our synchronous write(2) call.
488 | #
489 | # In order to mitigate this problem it's possible to use the following option
490 | # that will prevent fsync() from being called in the main process while a
491 | # BGSAVE or BGREWRITEAOF is in progress.
492 | #
493 | # This means that while another child is saving, the durability of Redis is
494 | # the same as "appendfsync none". In practical terms, this means that it is
495 | # possible to lose up to 30 seconds of log in the worst scenario (with the
496 | # default Linux settings).
497 | #
498 | # If you have latency problems turn this to "yes". Otherwise leave it as
499 | # "no" that is the safest pick from the point of view of durability.
500 |
501 | no-appendfsync-on-rewrite no
502 |
503 | # Automatic rewrite of the append only file.
504 | # Redis is able to automatically rewrite the log file implicitly calling
505 | # BGREWRITEAOF when the AOF log size grows by the specified percentage.
506 | #
507 | # This is how it works: Redis remembers the size of the AOF file after the
508 | # latest rewrite (if no rewrite has happened since the restart, the size of
509 | # the AOF at startup is used).
510 | #
511 | # This base size is compared to the current size. If the current size is
512 | # bigger than the specified percentage, the rewrite is triggered. Also
513 | # you need to specify a minimal size for the AOF file to be rewritten, this
514 | # is useful to avoid rewriting the AOF file even if the percentage increase
515 | # is reached but it is still pretty small.
516 | #
517 | # Specify a percentage of zero in order to disable the automatic AOF
518 | # rewrite feature.
519 |
520 | auto-aof-rewrite-percentage 100
521 | auto-aof-rewrite-min-size 64mb
522 |
523 | ################################ LUA SCRIPTING ###############################
524 |
525 | # Max execution time of a Lua script in milliseconds.
526 | #
527 | # If the maximum execution time is reached Redis will log that a script is
528 | # still in execution after the maximum allowed time and will start to
529 | # reply to queries with an error.
530 | #
531 | # When a long running script exceed the maximum execution time only the
532 | # SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
533 | # used to stop a script that did not yet called write commands. The second
534 | # is the only way to shut down the server in the case a write command was
535 | # already issued by the script but the user doesn't want to wait for the natural
536 | # termination of the script.
537 | #
538 | # Set it to 0 or a negative value for unlimited execution without warnings.
539 | lua-time-limit 5000
540 |
541 | ################################## SLOW LOG ###################################
542 |
543 | # The Redis Slow Log is a system to log queries that exceeded a specified
544 | # execution time. The execution time does not include the I/O operations
545 | # like talking with the client, sending the reply and so forth,
546 | # but just the time needed to actually execute the command (this is the only
547 | # stage of command execution where the thread is blocked and can not serve
548 | # other requests in the meantime).
549 | #
550 | # You can configure the slow log with two parameters: one tells Redis
551 | # what is the execution time, in microseconds, to exceed in order for the
552 | # command to get logged, and the other parameter is the length of the
553 | # slow log. When a new command is logged the oldest one is removed from the
554 | # queue of logged commands.
555 |
556 | # The following time is expressed in microseconds, so 1000000 is equivalent
557 | # to one second. Note that a negative number disables the slow log, while
558 | # a value of zero forces the logging of every command.
559 | slowlog-log-slower-than 10000
560 |
561 | # There is no limit to this length. Just be aware that it will consume memory.
562 | # You can reclaim memory used by the slow log with SLOWLOG RESET.
563 | slowlog-max-len 128
564 |
565 | ############################# Event notification ##############################
566 |
567 | # Redis can notify Pub/Sub clients about events happening in the key space.
568 | # This feature is documented at http://redis.io/topics/keyspace-events
569 | #
570 | # For instance if keyspace events notification is enabled, and a client
571 | # performs a DEL operation on key "foo" stored in the Database 0, two
572 | # messages will be published via Pub/Sub:
573 | #
574 | # PUBLISH __keyspace@0__:foo del
575 | # PUBLISH __keyevent@0__:del foo
576 | #
577 | # It is possible to select the events that Redis will notify among a set
578 | # of classes. Every class is identified by a single character:
579 | #
580 | # K Keyspace events, published with __keyspace@<db>__ prefix.
581 | # E Keyevent events, published with __keyevent@<db>__ prefix.
582 | # g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
583 | # $ String commands
584 | # l List commands
585 | # s Set commands
586 | # h Hash commands
587 | # z Sorted set commands
588 | # x Expired events (events generated every time a key expires)
589 | # e Evicted events (events generated when a key is evicted for maxmemory)
590 | # A Alias for g$lshzxe, so that the "AKE" string means all the events.
591 | #
592 | # The "notify-keyspace-events" takes as argument a string that is composed
593 | # by zero or multiple characters. The empty string means that notifications
594 | # are disabled at all.
595 | #
596 | # Example: to enable list and generic events, from the point of view of the
597 | # event name, use:
598 | #
599 | # notify-keyspace-events Elg
600 | #
601 | # Example 2: to get the stream of the expired keys subscribing to channel
602 | # name __keyevent@0__:expired use:
603 | #
604 | # notify-keyspace-events Ex
605 | #
606 | # By default all notifications are disabled because most users don't need
607 | # this feature and the feature has some overhead. Note that if you don't
608 | # specify at least one of K or E, no events will be delivered.
609 | notify-keyspace-events ""
610 |
611 | ############################### ADVANCED CONFIG ###############################
612 |
613 | # Hashes are encoded using a memory efficient data structure when they have a
614 | # small number of entries, and the biggest entry does not exceed a given
615 | # threshold. These thresholds can be configured using the following directives.
616 | hash-max-ziplist-entries 512
617 | hash-max-ziplist-value 64
618 |
619 | # Similarly to hashes, small lists are also encoded in a special way in order
620 | # to save a lot of space. The special representation is only used when
621 | # you are under the following limits:
622 | list-max-ziplist-entries 512
623 | list-max-ziplist-value 64
624 |
625 | # Sets have a special encoding in just one case: when a set is composed
626 | # of just strings that happens to be integers in radix 10 in the range
627 | # of 64 bit signed integers.
628 | # The following configuration setting sets the limit in the size of the
629 | # set in order to use this special memory saving encoding.
630 | set-max-intset-entries 512
631 |
632 | # Similarly to hashes and lists, sorted sets are also specially encoded in
633 | # order to save a lot of space. This encoding is only used when the length and
634 | # elements of a sorted set are below the following limits:
635 | zset-max-ziplist-entries 128
636 | zset-max-ziplist-value 64
637 |
638 | # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
639 | # order to help rehashing the main Redis hash table (the one mapping top-level
640 | # keys to values). The hash table implementation Redis uses (see dict.c)
641 | # performs a lazy rehashing: the more operation you run into a hash table
642 | # that is rehashing, the more rehashing "steps" are performed, so if the
643 | # server is idle the rehashing is never complete and some more memory is used
644 | # by the hash table.
645 | #
646 | # The default is to use this millisecond 10 times every second in order to
647 | # active rehashing the main dictionaries, freeing memory when possible.
648 | #
649 | # If unsure:
650 | # use "activerehashing no" if you have hard latency requirements and it is
651 | # not a good thing in your environment that Redis can reply from time to time
652 | # to queries with 2 milliseconds delay.
653 | #
654 | # use "activerehashing yes" if you don't have such hard requirements but
655 | # want to free memory asap when possible.
656 | activerehashing yes
657 |
658 | # The client output buffer limits can be used to force disconnection of clients
659 | # that are not reading data from the server fast enough for some reason (a
660 | # common reason is that a Pub/Sub client can't consume messages as fast as the
661 | # publisher can produce them).
662 | #
663 | # The limit can be set differently for the three different classes of clients:
664 | #
665 | # normal -> normal clients
666 | # slave -> slave clients and MONITOR clients
667 | # pubsub -> clients subscribed to at least one pubsub channel or pattern
668 | #
669 | # The syntax of every client-output-buffer-limit directive is the following:
670 | #
671 | # client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
672 | #
673 | # A client is immediately disconnected once the hard limit is reached, or if
674 | # the soft limit is reached and remains reached for the specified number of
675 | # seconds (continuously).
676 | # So for instance if the hard limit is 32 megabytes and the soft limit is
677 | # 16 megabytes / 10 seconds, the client will get disconnected immediately
678 | # if the size of the output buffers reach 32 megabytes, but will also get
679 | # disconnected if the client reaches 16 megabytes and continuously overcomes
680 | # the limit for 10 seconds.
681 | #
682 | # By default normal clients are not limited because they don't receive data
683 | # without asking (in a push way), but just after a request, so only
684 | # asynchronous clients may create a scenario where data is requested faster
685 | # than it can read.
686 | #
687 | # Instead there is a default limit for pubsub and slave clients, since
688 | # subscribers and slaves receive data in a push fashion.
689 | #
690 | # Both the hard or the soft limit can be disabled by setting them to zero.
691 | client-output-buffer-limit normal 0 0 0
692 | client-output-buffer-limit slave 256mb 64mb 60
693 | client-output-buffer-limit pubsub 32mb 8mb 60
694 |
695 | # Redis calls an internal function to perform many background tasks, like
696 | # closing connections of clients in timeout, purging expired keys that are
697 | # never requested, and so forth.
698 | #
699 | # Not all tasks are performed with the same frequency, but Redis checks for
700 | # tasks to perform accordingly to the specified "hz" value.
701 | #
702 | # By default "hz" is set to 10. Raising the value will use more CPU when
703 | # Redis is idle, but at the same time will make Redis more responsive when
704 | # there are many keys expiring at the same time, and timeouts may be
705 | # handled with more precision.
706 | #
707 | # The range is between 1 and 500, however a value over 100 is usually not
708 | # a good idea. Most users should use the default of 10 and raise this up to
709 | # 100 only in environments where very low latency is required.
710 | hz 10
711 |
712 | # When a child rewrites the AOF file, if the following option is enabled
713 | # the file will be fsync-ed every 32 MB of data generated. This is useful
714 | # in order to commit the file to the disk more incrementally and avoid
715 | # big latency spikes.
716 | aof-rewrite-incremental-fsync yes
717 |
718 |
--------------------------------------------------------------------------------
/RedisCluster/src/main/java/com/rr/redis/client/RedisClusterPoolClient.java:
--------------------------------------------------------------------------------
1 | package com.rr.redis.client;
2 |
3 | import java.util.ArrayList;
4 | import java.util.Collection;
5 | import java.util.HashMap;
6 | import java.util.List;
7 | import java.util.Map;
8 | import java.util.Map.Entry;
9 | import java.util.Set;
10 | import java.util.concurrent.Callable;
11 | import java.util.concurrent.ExecutorService;
12 | import java.util.concurrent.Executors;
13 | import java.util.concurrent.Future;
14 |
15 | import org.apache.commons.logging.Log;
16 | import org.apache.commons.logging.LogFactory;
17 |
18 | import redis.clients.jedis.BinaryClient.LIST_POSITION;
19 | import redis.clients.jedis.Jedis;
20 | import redis.clients.jedis.SortingParams;
21 | import redis.clients.jedis.Tuple;
22 |
23 | import com.rr.redis.client.exception.ClusterOpException;
24 | import com.rr.redis.client.hash.IHashFunc;
25 |
26 | /**
27 | *
28 | * Pool Client for redis cluster.
29 | *
30 | *
31 | * This class is thread safe since we use JedisPool.
32 | *
33 | * USAGE: private RedisClusterPoolClient client; client = new
34 | * RedisClusterPoolClient("testCluster", "10.22.241.233:2181");
35 | * client.set("key", "value"); System.out.println(client.get("key"));
36 | *
37 | *
38 | *
39 | * @author lei.gao
40 | */
41 | public class RedisClusterPoolClient {
    private static final Log logger = LogFactory
            .getLog(RedisClusterPoolClient.class);
    /**
     * Hash function mapping a key to a slot/pool index.
     * NOTE(review): the original comment here said "Number of hash slot(node)",
     * which appears to describe {@code partsLen} below, not this field — confirm.
     */
    private IHashFunc hashFunc;
    private String clusterName; // logical cluster (business) name
    private String zkHost; // ZooKeeper connection string, e.g. "host:2181"
    private RedisClusterPoolProvider poolsObj; // provider of per-slot Jedis pools
    public static final int DEFAULT_ZK_TIMEOUT = 2000; // ms
    public static final int DEFAULT_REDIS_TIMEOUT = 3000; // ms
    private int zkTimeout = DEFAULT_ZK_TIMEOUT;
    private int redisTimeout = DEFAULT_REDIS_TIMEOUT;

    private int partsLen; // number of pools (hash partitions) in the cluster
57 |
58 | /**
59 | * RedisClusterPoolClient的默认构造函数,可以满足大多数场景
60 | *
61 | * @param clusterName
62 | * 业务名称
63 | * @param zkHost
64 | * zookeeper地址
65 | */
66 | public RedisClusterPoolClient(String clusterName, String zkHost) {
67 | this.clusterName = clusterName;
68 | this.zkHost = zkHost;
69 | }
70 |
71 | /**
72 | * RedisClusterPoolClient的构造函数,如果你不确定请不要使用
73 | *
74 | * @param clusterName
75 | * @param zkHost
76 | * @param poolMaxActive
77 | * @param poolMaxIdle
78 | * @param poolMaxWait
79 | * maxActive: 1000 the maximum number of objects that can be
80 | * allocated by the pool (checked out to clients, or idle
81 | * awaiting checkout) at a given time. When non-positive, there
82 | * is no limit to the number of objects that can be managed by
83 | * the pool at one time. When maxActive is reached, the pool is
84 | * said to be exhausted. The default setting for this parameter
85 | * is 8. maxIdle: 100 the maximum number of objects that can sit
86 | * idle in the pool at any time. When negative, there is no limit
87 | * to the number of objects that may be idle at one time. The
88 | * default setting for this parameter is 8.
89 | */
90 | public RedisClusterPoolClient(String clusterName, String zkHost,
91 | int poolMaxActive, int poolMaxIdle, int poolMaxWait) {
92 | this.clusterName = clusterName;
93 | this.zkHost = zkHost;
94 | RedisClusterPoolProvider.setJedisPoolConfig(poolMaxActive, poolMaxIdle,
95 | poolMaxWait);
96 | }
97 |
98 | /**
99 | * RedisClusterPoolClient的构造函数
100 | *
101 | * @param clusterName
102 | * @param zkHost
103 | * @param redisTimeout
104 | * 设置redis超时时间,单位:ms
105 | */
106 | public RedisClusterPoolClient(String clusterName, String zkHost,
107 | int redisTimeout) {
108 | this.clusterName = clusterName;
109 | this.zkHost = zkHost;
110 | this.redisTimeout = redisTimeout;
111 | }
112 |
113 | /**
114 | * 初始化过程
115 | */
116 | public void init() {
117 | this.poolsObj = new RedisClusterPoolProvider(clusterName, zkHost,
118 | zkTimeout, redisTimeout);
119 | this.hashFunc = poolsObj.getHashFunc();
120 | this.partsLen = this.poolsObj.getPoolsLen();
121 | }
122 |
123 | /**
124 | * 得到hash算法
125 | *
126 | * @return
127 | */
128 | public IHashFunc getHashFunc() {
129 | return hashFunc;
130 | }
131 |
132 | /**
133 | * 设置key的值为value,永久性
134 | *
135 | * key是string和key是byte[] 不能混用,因为两者的hash值不同
136 | *
137 | * @param key
138 | * @param value
139 | * @return 成功返回 OK
140 | */
141 | public String set(String key, String value) {
142 | try {
143 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
144 | .getPool(hashFunc.hash(key));
145 | Jedis jedis = pool.getResource();
146 | String r = jedis.set(key, value);
147 | pool.returnResource(jedis);
148 | return r;
149 | } catch (Exception e) {
150 | throw new ClusterOpException(e);
151 | }
152 | }
153 |
154 | /**
155 | * 得到key的值
156 | *
157 | * @param key
158 | * @return
159 | */
160 | public String get(String key) {
161 | try {
162 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
163 | .getPool(hashFunc.hash(key));
164 | Jedis jedis = pool.getResource();
165 | String r = jedis.get(key);
166 | pool.returnResource(jedis);
167 | return r;
168 | } catch (Exception e) {
169 | throw new ClusterOpException(e);
170 | }
171 | }
172 |
173 | /**
174 | * 判断值是否存在
175 | *
176 | * @param key
177 | * @return
178 | */
179 | public Boolean exists(String key) {
180 | try {
181 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
182 | .getPool(hashFunc.hash(key));
183 | Jedis jedis = pool.getResource();
184 | Boolean r = jedis.exists(key);
185 | pool.returnResource(jedis);
186 | return r;
187 | } catch (Exception e) {
188 | throw new ClusterOpException(e);
189 | }
190 | }
191 |
192 | /**
193 | * 返回key的类型,不存在则为none
194 | *
195 | * @param key
196 | * @return
197 | */
198 | public String type(String key) {
199 | try {
200 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
201 | .getPool(hashFunc.hash(key));
202 | Jedis jedis = pool.getResource();
203 | String r = jedis.type(key);
204 | pool.returnResource(jedis);
205 | return r;
206 | } catch (Exception e) {
207 | throw new ClusterOpException(e);
208 | }
209 | }
210 |
211 | /**
212 | * 为 key 设置生存时间,过期时间为seconds秒
213 | *
214 | * @param key
215 | * @param seconds
216 | * @return
217 | */
218 | public Long expire(String key, int seconds) {
219 | try {
220 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
221 | .getPool(hashFunc.hash(key));
222 | Jedis jedis = pool.getResource();
223 | Long r = jedis.expire(key, seconds);
224 | pool.returnResource(jedis);
225 | return r;
226 | } catch (Exception e) {
227 | throw new ClusterOpException(e);
228 | }
229 | }
230 |
231 | public Long expire(int seconds, String... keys) {
232 | try {
233 | long result = 0;
234 | for (String key : keys) {
235 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
236 | .getPool(hashFunc.hash(key));
237 | Jedis jedis = pool.getResource();
238 | result += jedis.expire(key, seconds);
239 | pool.returnResource(jedis);
240 | }
241 | return result;
242 | } catch (Exception e) {
243 | throw new ClusterOpException(e);
244 | }
245 | }
246 |
247 | public Long expireBinary(final Set keys, int seconds) {
248 | try {
249 | long result = 0;
250 | for (byte[] key : keys) {
251 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
252 | .getPool(hashFunc.hash(key));
253 | Jedis jedis = pool.getResource();
254 | result += jedis.expire(key, seconds);
255 | pool.returnResource(jedis);
256 | }
257 | return result;
258 | } catch (Exception e) {
259 | throw new ClusterOpException(e);
260 | }
261 | }
262 |
263 | /**
264 | * EXPIREAT 命令接受的时间参数是 UNIX 时间戳
265 | *
266 | * 例如:EXPIREAT cache 1355292000 # 这个 key 将在 2012.12.12 过期
267 | *
268 | * @param key
269 | * @param unixTime
270 | * @return
271 | */
272 | public Long expireAt(String key, long unixTime) {
273 | try {
274 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
275 | .getPool(hashFunc.hash(key));
276 | Jedis jedis = pool.getResource();
277 | Long r = jedis.expireAt(key, unixTime);
278 | pool.returnResource(jedis);
279 | return r;
280 | } catch (Exception e) {
281 | throw new ClusterOpException(e);
282 | }
283 |
284 | }
285 |
286 | /**
287 | * 返回生存时间 如果是永久性则返回-1
288 | *
289 | * @param key
290 | * @return
291 | */
292 | public Long ttl(String key) {
293 | try {
294 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
295 | .getPool(hashFunc.hash(key));
296 | Jedis jedis = pool.getResource();
297 | Long r = jedis.ttl(key);
298 | pool.returnResource(jedis);
299 | return r;
300 | } catch (Exception e) {
301 | throw new ClusterOpException(e);
302 | }
303 |
304 | }
305 |
306 | /**
307 | * 设置或者清空key的value(字符串)在offset处的bit值
308 | *
309 | * http://redis.cn/commands/setbit.html
310 | *
311 | * @param key
312 | * @param offset
313 | * @param value
314 | * @return
315 | */
316 | public boolean setbit(String key, long offset, boolean value) {
317 | try {
318 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
319 | .getPool(hashFunc.hash(key));
320 | Jedis jedis = pool.getResource();
321 | boolean r = jedis.setbit(key, offset, value);
322 | pool.returnResource(jedis);
323 | return r;
324 | } catch (Exception e) {
325 | throw new ClusterOpException(e);
326 | }
327 |
328 | }
329 |
330 | /**
331 | * 返回位的值存储在关键的字符串值的偏移量。
332 | *
333 | * @param key
334 | * @param offset
335 | * @return
336 | */
337 | public boolean getbit(String key, long offset) {
338 | try {
339 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
340 | .getPool(hashFunc.hash(key));
341 | Jedis jedis = pool.getResource();
342 | boolean r = jedis.getbit(key, offset);
343 | pool.returnResource(jedis);
344 | return r;
345 | } catch (Exception e) {
346 | throw new ClusterOpException(e);
347 | }
348 |
349 | }
350 |
351 | /**
352 | * 覆盖key对应的string的一部分,从指定的offset处开始,覆盖value的长度
353 | *
354 | * @param key
355 | * @param offset
356 | * @param value
357 | * @return
358 | */
359 | public long setrange(String key, long offset, String value) {
360 | try {
361 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
362 | .getPool(hashFunc.hash(key));
363 | Jedis jedis = pool.getResource();
364 | long r = jedis.setrange(key, offset, value);
365 | pool.returnResource(jedis);
366 | return r;
367 | } catch (Exception e) {
368 | throw new ClusterOpException(e);
369 | }
370 |
371 | }
372 |
373 | /**
374 | * 获取存储在key上的值的一个子字符串
375 | *
376 | * @param key
377 | * @param startOffset
378 | * @param endOffset
379 | * @return
380 | */
381 | public String getrange(String key, long startOffset, long endOffset) {
382 | try {
383 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
384 | .getPool(hashFunc.hash(key));
385 | Jedis jedis = pool.getResource();
386 | String r = jedis.getrange(key, startOffset, endOffset);
387 | pool.returnResource(jedis);
388 | return r;
389 | } catch (Exception e) {
390 | throw new ClusterOpException(e);
391 | }
392 |
393 | }
394 |
395 | /**
396 | * 自动将key对应到value并且返回原来key对应的value
397 | *
398 | * @param key
399 | * @param value
400 | * @return
401 | */
402 | public String getSet(String key, String value) {
403 | try {
404 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
405 | .getPool(hashFunc.hash(key));
406 | Jedis jedis = pool.getResource();
407 | String r = jedis.getSet(key, value);
408 | pool.returnResource(jedis);
409 | return r;
410 | } catch (Exception e) {
411 | throw new ClusterOpException(e);
412 | }
413 |
414 | }
415 |
416 | /**
417 | * 如果key不存在,就设置key对应字符串value。在这种情况下,该命令和SET一样。当key已经存在时,就不做任何操作。SETNX是
418 | * "SET if Not eXists"。
419 | *
420 | * @param key
421 | * @param value
422 | * @return
423 | */
424 | public Long setnx(String key, String value) {
425 | try {
426 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
427 | .getPool(hashFunc.hash(key));
428 | Jedis jedis = pool.getResource();
429 | Long r = jedis.setnx(key, value);
430 | pool.returnResource(jedis);
431 | return r;
432 | } catch (Exception e) {
433 | throw new ClusterOpException(e);
434 | }
435 |
436 | }
437 |
438 | /**
439 | * 设置key-value并设置过期时间(单位:秒)
440 | *
441 | * @param key
442 | * @param seconds
443 | * @param value
444 | * @return
445 | */
446 | public String setex(String key, int seconds, String value) {
447 | try {
448 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
449 | .getPool(hashFunc.hash(key));
450 | Jedis jedis = pool.getResource();
451 | String r = jedis.setex(key, seconds, value);
452 | pool.returnResource(jedis);
453 | return r;
454 | } catch (Exception e) {
455 | throw new ClusterOpException(e);
456 | }
457 |
458 | }
459 |
460 | /**
461 | * 原子减指定的整数
462 | *
463 | * @param key
464 | * @param integer
465 | * @return
466 | */
467 | public Long decrBy(String key, long integer) {
468 | try {
469 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
470 | .getPool(hashFunc.hash(key));
471 | Jedis jedis = pool.getResource();
472 | Long r = jedis.decrBy(key, integer);
473 | pool.returnResource(jedis);
474 | return r;
475 | } catch (Exception e) {
476 | throw new ClusterOpException(e);
477 | }
478 |
479 | }
480 |
481 | /**
482 | * 整数原子减1
483 | *
484 | * @param key
485 | * @return
486 | */
487 | public Long decr(String key) {
488 | try {
489 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
490 | .getPool(hashFunc.hash(key));
491 | Jedis jedis = pool.getResource();
492 | Long r = jedis.decr(key);
493 | pool.returnResource(jedis);
494 | return r;
495 | } catch (Exception e) {
496 | throw new ClusterOpException(e);
497 | }
498 |
499 | }
500 |
501 | /**
502 | * 执行原子增加一个整数
503 | *
504 | * @param key
505 | * @param integer
506 | * @return
507 | */
508 | public Long incrBy(String key, long integer) {
509 | try {
510 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
511 | .getPool(hashFunc.hash(key));
512 | Jedis jedis = pool.getResource();
513 | Long r = jedis.incrBy(key, integer);
514 | pool.returnResource(jedis);
515 | return r;
516 | } catch (Exception e) {
517 | throw new ClusterOpException(e);
518 | }
519 |
520 | }
521 |
522 | /**
523 | * 执行原子加1操作
524 | *
525 | * @param key
526 | * @return
527 | */
528 | public Long incr(String key) {
529 | try {
530 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
531 | .getPool(hashFunc.hash(key));
532 | Jedis jedis = pool.getResource();
533 | Long r = jedis.incr(key);
534 | pool.returnResource(jedis);
535 | return r;
536 | } catch (Exception e) {
537 | throw new ClusterOpException(e);
538 | }
539 |
540 | }
541 |
542 | /**
543 | * 追加一个值到key上
544 | *
545 | * @param key
546 | * @param value
547 | * @return
548 | */
549 | public Long append(String key, String value) {
550 | try {
551 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
552 | .getPool(hashFunc.hash(key));
553 | Jedis jedis = pool.getResource();
554 | Long r = jedis.append(key, value);
555 | pool.returnResource(jedis);
556 | return r;
557 | } catch (Exception e) {
558 | throw new ClusterOpException(e);
559 | }
560 |
561 | }
562 |
563 | /**
564 | * 设置hash里面一个字段的值
565 | *
566 | * @param key
567 | * @param field
568 | * @param value
569 | * @return
570 | */
571 | public Long hset(String key, String field, String value) {
572 | try {
573 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
574 | .getPool(hashFunc.hash(key));
575 | Jedis jedis = pool.getResource();
576 | Long r = jedis.hset(key, field, value);
577 | pool.returnResource(jedis);
578 | return r;
579 | } catch (Exception e) {
580 | throw new ClusterOpException(e);
581 | }
582 | }
583 |
584 | /**
585 | * 读取哈希域的的值
586 | *
587 | * @param key
588 | * @param field
589 | * @return
590 | */
591 | public String hget(String key, String field) {
592 | try {
593 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
594 | .getPool(hashFunc.hash(key));
595 | Jedis jedis = pool.getResource();
596 | String r = jedis.hget(key, field);
597 | pool.returnResource(jedis);
598 | return r;
599 | } catch (Exception e) {
600 | throw new ClusterOpException(e);
601 | }
602 | }
603 |
604 | /**
605 | * 设置hash的一个字段,只有当这个字段不存在时有效
606 | *
607 | * @param key
608 | * @param field
609 | * @param value
610 | * @return
611 | */
612 | public Long hsetnx(String key, String field, String value) {
613 | try {
614 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
615 | .getPool(hashFunc.hash(key));
616 | Jedis jedis = pool.getResource();
617 | Long r = jedis.hsetnx(key, field, value);
618 | pool.returnResource(jedis);
619 | return r;
620 | } catch (Exception e) {
621 | throw new ClusterOpException(e);
622 | }
623 | }
624 |
625 | /**
626 | * 设置hash字段值
627 | *
628 | * @param key
629 | * @param hash
630 | * @return
631 | */
632 | public String hmset(String key, Map hash) {
633 | if (hash == null || hash.isEmpty())
634 | throw new ClusterOpException("Param cannot be null or empty!");
635 | try {
636 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
637 | .getPool(hashFunc.hash(key));
638 | Jedis jedis = pool.getResource();
639 | String r = jedis.hmset(key, hash);
640 | pool.returnResource(jedis);
641 | return r;
642 | } catch (Exception e) {
643 | throw new ClusterOpException(e);
644 | }
645 | }
646 |
647 | /**
648 | * 获取hash里面指定字段的值
649 | *
650 | * @param key
651 | * @param fields
652 | * @return
653 | */
654 | public List hmget(String key, String... fields) {
655 | try {
656 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
657 | .getPool(hashFunc.hash(key));
658 | Jedis jedis = pool.getResource();
659 | List r = jedis.hmget(key, fields);
660 | pool.returnResource(jedis);
661 | return r;
662 | } catch (Exception e) {
663 | throw new ClusterOpException(e);
664 | }
665 | }
666 |
667 | /**
668 | * 将哈希集中指定域的值增加给定的数字
669 | *
670 | * @param key
671 | * @param field
672 | * @param value
673 | * @return
674 | */
675 | public Long hincrBy(String key, String field, long value) {
676 | try {
677 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
678 | .getPool(hashFunc.hash(key));
679 | Jedis jedis = pool.getResource();
680 | Long r = jedis.hincrBy(key, field, value);
681 | pool.returnResource(jedis);
682 | return r;
683 | } catch (Exception e) {
684 | throw new ClusterOpException(e);
685 | }
686 | }
687 |
688 | /**
689 | * 判断给定域是否存在于哈希集中
690 | *
691 | * @param key
692 | * @param field
693 | * @return
694 | */
695 | public Boolean hexists(String key, String field) {
696 | try {
697 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
698 | .getPool(hashFunc.hash(key));
699 | Jedis jedis = pool.getResource();
700 | Boolean r = jedis.hexists(key, field);
701 | pool.returnResource(jedis);
702 | return r;
703 | } catch (Exception e) {
704 | throw new ClusterOpException(e);
705 | }
706 | }
707 |
708 | /**
709 | * 删除一个或多个哈希域
710 | *
711 | * @param key
712 | * @param fields
713 | * @return
714 | */
715 | public Long hdel(String key, String... fields) {
716 | try {
717 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
718 | .getPool(hashFunc.hash(key));
719 | Jedis jedis = pool.getResource();
720 | Long r = jedis.hdel(key, fields);
721 | pool.returnResource(jedis);
722 | return r;
723 | } catch (Exception e) {
724 | throw new ClusterOpException(e);
725 | }
726 | }
727 |
728 | /**
729 | * 获取hash里所有字段的数量
730 | *
731 | * @param key
732 | * @return
733 | */
734 | public Long hlen(String key) {
735 | try {
736 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
737 | .getPool(hashFunc.hash(key));
738 | Jedis jedis = pool.getResource();
739 | Long r = jedis.hlen(key);
740 | pool.returnResource(jedis);
741 | return r;
742 | } catch (Exception e) {
743 | throw new ClusterOpException(e);
744 | }
745 | }
746 |
747 | /**
748 | * 获取hash的所有字段
749 | *
750 | * @param key
751 | * @return
752 | */
753 | public Set hkeys(String key) {
754 | try {
755 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
756 | .getPool(hashFunc.hash(key));
757 | Jedis jedis = pool.getResource();
758 | Set r = jedis.hkeys(key);
759 | pool.returnResource(jedis);
760 | return r;
761 | } catch (Exception e) {
762 | throw new ClusterOpException(e);
763 | }
764 | }
765 |
766 | /**
767 | * 获得hash的所有值
768 | *
769 | * @param key
770 | * @return
771 | */
772 | public List hvals(String key) {
773 | try {
774 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
775 | .getPool(hashFunc.hash(key));
776 | Jedis jedis = pool.getResource();
777 | List r = jedis.hvals(key);
778 | pool.returnResource(jedis);
779 | return r;
780 | } catch (Exception e) {
781 | throw new ClusterOpException(e);
782 | }
783 | }
784 |
785 | /**
786 | * 从哈希集中读取全部的域和值
787 | *
788 | * @param key
789 | * @return
790 | */
791 | public Map hgetAll(String key) {
792 | try {
793 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
794 | .getPool(hashFunc.hash(key));
795 | Jedis jedis = pool.getResource();
796 | Map r = jedis.hgetAll(key);
797 | pool.returnResource(jedis);
798 | return r;
799 | } catch (Exception e) {
800 | throw new ClusterOpException(e);
801 | }
802 | }
803 |
804 | /**
805 | * 从队列的右边入队一个元素
806 | *
807 | * @param key
808 | * @param strings
809 | * @return
810 | */
811 | public Long rpush(String key, String... strings) {
812 | try {
813 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
814 | .getPool(hashFunc.hash(key));
815 | Jedis jedis = pool.getResource();
816 | Long r = jedis.rpush(key, strings);
817 | pool.returnResource(jedis);
818 | return r;
819 | } catch (Exception e) {
820 | throw new ClusterOpException(e);
821 | }
822 | }
823 |
824 | /**
825 | * 从队列的左边入队一个或多个元素
826 | *
827 | * @param key
828 | * @param strings
829 | * @return
830 | */
831 | public Long lpush(String key, String... strings) {
832 | try {
833 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
834 | .getPool(hashFunc.hash(key));
835 | Jedis jedis = pool.getResource();
836 | Long r = jedis.lpush(key, strings);
837 | pool.returnResource(jedis);
838 | return r;
839 | } catch (Exception e) {
840 | throw new ClusterOpException(e);
841 | }
842 | }
843 |
844 | /**
845 | * 获得队列(List)的长度
846 | *
847 | * @param key
848 | * @return
849 | */
850 | public Long llen(String key) {
851 | try {
852 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
853 | .getPool(hashFunc.hash(key));
854 | Jedis jedis = pool.getResource();
855 | Long r = jedis.llen(key);
856 | pool.returnResource(jedis);
857 | return r;
858 | } catch (Exception e) {
859 | throw new ClusterOpException(e);
860 | }
861 | }
862 |
863 | /**
864 | * 从列表中获取指定返回的元素
865 | *
866 | * @param key
867 | * @param start
868 | * @param end
869 | * @return
870 | */
871 | public List lrange(String key, long start, long end) {
872 | try {
873 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
874 | .getPool(hashFunc.hash(key));
875 | Jedis jedis = pool.getResource();
876 | List r = jedis.lrange(key, start, end);
877 | pool.returnResource(jedis);
878 | return r;
879 | } catch (Exception e) {
880 | throw new ClusterOpException(e);
881 | }
882 | }
883 |
884 | /**
885 | * 修剪(trim)一个已存在的 list,这样 list 就会只包含指定范围的指定元素。start 和 stop 都是由0开始计数的, 这里的 0
886 | * 是列表里的第一个元素(表头),1 是第二个元素,以此类推。 例如: LTRIM foobar 0 2 将会对存储在 foobar
887 | * 的列表进行修剪,只保留列表里的前3个元素。 start 和 end 也可以用负数来表示与表尾的偏移量,比如 -1 表示列表里的最后一个元素, -2
888 | * 表示倒数第二个,等等。 超过范围的下标并不会产生错误:如果 start 超过列表尾部,或者 start > end,结果会是列表变成空表(即该
889 | * key 会被移除)。 如果 end 超过列表尾部,Redis 会将其当作列表的最后一个元素。 LTRIM 的一个常见用法是和 LPUSH /
890 | * RPUSH 一起使用
891 | *
892 | * @param key
893 | * @param start
894 | * @param end
895 | * @return
896 | */
897 | public String ltrim(String key, long start, long end) {
898 | try {
899 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
900 | .getPool(hashFunc.hash(key));
901 | Jedis jedis = pool.getResource();
902 | String r = jedis.ltrim(key, start, end);
903 | pool.returnResource(jedis);
904 | return r;
905 | } catch (Exception e) {
906 | throw new ClusterOpException(e);
907 | }
908 | }
909 |
910 | /**
911 | * 获取一个元素,通过其索引列表
912 | *
913 | * @param key
914 | * @param index
915 | * @return
916 | */
917 | public String lindex(String key, long index) {
918 | try {
919 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
920 | .getPool(hashFunc.hash(key));
921 | Jedis jedis = pool.getResource();
922 | String r = jedis.lindex(key, index);
923 | pool.returnResource(jedis);
924 | return r;
925 | } catch (Exception e) {
926 | throw new ClusterOpException(e);
927 | }
928 | }
929 |
930 | /**
931 | * 设置队列里面一个元素的值
932 | *
933 | * @param key
934 | * @param index
935 | * @param value
936 | * @return
937 | */
938 | public String lset(String key, long index, String value) {
939 | try {
940 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
941 | .getPool(hashFunc.hash(key));
942 | Jedis jedis = pool.getResource();
943 | String r = jedis.lset(key, index, value);
944 | pool.returnResource(jedis);
945 | return r;
946 | } catch (Exception e) {
947 | throw new ClusterOpException(e);
948 | }
949 | }
950 |
951 | /**
952 | * 从列表中删除元素
953 | *
954 | * 从存于 key 的列表里移除前 count 次出现的值为 value 的元素。 这个 count 参数通过下面几种方式影响这个操作: count
955 | * > 0: 从头往尾移除值为 value 的元素。 count < 0: 从尾往头移除值为 value 的元素。 count = 0: 移除所有值为
956 | * value 的元素。
957 | *
958 | * @param key
959 | * @param count
960 | * @param value
961 | * @return
962 | */
963 | public Long lrem(String key, long count, String value) {
964 | try {
965 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
966 | .getPool(hashFunc.hash(key));
967 | Jedis jedis = pool.getResource();
968 | Long r = jedis.lrem(key, count, value);
969 | pool.returnResource(jedis);
970 | return r;
971 | } catch (Exception e) {
972 | throw new ClusterOpException(e);
973 | }
974 | }
975 |
976 | /**
977 | * 删除,并获得该列表中的第一元素,或阻塞,直到有一个可用
978 | *
979 | * BLPOP 是阻塞式列表的弹出原语。 它是命令 LPOP 的阻塞版本,这是因为当给定列表内没有任何元素可供弹出的时候, 连接将被 BLPOP
980 | * 命令阻塞。 当给定多个 key 参数时,按参数 key 的先后顺序依次检查各个列表,弹出第一个非空列表的头元素。 非阻塞行为 当 BLPOP
981 | * 被调用时,如果给定 key 内至少有一个非空列表,那么弹出遇到的第一个非空列表的头元素,并和被弹出元素所属的列表的名字 key
982 | * 一起,组成结果返回给调用者。 当存在多个给定 key 时, BLPOP 按给定 key 参数排列的先后顺序,依次检查各个列表。 我们假设 key
983 | * list1 不存在,而 list2 和 list3 都是非空列表
984 | *
985 | * @param key
986 | * @param timeoutSecs
987 | * @return
988 | */
989 | public String blpop(String key, final int timeoutSecs) {
990 | try {
991 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
992 | .getPool(hashFunc.hash(key));
993 | Jedis jedis = pool.getResource();
994 | List r = jedis.blpop(timeoutSecs, key);
995 | pool.returnResource(jedis);
996 | if (r != null && r.size() == 2) {
997 | return r.get(1);
998 | } else {
999 | return null;
1000 | }
1001 | } catch (Exception e) {
1002 | throw new ClusterOpException(e);
1003 | }
1004 | }
1005 |
1006 | /**
1007 | * 删除,并获得该列表中的最后一个元素,或阻塞,直到有一个可用
1008 | *
1009 | * @param key
1010 | * @param timeoutSecs
1011 | * @return
1012 | */
1013 | public String brpop(String key, final int timeoutSecs) {
1014 | try {
1015 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1016 | .getPool(hashFunc.hash(key));
1017 | Jedis jedis = pool.getResource();
1018 | List r = jedis.brpop(timeoutSecs, key);
1019 | pool.returnResource(jedis);
1020 | if (r != null && r.size() == 2) {
1021 | return r.get(1);
1022 | } else {
1023 | return null;
1024 | }
1025 | } catch (Exception e) {
1026 | throw new ClusterOpException(e);
1027 | }
1028 | }
1029 |
1030 | /**
1031 | * 删除,并获得该列表中的第一元素,或阻塞,直到有一个可用
1032 | *
1033 | * @param key
1034 | * @param timeoutSecs
1035 | * @return
1036 | */
1037 | public byte[] blpop(byte[] key, final int timeoutSecs) {
1038 | try {
1039 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1040 | .getPool(hashFunc.hash(key));
1041 | Jedis jedis = pool.getResource();
1042 | List r = jedis.blpop(timeoutSecs, key);
1043 | pool.returnResource(jedis);
1044 | if (r != null && r.size() == 2) {
1045 | return r.get(1);
1046 | } else {
1047 | return null;
1048 | }
1049 | } catch (Exception e) {
1050 | throw new ClusterOpException(e);
1051 | }
1052 | }
1053 |
1054 | /**
1055 | * 删除,并获得该列表中的第一元素,或阻塞,直到有一个可用
1056 | *
1057 | * @param key
1058 | * @param timeoutSecs
1059 | * @return
1060 | */
1061 | public byte[] brpop(byte[] key, final int timeoutSecs) {
1062 | try {
1063 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1064 | .getPool(hashFunc.hash(key));
1065 | Jedis jedis = pool.getResource();
1066 | List r = jedis.brpop(timeoutSecs, key);
1067 | pool.returnResource(jedis);
1068 | if (r != null && r.size() == 2) {
1069 | return r.get(1);
1070 | } else {
1071 | return null;
1072 | }
1073 | } catch (Exception e) {
1074 | throw new ClusterOpException(e);
1075 | }
1076 | }
1077 |
1078 | /**
1079 | * 从队列的左边出队一个元素
1080 | *
1081 | * @param key
1082 | * @return
1083 | */
1084 | public String lpop(String key) {
1085 | try {
1086 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1087 | .getPool(hashFunc.hash(key));
1088 | Jedis jedis = pool.getResource();
1089 | String r = jedis.lpop(key);
1090 | pool.returnResource(jedis);
1091 | return r;
1092 | } catch (Exception e) {
1093 | throw new ClusterOpException(e);
1094 | }
1095 | }
1096 |
1097 | /**
1098 | * 从队列的右边出队一个元素
1099 | *
1100 | * @param key
1101 | * @return
1102 | */
1103 | public String rpop(String key) {
1104 | try {
1105 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1106 | .getPool(hashFunc.hash(key));
1107 | Jedis jedis = pool.getResource();
1108 | String r = jedis.rpop(key);
1109 | pool.returnResource(jedis);
1110 | return r;
1111 | } catch (Exception e) {
1112 | throw new ClusterOpException(e);
1113 | }
1114 | }
1115 |
1116 | /**
1117 | * 添加一个或者多个元素到集合(set)里
1118 | *
1119 | * @param key
1120 | * @param members
1121 | * @return
1122 | */
1123 | public Long sadd(String key, String... members) {
1124 | try {
1125 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1126 | .getPool(hashFunc.hash(key));
1127 | Jedis jedis = pool.getResource();
1128 | Long r = jedis.sadd(key, members);
1129 | pool.returnResource(jedis);
1130 | return r;
1131 | } catch (Exception e) {
1132 | throw new ClusterOpException(e);
1133 | }
1134 | }
1135 |
1136 | /**
1137 | * 获取集合里面的所有key
1138 | *
1139 | * @param key
1140 | * @return
1141 | */
1142 | public Set smembers(String key) {
1143 | try {
1144 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1145 | .getPool(hashFunc.hash(key));
1146 | Jedis jedis = pool.getResource();
1147 | Set r = jedis.smembers(key);
1148 | pool.returnResource(jedis);
1149 | return r;
1150 | } catch (Exception e) {
1151 | throw new ClusterOpException(e);
1152 | }
1153 | }
1154 |
1155 | /**
1156 | * 从集合里删除一个或多个key
1157 | *
1158 | * @param key
1159 | * @param members
1160 | * @return
1161 | */
1162 | public Long srem(String key, String... members) {
1163 | try {
1164 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1165 | .getPool(hashFunc.hash(key));
1166 | Jedis jedis = pool.getResource();
1167 | Long r = jedis.srem(key, members);
1168 | pool.returnResource(jedis);
1169 | return r;
1170 | } catch (Exception e) {
1171 | throw new ClusterOpException(e);
1172 | }
1173 | }
1174 |
1175 | /**
1176 | * 移除并返回一个集合中的随机元素
1177 | *
1178 | * @param key
1179 | * @return
1180 | */
1181 | public String spop(String key) {
1182 | try {
1183 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1184 | .getPool(hashFunc.hash(key));
1185 | Jedis jedis = pool.getResource();
1186 | String r = jedis.spop(key);
1187 | pool.returnResource(jedis);
1188 | return r;
1189 | } catch (Exception e) {
1190 | throw new ClusterOpException(e);
1191 | }
1192 | }
1193 |
1194 | /**
1195 | * 获取集合里面的元素数量
1196 | *
1197 | * @param key
1198 | * @return
1199 | */
1200 | public Long scard(String key) {
1201 | try {
1202 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1203 | .getPool(hashFunc.hash(key));
1204 | Jedis jedis = pool.getResource();
1205 | Long r = jedis.scard(key);
1206 | pool.returnResource(jedis);
1207 | return r;
1208 | } catch (Exception e) {
1209 | throw new ClusterOpException(e);
1210 | }
1211 | }
1212 |
1213 | /**
1214 | * 确定一个给定的值是一个集合的成员
1215 | *
1216 | * @param key
1217 | * @param member
1218 | * @return
1219 | */
1220 | public Boolean sismember(String key, String member) {
1221 | try {
1222 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1223 | .getPool(hashFunc.hash(key));
1224 | Jedis jedis = pool.getResource();
1225 | Boolean r = jedis.sismember(key, member);
1226 | pool.returnResource(jedis);
1227 | return r;
1228 | } catch (Exception e) {
1229 | throw new ClusterOpException(e);
1230 | }
1231 | }
1232 |
1233 | /**
1234 | * 从集合里面随机获取一个key
1235 | *
1236 | * @param key
1237 | * @return
1238 | */
1239 | public String srandmember(String key) {
1240 | try {
1241 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1242 | .getPool(hashFunc.hash(key));
1243 | Jedis jedis = pool.getResource();
1244 | String r = jedis.srandmember(key);
1245 | pool.returnResource(jedis);
1246 | return r;
1247 | } catch (Exception e) {
1248 | throw new ClusterOpException(e);
1249 | }
1250 | }
1251 |
1252 | /**
1253 | * 添加到有序set的一个或多个成员,或更新的分数,如果它已经存在
1254 | *
1255 | * @param key
1256 | * @param score
1257 | * @param member
1258 | * @return
1259 | */
1260 | public Long zadd(String key, double score, String member) {
1261 | try {
1262 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1263 | .getPool(hashFunc.hash(key));
1264 | Jedis jedis = pool.getResource();
1265 | Long r = jedis.zadd(key, score, member);
1266 | pool.returnResource(jedis);
1267 | return r;
1268 | } catch (Exception e) {
1269 | throw new ClusterOpException(e);
1270 | }
1271 | }
1272 |
1273 | /**
1274 | * 返回有序集key中,指定区间内的成员。其中成员按score值递增(从小到大)来排序。具有相同score值的成员按字典序来排列。
1275 | *
1276 | * @param key
1277 | * @param start
1278 | * @param end
1279 | * @return
1280 | */
1281 | public Set zrange(String key, int start, int end) {
1282 | try {
1283 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1284 | .getPool(hashFunc.hash(key));
1285 | Jedis jedis = pool.getResource();
1286 | Set r = jedis.zrange(key, start, end);
1287 | pool.returnResource(jedis);
1288 | return r;
1289 | } catch (Exception e) {
1290 | throw new ClusterOpException(e);
1291 | }
1292 | }
1293 |
1294 | /**
1295 | * 从排序的集合中删除一个或多个成员
1296 | *
1297 | * @param key
1298 | * @param members
1299 | * @return
1300 | */
1301 | public Long zrem(String key, String... members) {
1302 | try {
1303 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1304 | .getPool(hashFunc.hash(key));
1305 | Jedis jedis = pool.getResource();
1306 | Long r = jedis.zrem(key, members);
1307 | pool.returnResource(jedis);
1308 | return r;
1309 | } catch (Exception e) {
1310 | throw new ClusterOpException(e);
1311 | }
1312 | }
1313 |
1314 | /**
1315 | * 增量的一名成员在排序设置的评分
1316 | *
1317 | * @param key
1318 | * @param score
1319 | * @param member
1320 | * @return
1321 | */
1322 | public Double zincrby(String key, double score, String member) {
1323 | try {
1324 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1325 | .getPool(hashFunc.hash(key));
1326 | Jedis jedis = pool.getResource();
1327 | Double r = jedis.zincrby(key, score, member);
1328 | pool.returnResource(jedis);
1329 | return r;
1330 | } catch (Exception e) {
1331 | throw new ClusterOpException(e);
1332 | }
1333 | }
1334 |
1335 | /**
1336 | * 确定在排序集合成员的索引
1337 | *
1338 | * @param key
1339 | * @param member
1340 | * @return
1341 | */
1342 | public Long zrank(String key, String member) {
1343 | try {
1344 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1345 | .getPool(hashFunc.hash(key));
1346 | Jedis jedis = pool.getResource();
1347 | Long r = jedis.zrank(key, member);
1348 | pool.returnResource(jedis);
1349 | return r;
1350 | } catch (Exception e) {
1351 | throw new ClusterOpException(e);
1352 | }
1353 | }
1354 |
1355 | /**
1356 | * 确定指数在排序集的成员,下令从分数高到低
1357 | *
1358 | * @param key
1359 | * @param member
1360 | * @return
1361 | */
1362 | public Long zrevrank(String key, String member) {
1363 | try {
1364 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1365 | .getPool(hashFunc.hash(key));
1366 | Jedis jedis = pool.getResource();
1367 | Long r = jedis.zrevrank(key, member);
1368 | pool.returnResource(jedis);
1369 | return r;
1370 | } catch (Exception e) {
1371 | throw new ClusterOpException(e);
1372 | }
1373 | }
1374 |
1375 | /**
1376 | *
1377 | * 返回有序集key中,指定区间内的成员。其中成员的位置按score值递减(从大到小)来排列
1378 | *
1379 | * @param key
1380 | * @param start
1381 | * @param end
1382 | * @return
1383 | */
1384 | public Set zrevrange(String key, int start, int end) {
1385 | try {
1386 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1387 | .getPool(hashFunc.hash(key));
1388 | Jedis jedis = pool.getResource();
1389 | Set r = jedis.zrevrange(key, start, end);
1390 | pool.returnResource(jedis);
1391 | return r;
1392 | } catch (Exception e) {
1393 | throw new ClusterOpException(e);
1394 | }
1395 | }
1396 |
1397 | public Set zrangeWithScores(String key, int start, int end) {
1398 | try {
1399 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1400 | .getPool(hashFunc.hash(key));
1401 | Jedis jedis = pool.getResource();
1402 | Set r = jedis.zrangeWithScores(key, start, end);
1403 | pool.returnResource(jedis);
1404 | return r;
1405 | } catch (Exception e) {
1406 | throw new ClusterOpException(e);
1407 | }
1408 | }
1409 |
1410 | public Set zrevrangeWithScores(String key, int start, int end) {
1411 | try {
1412 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1413 | .getPool(hashFunc.hash(key));
1414 | Jedis jedis = pool.getResource();
1415 | Set r = jedis.zrevrangeWithScores(key, start, end);
1416 | pool.returnResource(jedis);
1417 | return r;
1418 | } catch (Exception e) {
1419 | throw new ClusterOpException(e);
1420 | }
1421 | }
1422 |
1423 | /**
1424 | * 获取一个排序的集合中的成员数量
1425 | *
1426 | * @param key
1427 | * @return
1428 | */
1429 | public Long zcard(String key) {
1430 | try {
1431 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1432 | .getPool(hashFunc.hash(key));
1433 | Jedis jedis = pool.getResource();
1434 | Long r = jedis.zcard(key);
1435 | pool.returnResource(jedis);
1436 | return r;
1437 | } catch (Exception e) {
1438 | throw new ClusterOpException(e);
1439 | }
1440 | }
1441 |
1442 | /**
1443 | * 获取成员在排序设置相关的比分
1444 | *
1445 | * @param key
1446 | * @param member
1447 | * @return
1448 | */
1449 | public Double zscore(String key, String member) {
1450 | try {
1451 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1452 | .getPool(hashFunc.hash(key));
1453 | Jedis jedis = pool.getResource();
1454 | Double r = jedis.zscore(key, member);
1455 | pool.returnResource(jedis);
1456 | return r;
1457 | } catch (Exception e) {
1458 | throw new ClusterOpException(e);
1459 | }
1460 | }
1461 |
1462 | /**
1463 | * 对队列、集合、有序集合排序
1464 | *
1465 | * @param key
1466 | * @return
1467 | */
1468 | public List sort(String key) {
1469 | try {
1470 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1471 | .getPool(hashFunc.hash(key));
1472 | Jedis jedis = pool.getResource();
1473 | List r = jedis.sort(key);
1474 | pool.returnResource(jedis);
1475 | return r;
1476 | } catch (Exception e) {
1477 | throw new ClusterOpException(e);
1478 | }
1479 | }
1480 |
1481 | /**
1482 | * 带参数排序 http://blog.csdn.net/yousite1/article/details/8486218
1483 | *
1484 | * @param key
1485 | * @param sortingParameters
1486 | * @return
1487 | */
1488 | public List sort(String key, SortingParams sortingParameters) {
1489 | try {
1490 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1491 | .getPool(hashFunc.hash(key));
1492 | Jedis jedis = pool.getResource();
1493 | List r = jedis.sort(key, sortingParameters);
1494 | pool.returnResource(jedis);
1495 | return r;
1496 | } catch (Exception e) {
1497 | throw new ClusterOpException(e);
1498 | }
1499 | }
1500 |
1501 | /**
1502 | * 返回有序集key中,score值在min和max之间(默认包括score值等于min或max)的成员
1503 | *
1504 | * @param key
1505 | * @param min
1506 | * @param max
1507 | * @return
1508 | */
1509 | public Long zcount(String key, double min, double max) {
1510 | try {
1511 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1512 | .getPool(hashFunc.hash(key));
1513 | Jedis jedis = pool.getResource();
1514 | Long r = jedis.zcount(key, min, max);
1515 | pool.returnResource(jedis);
1516 | return r;
1517 | } catch (Exception e) {
1518 | throw new ClusterOpException(e);
1519 | }
1520 | }
1521 |
1522 | /**
1523 | * 返回key的有序集合中的分数在min和max之间的所有元素(包括分数等于max或者min的元素)。元素被认为是从低分到高分排序的。
1524 | * 具有相同分数的元素按字典序排列(这个根据redis对有序集合实现的情况而定,并不需要进一步计算)。
1525 | *
1526 | * @param key
1527 | * @param min
1528 | * @param max
1529 | * @return
1530 | */
1531 | public Set zrangeByScore(String key, double min, double max) {
1532 | try {
1533 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1534 | .getPool(hashFunc.hash(key));
1535 | Jedis jedis = pool.getResource();
1536 | Set r = jedis.zrangeByScore(key, min, max);
1537 | pool.returnResource(jedis);
1538 | return r;
1539 | } catch (Exception e) {
1540 | throw new ClusterOpException(e);
1541 | }
1542 | }
1543 |
1544 | public Set zrevrangeByScore(String key, double max, double min) {
1545 | try {
1546 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1547 | .getPool(hashFunc.hash(key));
1548 | Jedis jedis = pool.getResource();
1549 | Set r = jedis.zrevrangeByScore(key, max, min);
1550 | pool.returnResource(jedis);
1551 | return r;
1552 | } catch (Exception e) {
1553 | throw new ClusterOpException(e);
1554 | }
1555 | }
1556 |
1557 | public Set zrangeByScore(String key, double min, double max,
1558 | int offset, int count) {
1559 | try {
1560 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1561 | .getPool(hashFunc.hash(key));
1562 | Jedis jedis = pool.getResource();
1563 | Set r = jedis.zrangeByScore(key, min, max, offset, count);
1564 | pool.returnResource(jedis);
1565 | return r;
1566 | } catch (Exception e) {
1567 | throw new ClusterOpException(e);
1568 | }
1569 | }
1570 |
1571 | public Set zrevrangeByScore(String key, double max, double min,
1572 | int offset, int count) {
1573 | try {
1574 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1575 | .getPool(hashFunc.hash(key));
1576 | Jedis jedis = pool.getResource();
1577 | Set r = jedis
1578 | .zrevrangeByScore(key, max, min, offset, count);
1579 | pool.returnResource(jedis);
1580 | return r;
1581 | } catch (Exception e) {
1582 | throw new ClusterOpException(e);
1583 | }
1584 | }
1585 |
1586 | public Set zrangeByScoreWithScores(String key, double min, double max) {
1587 | try {
1588 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1589 | .getPool(hashFunc.hash(key));
1590 | Jedis jedis = pool.getResource();
1591 | Set r = jedis.zrangeByScoreWithScores(key, min, max);
1592 | pool.returnResource(jedis);
1593 | return r;
1594 | } catch (Exception e) {
1595 | throw new ClusterOpException(e);
1596 | }
1597 | }
1598 |
1599 | public Set zrevrangeByScoreWithScores(String key, double max,
1600 | double min) {
1601 | try {
1602 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1603 | .getPool(hashFunc.hash(key));
1604 | Jedis jedis = pool.getResource();
1605 | Set r = jedis.zrevrangeByScoreWithScores(key, max, min);
1606 | pool.returnResource(jedis);
1607 | return r;
1608 | } catch (Exception e) {
1609 | throw new ClusterOpException(e);
1610 | }
1611 | }
1612 |
1613 | public Set zrangeByScoreWithScores(String key, double min,
1614 | double max, int offset, int count) {
1615 | try {
1616 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1617 | .getPool(hashFunc.hash(key));
1618 | Jedis jedis = pool.getResource();
1619 | Set r = jedis.zrangeByScoreWithScores(key, min, max, offset,
1620 | count);
1621 | pool.returnResource(jedis);
1622 | return r;
1623 | } catch (Exception e) {
1624 | throw new ClusterOpException(e);
1625 | }
1626 | }
1627 |
1628 | public Set zrevrangeByScoreWithScores(String key, double max,
1629 | double min, int offset, int count) {
1630 | try {
1631 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1632 | .getPool(hashFunc.hash(key));
1633 | Jedis jedis = pool.getResource();
1634 | Set r = jedis.zrevrangeByScoreWithScores(key, max, min,
1635 | offset, count);
1636 | pool.returnResource(jedis);
1637 | return r;
1638 | } catch (Exception e) {
1639 | throw new ClusterOpException(e);
1640 | }
1641 | }
1642 |
1643 | public Long zremrangeByRank(String key, int start, int end) {
1644 | try {
1645 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1646 | .getPool(hashFunc.hash(key));
1647 | Jedis jedis = pool.getResource();
1648 | Long r = jedis.zremrangeByRank(key, start, end);
1649 | pool.returnResource(jedis);
1650 | return r;
1651 | } catch (Exception e) {
1652 | throw new ClusterOpException(e);
1653 | }
1654 | }
1655 |
1656 | public Long zremrangeByScore(String key, double start, double end) {
1657 | try {
1658 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1659 | .getPool(hashFunc.hash(key));
1660 | Jedis jedis = pool.getResource();
1661 | Long r = jedis.zremrangeByScore(key, start, end);
1662 | pool.returnResource(jedis);
1663 | return r;
1664 | } catch (Exception e) {
1665 | throw new ClusterOpException(e);
1666 | }
1667 | }
1668 |
1669 | public Long linsert(String key, LIST_POSITION where, String pivot,
1670 | String value) {
1671 | try {
1672 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1673 | .getPool(hashFunc.hash(key));
1674 | Jedis jedis = pool.getResource();
1675 | Long r = jedis.linsert(key, where, pivot, value);
1676 | pool.returnResource(jedis);
1677 | return r;
1678 | } catch (Exception e) {
1679 | throw new ClusterOpException(e);
1680 | }
1681 | }
1682 |
1683 | public String set(byte[] key, byte[] value) {
1684 | try {
1685 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1686 | .getPool(hashFunc.hash(key));
1687 | Jedis jedis = pool.getResource();
1688 | String r = jedis.set(key, value);
1689 | pool.returnResource(jedis);
1690 | return r;
1691 | } catch (Exception e) {
1692 | throw new ClusterOpException(e);
1693 | }
1694 | }
1695 |
1696 | public byte[] get(byte[] key) {
1697 | try {
1698 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1699 | .getPool(hashFunc.hash(key));
1700 | Jedis jedis = pool.getResource();
1701 | byte[] r = jedis.get(key);
1702 | pool.returnResource(jedis);
1703 | return r;
1704 | } catch (Exception e) {
1705 | throw new ClusterOpException(e);
1706 | }
1707 | }
1708 |
1709 | public Boolean exists(byte[] key) {
1710 | try {
1711 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1712 | .getPool(hashFunc.hash(key));
1713 | Jedis jedis = pool.getResource();
1714 | Boolean r = jedis.exists(key);
1715 | pool.returnResource(jedis);
1716 | return r;
1717 | } catch (Exception e) {
1718 | throw new ClusterOpException(e);
1719 | }
1720 | }
1721 |
1722 | public String type(byte[] key) {
1723 | try {
1724 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1725 | .getPool(hashFunc.hash(key));
1726 | Jedis jedis = pool.getResource();
1727 | String r = jedis.type(key);
1728 | pool.returnResource(jedis);
1729 | return r;
1730 | } catch (Exception e) {
1731 | throw new ClusterOpException(e);
1732 | }
1733 | }
1734 |
1735 | public Long expire(byte[] key, int seconds) {
1736 | try {
1737 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1738 | .getPool(hashFunc.hash(key));
1739 | Jedis jedis = pool.getResource();
1740 | Long r = jedis.expire(key, seconds);
1741 | pool.returnResource(jedis);
1742 | return r;
1743 | } catch (Exception e) {
1744 | throw new ClusterOpException(e);
1745 | }
1746 | }
1747 |
1748 | public Long expire(int seconds, byte[]... keys) {
1749 | try {
1750 | long result = 0;
1751 | for (byte[] key : keys) {
1752 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1753 | .getPool(hashFunc.hash(key));
1754 | Jedis jedis = pool.getResource();
1755 | result += jedis.expire(key, seconds);
1756 | pool.returnResource(jedis);
1757 | }
1758 | return result;
1759 | } catch (Exception e) {
1760 | throw new ClusterOpException(e);
1761 | }
1762 | }
1763 |
1764 | public Long expireAt(byte[] key, long unixTime) {
1765 | try {
1766 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1767 | .getPool(hashFunc.hash(key));
1768 | Jedis jedis = pool.getResource();
1769 | Long r = jedis.expireAt(key, unixTime);
1770 | pool.returnResource(jedis);
1771 | return r;
1772 | } catch (Exception e) {
1773 | throw new ClusterOpException(e);
1774 | }
1775 | }
1776 |
1777 | public Long ttl(byte[] key) {
1778 | try {
1779 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1780 | .getPool(hashFunc.hash(key));
1781 | Jedis jedis = pool.getResource();
1782 | Long r = jedis.ttl(key);
1783 | pool.returnResource(jedis);
1784 | return r;
1785 | } catch (Exception e) {
1786 | throw new ClusterOpException(e);
1787 | }
1788 | }
1789 |
1790 | public byte[] getSet(byte[] key, byte[] value) {
1791 | try {
1792 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1793 | .getPool(hashFunc.hash(key));
1794 | Jedis jedis = pool.getResource();
1795 | byte[] r = jedis.getSet(key, value);
1796 | pool.returnResource(jedis);
1797 | return r;
1798 | } catch (Exception e) {
1799 | throw new ClusterOpException(e);
1800 | }
1801 | }
1802 |
1803 | public Long setnx(byte[] key, byte[] value) {
1804 | try {
1805 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1806 | .getPool(hashFunc.hash(key));
1807 | Jedis jedis = pool.getResource();
1808 | Long r = jedis.setnx(key, value);
1809 | pool.returnResource(jedis);
1810 | return r;
1811 | } catch (Exception e) {
1812 | throw new ClusterOpException(e);
1813 | }
1814 | }
1815 |
1816 | public String setex(byte[] key, int seconds, byte[] value) {
1817 | try {
1818 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1819 | .getPool(hashFunc.hash(key));
1820 | Jedis jedis = pool.getResource();
1821 | String r = jedis.setex(key, seconds, value);
1822 | pool.returnResource(jedis);
1823 | return r;
1824 | } catch (Exception e) {
1825 | throw new ClusterOpException(e);
1826 | }
1827 | }
1828 |
1829 | public Long decrBy(byte[] key, long integer) {
1830 | try {
1831 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1832 | .getPool(hashFunc.hash(key));
1833 | Jedis jedis = pool.getResource();
1834 | Long r = jedis.decrBy(key, integer);
1835 | pool.returnResource(jedis);
1836 | return r;
1837 | } catch (Exception e) {
1838 | throw new ClusterOpException(e);
1839 | }
1840 | }
1841 |
1842 | public Long decr(byte[] key) {
1843 | try {
1844 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1845 | .getPool(hashFunc.hash(key));
1846 | Jedis jedis = pool.getResource();
1847 | Long r = jedis.decr(key);
1848 | pool.returnResource(jedis);
1849 | return r;
1850 | } catch (Exception e) {
1851 | throw new ClusterOpException(e);
1852 | }
1853 | }
1854 |
1855 | public Long incrBy(byte[] key, long integer) {
1856 | try {
1857 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1858 | .getPool(hashFunc.hash(key));
1859 | Jedis jedis = pool.getResource();
1860 | Long r = jedis.incrBy(key, integer);
1861 | pool.returnResource(jedis);
1862 | return r;
1863 | } catch (Exception e) {
1864 | throw new ClusterOpException(e);
1865 | }
1866 | }
1867 |
1868 | public Long incr(byte[] key) {
1869 | try {
1870 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1871 | .getPool(hashFunc.hash(key));
1872 | Jedis jedis = pool.getResource();
1873 | Long r = jedis.incr(key);
1874 | pool.returnResource(jedis);
1875 | return r;
1876 | } catch (Exception e) {
1877 | throw new ClusterOpException(e);
1878 | }
1879 | }
1880 |
1881 | public Long append(byte[] key, byte[] value) {
1882 | try {
1883 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1884 | .getPool(hashFunc.hash(key));
1885 | Jedis jedis = pool.getResource();
1886 | Long r = jedis.append(key, value);
1887 | pool.returnResource(jedis);
1888 | return r;
1889 | } catch (Exception e) {
1890 | throw new ClusterOpException(e);
1891 | }
1892 | }
1893 |
1894 | public byte[] substr(byte[] key, int start, int end) {
1895 | try {
1896 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1897 | .getPool(hashFunc.hash(key));
1898 | Jedis jedis = pool.getResource();
1899 | byte[] r = jedis.substr(key, start, end);
1900 | pool.returnResource(jedis);
1901 | return r;
1902 | } catch (Exception e) {
1903 | throw new ClusterOpException(e);
1904 | }
1905 | }
1906 |
1907 | public Long hset(byte[] key, byte[] field, byte[] value) {
1908 | try {
1909 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1910 | .getPool(hashFunc.hash(key));
1911 | Jedis jedis = pool.getResource();
1912 | Long r = jedis.hset(key, field, value);
1913 | pool.returnResource(jedis);
1914 | return r;
1915 | } catch (Exception e) {
1916 | throw new ClusterOpException(e);
1917 | }
1918 | }
1919 |
1920 | public byte[] hget(byte[] key, byte[] field) {
1921 | try {
1922 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1923 | .getPool(hashFunc.hash(key));
1924 | Jedis jedis = pool.getResource();
1925 | byte[] r = jedis.hget(key, field);
1926 | pool.returnResource(jedis);
1927 | return r;
1928 | } catch (Exception e) {
1929 | throw new ClusterOpException(e);
1930 | }
1931 | }
1932 |
1933 | public Long hsetnx(byte[] key, byte[] field, byte[] value) {
1934 | try {
1935 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1936 | .getPool(hashFunc.hash(key));
1937 | Jedis jedis = pool.getResource();
1938 | Long r = jedis.hsetnx(key, field, value);
1939 | pool.returnResource(jedis);
1940 | return r;
1941 | } catch (Exception e) {
1942 | throw new ClusterOpException(e);
1943 | }
1944 | }
1945 |
1946 | public String hmset(byte[] key, Map hash) {
1947 | if (hash == null || hash.isEmpty())
1948 | throw new ClusterOpException("Param cannot be null or empty!");
1949 | try {
1950 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1951 | .getPool(hashFunc.hash(key));
1952 | Jedis jedis = pool.getResource();
1953 | String r = jedis.hmset(key, hash);
1954 | pool.returnResource(jedis);
1955 | return r;
1956 | } catch (Exception e) {
1957 | throw new ClusterOpException(e);
1958 | }
1959 | }
1960 |
1961 | public List hmget(byte[] key, byte[]... fields) {
1962 | try {
1963 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1964 | .getPool(hashFunc.hash(key));
1965 | Jedis jedis = pool.getResource();
1966 | List r = jedis.hmget(key, fields);
1967 | pool.returnResource(jedis);
1968 | return r;
1969 | } catch (Exception e) {
1970 | throw new ClusterOpException(e);
1971 | }
1972 | }
1973 |
1974 | public Long hincrBy(byte[] key, byte[] field, long value) {
1975 | try {
1976 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1977 | .getPool(hashFunc.hash(key));
1978 | Jedis jedis = pool.getResource();
1979 | Long r = jedis.hincrBy(key, field, value);
1980 | pool.returnResource(jedis);
1981 | return r;
1982 | } catch (Exception e) {
1983 | throw new ClusterOpException(e);
1984 | }
1985 | }
1986 |
1987 | public Boolean hexists(byte[] key, byte[] field) {
1988 | try {
1989 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
1990 | .getPool(hashFunc.hash(key));
1991 | Jedis jedis = pool.getResource();
1992 | Boolean r = jedis.hexists(key, field);
1993 | pool.returnResource(jedis);
1994 | return r;
1995 | } catch (Exception e) {
1996 | throw new ClusterOpException(e);
1997 | }
1998 | }
1999 |
2000 | public Long hdel(byte[] key, byte[]... fields) {
2001 | try {
2002 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2003 | .getPool(hashFunc.hash(key));
2004 | Jedis jedis = pool.getResource();
2005 | Long r = jedis.hdel(key, fields);
2006 | pool.returnResource(jedis);
2007 | return r;
2008 | } catch (Exception e) {
2009 | throw new ClusterOpException(e);
2010 | }
2011 | }
2012 |
2013 | public Long hlen(byte[] key) {
2014 | try {
2015 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2016 | .getPool(hashFunc.hash(key));
2017 | Jedis jedis = pool.getResource();
2018 | Long r = jedis.hlen(key);
2019 | pool.returnResource(jedis);
2020 | return r;
2021 | } catch (Exception e) {
2022 | throw new ClusterOpException(e);
2023 | }
2024 | }
2025 |
2026 | public Set hkeys(byte[] key) {
2027 | try {
2028 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2029 | .getPool(hashFunc.hash(key));
2030 | Jedis jedis = pool.getResource();
2031 | Set r = jedis.hkeys(key);
2032 | pool.returnResource(jedis);
2033 | return r;
2034 | } catch (Exception e) {
2035 | throw new ClusterOpException(e);
2036 | }
2037 | }
2038 |
2039 | public Collection hvals(byte[] key) {
2040 | try {
2041 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2042 | .getPool(hashFunc.hash(key));
2043 | Jedis jedis = pool.getResource();
2044 | Collection r = jedis.hvals(key);
2045 | pool.returnResource(jedis);
2046 | return r;
2047 | } catch (Exception e) {
2048 | throw new ClusterOpException(e);
2049 | }
2050 | }
2051 |
2052 | public Map hgetAll(byte[] key) {
2053 | try {
2054 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2055 | .getPool(hashFunc.hash(key));
2056 | Jedis jedis = pool.getResource();
2057 | Map r = jedis.hgetAll(key);
2058 | pool.returnResource(jedis);
2059 | return r;
2060 | } catch (Exception e) {
2061 | throw new ClusterOpException(e);
2062 | }
2063 | }
2064 |
2065 | public Long rpush(byte[] key, byte[]... strings) {
2066 | try {
2067 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2068 | .getPool(hashFunc.hash(key));
2069 | Jedis jedis = pool.getResource();
2070 | Long r = jedis.rpush(key, strings);
2071 | pool.returnResource(jedis);
2072 | return r;
2073 | } catch (Exception e) {
2074 | throw new ClusterOpException(e);
2075 | }
2076 | }
2077 |
2078 | public Long lpush(byte[] key, byte[]... strings) {
2079 | try {
2080 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2081 | .getPool(hashFunc.hash(key));
2082 | Jedis jedis = pool.getResource();
2083 | Long r = jedis.lpush(key, strings);
2084 | pool.returnResource(jedis);
2085 | return r;
2086 | } catch (Exception e) {
2087 | throw new ClusterOpException(e);
2088 | }
2089 | }
2090 |
2091 | public Long llen(byte[] key) {
2092 | try {
2093 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2094 | .getPool(hashFunc.hash(key));
2095 | Jedis jedis = pool.getResource();
2096 | Long r = jedis.llen(key);
2097 | pool.returnResource(jedis);
2098 | return r;
2099 | } catch (Exception e) {
2100 | throw new ClusterOpException(e);
2101 | }
2102 | }
2103 |
2104 | public List lrange(byte[] key, int start, int end) {
2105 | try {
2106 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2107 | .getPool(hashFunc.hash(key));
2108 | Jedis jedis = pool.getResource();
2109 | List r = jedis.lrange(key, start, end);
2110 | pool.returnResource(jedis);
2111 | return r;
2112 | } catch (Exception e) {
2113 | throw new ClusterOpException(e);
2114 | }
2115 | }
2116 |
2117 | public String ltrim(byte[] key, int start, int end) {
2118 | try {
2119 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2120 | .getPool(hashFunc.hash(key));
2121 | Jedis jedis = pool.getResource();
2122 | String r = jedis.ltrim(key, start, end);
2123 | pool.returnResource(jedis);
2124 | return r;
2125 | } catch (Exception e) {
2126 | throw new ClusterOpException(e);
2127 | }
2128 | }
2129 |
2130 | public byte[] lindex(byte[] key, int index) {
2131 | try {
2132 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2133 | .getPool(hashFunc.hash(key));
2134 | Jedis jedis = pool.getResource();
2135 | byte[] r = jedis.lindex(key, index);
2136 | pool.returnResource(jedis);
2137 | return r;
2138 | } catch (Exception e) {
2139 | throw new ClusterOpException(e);
2140 | }
2141 | }
2142 |
2143 | public String lset(byte[] key, int index, byte[] value) {
2144 | try {
2145 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2146 | .getPool(hashFunc.hash(key));
2147 | Jedis jedis = pool.getResource();
2148 | String r = jedis.lset(key, index, value);
2149 | pool.returnResource(jedis);
2150 | return r;
2151 | } catch (Exception e) {
2152 | throw new ClusterOpException(e);
2153 | }
2154 | }
2155 |
2156 | public Long lrem(byte[] key, int count, byte[] value) {
2157 | try {
2158 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2159 | .getPool(hashFunc.hash(key));
2160 | Jedis jedis = pool.getResource();
2161 | Long r = jedis.lrem(key, count, value);
2162 | pool.returnResource(jedis);
2163 | return r;
2164 | } catch (Exception e) {
2165 | throw new ClusterOpException(e);
2166 | }
2167 | }
2168 |
2169 | public byte[] lpop(byte[] key) {
2170 | try {
2171 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2172 | .getPool(hashFunc.hash(key));
2173 | Jedis jedis = pool.getResource();
2174 | byte[] r = jedis.lpop(key);
2175 | pool.returnResource(jedis);
2176 | return r;
2177 | } catch (Exception e) {
2178 | throw new ClusterOpException(e);
2179 | }
2180 | }
2181 |
2182 | public byte[] rpop(byte[] key) {
2183 | try {
2184 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2185 | .getPool(hashFunc.hash(key));
2186 | Jedis jedis = pool.getResource();
2187 | byte[] r = jedis.rpop(key);
2188 | pool.returnResource(jedis);
2189 | return r;
2190 | } catch (Exception e) {
2191 | throw new ClusterOpException(e);
2192 | }
2193 | }
2194 |
2195 | public Long sadd(byte[] key, byte[]... members) {
2196 | try {
2197 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2198 | .getPool(hashFunc.hash(key));
2199 | Jedis jedis = pool.getResource();
2200 | Long r = jedis.sadd(key, members);
2201 | pool.returnResource(jedis);
2202 | return r;
2203 | } catch (Exception e) {
2204 | throw new ClusterOpException(e);
2205 | }
2206 | }
2207 |
2208 | public Set smembers(byte[] key) {
2209 | try {
2210 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2211 | .getPool(hashFunc.hash(key));
2212 | Jedis jedis = pool.getResource();
2213 | Set r = jedis.smembers(key);
2214 | pool.returnResource(jedis);
2215 | return r;
2216 | } catch (Exception e) {
2217 | throw new ClusterOpException(e);
2218 | }
2219 | }
2220 |
2221 | public Long srem(byte[] key, byte[]... members) {
2222 | try {
2223 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2224 | .getPool(hashFunc.hash(key));
2225 | Jedis jedis = pool.getResource();
2226 | Long r = jedis.srem(key, members);
2227 | pool.returnResource(jedis);
2228 | return r;
2229 | } catch (Exception e) {
2230 | throw new ClusterOpException(e);
2231 | }
2232 | }
2233 |
2234 | public byte[] spop(byte[] key) {
2235 | try {
2236 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2237 | .getPool(hashFunc.hash(key));
2238 | Jedis jedis = pool.getResource();
2239 | byte[] r = jedis.spop(key);
2240 | pool.returnResource(jedis);
2241 | return r;
2242 | } catch (Exception e) {
2243 | throw new ClusterOpException(e);
2244 | }
2245 | }
2246 |
2247 | public Long scard(byte[] key) {
2248 | try {
2249 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2250 | .getPool(hashFunc.hash(key));
2251 | Jedis jedis = pool.getResource();
2252 | Long r = jedis.scard(key);
2253 | pool.returnResource(jedis);
2254 | return r;
2255 | } catch (Exception e) {
2256 | throw new ClusterOpException(e);
2257 | }
2258 | }
2259 |
2260 | public Boolean sismember(byte[] key, byte[] member) {
2261 | try {
2262 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2263 | .getPool(hashFunc.hash(key));
2264 | Jedis jedis = pool.getResource();
2265 | Boolean r = jedis.sismember(key, member);
2266 | pool.returnResource(jedis);
2267 | return r;
2268 | } catch (Exception e) {
2269 | throw new ClusterOpException(e);
2270 | }
2271 | }
2272 |
2273 | public byte[] srandmember(byte[] key) {
2274 | try {
2275 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2276 | .getPool(hashFunc.hash(key));
2277 | Jedis jedis = pool.getResource();
2278 | byte[] r = jedis.srandmember(key);
2279 | pool.returnResource(jedis);
2280 | return r;
2281 | } catch (Exception e) {
2282 | throw new ClusterOpException(e);
2283 | }
2284 | }
2285 |
2286 | public Long zadd(byte[] key, double score, byte[] member) {
2287 | try {
2288 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2289 | .getPool(hashFunc.hash(key));
2290 | Jedis jedis = pool.getResource();
2291 | Long r = jedis.zadd(key, score, member);
2292 | pool.returnResource(jedis);
2293 | return r;
2294 | } catch (Exception e) {
2295 | throw new ClusterOpException(e);
2296 | }
2297 | }
2298 |
2299 | public Set zrange(byte[] key, int start, int end) {
2300 | try {
2301 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2302 | .getPool(hashFunc.hash(key));
2303 | Jedis jedis = pool.getResource();
2304 | Set r = jedis.zrange(key, start, end);
2305 | pool.returnResource(jedis);
2306 | return r;
2307 | } catch (Exception e) {
2308 | throw new ClusterOpException(e);
2309 | }
2310 | }
2311 |
2312 | public Long zrem(byte[] key, byte[]... members) {
2313 | try {
2314 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2315 | .getPool(hashFunc.hash(key));
2316 | Jedis jedis = pool.getResource();
2317 | Long r = jedis.zrem(key, members);
2318 | pool.returnResource(jedis);
2319 | return r;
2320 | } catch (Exception e) {
2321 | throw new ClusterOpException(e);
2322 | }
2323 | }
2324 |
2325 | public Double zincrby(byte[] key, double score, byte[] member) {
2326 | try {
2327 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2328 | .getPool(hashFunc.hash(key));
2329 | Jedis jedis = pool.getResource();
2330 | Double r = jedis.zincrby(key, score, member);
2331 | pool.returnResource(jedis);
2332 | return r;
2333 | } catch (Exception e) {
2334 | throw new ClusterOpException(e);
2335 | }
2336 | }
2337 |
2338 | public Long zrank(byte[] key, byte[] member) {
2339 | try {
2340 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2341 | .getPool(hashFunc.hash(key));
2342 | Jedis jedis = pool.getResource();
2343 | Long r = jedis.zrank(key, member);
2344 | pool.returnResource(jedis);
2345 | return r;
2346 | } catch (Exception e) {
2347 | throw new ClusterOpException(e);
2348 | }
2349 | }
2350 |
2351 | public Long zrevrank(byte[] key, byte[] member) {
2352 | try {
2353 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2354 | .getPool(hashFunc.hash(key));
2355 | Jedis jedis = pool.getResource();
2356 | Long r = jedis.zrevrank(key, member);
2357 | pool.returnResource(jedis);
2358 | return r;
2359 | } catch (Exception e) {
2360 | throw new ClusterOpException(e);
2361 | }
2362 | }
2363 |
2364 | public Set zrevrange(byte[] key, int start, int end) {
2365 | try {
2366 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2367 | .getPool(hashFunc.hash(key));
2368 | Jedis jedis = pool.getResource();
2369 | Set r = jedis.zrevrange(key, start, end);
2370 | pool.returnResource(jedis);
2371 | return r;
2372 | } catch (Exception e) {
2373 | throw new ClusterOpException(e);
2374 | }
2375 | }
2376 |
2377 | public Set zrangeWithScores(byte[] key, int start, int end) {
2378 | try {
2379 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2380 | .getPool(hashFunc.hash(key));
2381 | Jedis jedis = pool.getResource();
2382 | Set r = jedis.zrangeWithScores(key, start, end);
2383 | pool.returnResource(jedis);
2384 | return r;
2385 | } catch (Exception e) {
2386 | throw new ClusterOpException(e);
2387 | }
2388 | }
2389 |
2390 | public Set zrevrangeWithScores(byte[] key, int start, int end) {
2391 | try {
2392 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2393 | .getPool(hashFunc.hash(key));
2394 | Jedis jedis = pool.getResource();
2395 | Set r = jedis.zrevrangeWithScores(key, start, end);
2396 | pool.returnResource(jedis);
2397 | return r;
2398 | } catch (Exception e) {
2399 | throw new ClusterOpException(e);
2400 | }
2401 | }
2402 |
2403 | public Long zcard(byte[] key) {
2404 | try {
2405 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2406 | .getPool(hashFunc.hash(key));
2407 | Jedis jedis = pool.getResource();
2408 | Long r = jedis.zcard(key);
2409 | pool.returnResource(jedis);
2410 | return r;
2411 | } catch (Exception e) {
2412 | throw new ClusterOpException(e);
2413 | }
2414 | }
2415 |
2416 | public Double zscore(byte[] key, byte[] member) {
2417 | try {
2418 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2419 | .getPool(hashFunc.hash(key));
2420 | Jedis jedis = pool.getResource();
2421 | Double r = jedis.zscore(key, member);
2422 | pool.returnResource(jedis);
2423 | return r;
2424 | } catch (Exception e) {
2425 | throw new ClusterOpException(e);
2426 | }
2427 | }
2428 |
2429 | public List sort(byte[] key) {
2430 | try {
2431 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2432 | .getPool(hashFunc.hash(key));
2433 | Jedis jedis = pool.getResource();
2434 | List r = jedis.sort(key);
2435 | pool.returnResource(jedis);
2436 | return r;
2437 | } catch (Exception e) {
2438 | throw new ClusterOpException(e);
2439 | }
2440 | }
2441 |
2442 | public List sort(byte[] key, SortingParams sortingParameters) {
2443 | try {
2444 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2445 | .getPool(hashFunc.hash(key));
2446 | Jedis jedis = pool.getResource();
2447 | List r = jedis.sort(key, sortingParameters);
2448 | pool.returnResource(jedis);
2449 | return r;
2450 | } catch (Exception e) {
2451 | throw new ClusterOpException(e);
2452 | }
2453 | }
2454 |
2455 | public Long zcount(byte[] key, double min, double max) {
2456 | try {
2457 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2458 | .getPool(hashFunc.hash(key));
2459 | Jedis jedis = pool.getResource();
2460 | Long r = jedis.zcount(key, min, max);
2461 | pool.returnResource(jedis);
2462 | return r;
2463 | } catch (Exception e) {
2464 | throw new ClusterOpException(e);
2465 | }
2466 | }
2467 |
2468 | public Set zrangeByScore(byte[] key, double min, double max) {
2469 | try {
2470 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2471 | .getPool(hashFunc.hash(key));
2472 | Jedis jedis = pool.getResource();
2473 | Set r = jedis.zrangeByScore(key, min, max);
2474 | pool.returnResource(jedis);
2475 | return r;
2476 | } catch (Exception e) {
2477 | throw new ClusterOpException(e);
2478 | }
2479 | }
2480 |
2481 | public Set zrangeByScore(byte[] key, double min, double max,
2482 | int offset, int count) {
2483 | try {
2484 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2485 | .getPool(hashFunc.hash(key));
2486 | Jedis jedis = pool.getResource();
2487 | Set r = jedis.zrangeByScore(key, min, max, offset, count);
2488 | pool.returnResource(jedis);
2489 | return r;
2490 | } catch (Exception e) {
2491 | throw new ClusterOpException(e);
2492 | }
2493 | }
2494 |
2495 | public Set zrangeByScoreWithScores(byte[] key, double min, double max) {
2496 | try {
2497 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2498 | .getPool(hashFunc.hash(key));
2499 | Jedis jedis = pool.getResource();
2500 | Set r = jedis.zrangeByScoreWithScores(key, min, max);
2501 | pool.returnResource(jedis);
2502 | return r;
2503 | } catch (Exception e) {
2504 | throw new ClusterOpException(e);
2505 | }
2506 | }
2507 |
2508 | public Set zrangeByScoreWithScores(byte[] key, double min,
2509 | double max, int offset, int count) {
2510 | try {
2511 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2512 | .getPool(hashFunc.hash(key));
2513 | Jedis jedis = pool.getResource();
2514 | Set r = jedis.zrangeByScoreWithScores(key, min, max, offset,
2515 | count);
2516 | pool.returnResource(jedis);
2517 | return r;
2518 | } catch (Exception e) {
2519 | throw new ClusterOpException(e);
2520 | }
2521 | }
2522 |
2523 | public Set zrevrangeByScore(byte[] key, double max, double min) {
2524 | try {
2525 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2526 | .getPool(hashFunc.hash(key));
2527 | Jedis jedis = pool.getResource();
2528 | Set r = jedis.zrevrangeByScore(key, max, min);
2529 | pool.returnResource(jedis);
2530 | return r;
2531 | } catch (Exception e) {
2532 | throw new ClusterOpException(e);
2533 | }
2534 | }
2535 |
2536 | public Set zrevrangeByScore(byte[] key, double max, double min,
2537 | int offset, int count) {
2538 | try {
2539 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2540 | .getPool(hashFunc.hash(key));
2541 | Jedis jedis = pool.getResource();
2542 | Set r = jedis
2543 | .zrevrangeByScore(key, max, min, offset, count);
2544 | pool.returnResource(jedis);
2545 | return r;
2546 | } catch (Exception e) {
2547 | throw new ClusterOpException(e);
2548 | }
2549 | }
2550 |
2551 | public Set zrevrangeByScoreWithScores(byte[] key, double max,
2552 | double min) {
2553 | try {
2554 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2555 | .getPool(hashFunc.hash(key));
2556 | Jedis jedis = pool.getResource();
2557 | Set r = jedis.zrevrangeByScoreWithScores(key, max, min);
2558 | pool.returnResource(jedis);
2559 | return r;
2560 | } catch (Exception e) {
2561 | throw new ClusterOpException(e);
2562 | }
2563 | }
2564 |
2565 | public Set zrevrangeByScoreWithScores(byte[] key, double max,
2566 | double min, int offset, int count) {
2567 | try {
2568 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2569 | .getPool(hashFunc.hash(key));
2570 | Jedis jedis = pool.getResource();
2571 | Set r = jedis.zrevrangeByScoreWithScores(key, max, min,
2572 | offset, count);
2573 | pool.returnResource(jedis);
2574 | return r;
2575 | } catch (Exception e) {
2576 | throw new ClusterOpException(e);
2577 | }
2578 | }
2579 |
2580 | public Long zremrangeByRank(byte[] key, int start, int end) {
2581 | try {
2582 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2583 | .getPool(hashFunc.hash(key));
2584 | Jedis jedis = pool.getResource();
2585 | Long r = jedis.zremrangeByRank(key, start, end);
2586 | pool.returnResource(jedis);
2587 | return r;
2588 | } catch (Exception e) {
2589 | throw new ClusterOpException(e);
2590 | }
2591 | }
2592 |
2593 | public Long zremrangeByScore(byte[] key, double start, double end) {
2594 | try {
2595 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2596 | .getPool(hashFunc.hash(key));
2597 | Jedis jedis = pool.getResource();
2598 | Long r = jedis.zremrangeByScore(key, start, end);
2599 | pool.returnResource(jedis);
2600 | return r;
2601 | } catch (Exception e) {
2602 | throw new ClusterOpException(e);
2603 | }
2604 | }
2605 |
2606 | public Long linsert(byte[] key, LIST_POSITION where, byte[] pivot,
2607 | byte[] value) {
2608 | try {
2609 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2610 | .getPool(hashFunc.hash(key));
2611 | Jedis jedis = pool.getResource();
2612 | Long r = jedis.linsert(key, where, pivot, value);
2613 | pool.returnResource(jedis);
2614 | return r;
2615 | } catch (Exception e) {
2616 | throw new ClusterOpException(e);
2617 | }
2618 | }
2619 |
2620 | public Long del(final String key) {
2621 | try {
2622 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2623 | .getPool(hashFunc.hash(key));
2624 | Jedis jedis = pool.getResource();
2625 | Long r = jedis.del(key);
2626 | pool.returnResource(jedis);
2627 | return r;
2628 | } catch (Exception e) {
2629 | throw new ClusterOpException(e);
2630 | }
2631 | }
2632 |
2633 | public Long del(final String... keys) {
2634 |
2635 | try {
2636 | Map> mkeys = new HashMap>();
2637 | for (String key : keys) {
2638 | int part = hashFunc.hash(key);
2639 | List mlist = new ArrayList();
2640 | if (mkeys.containsKey(part)) {
2641 | mlist = mkeys.get(part);
2642 | }
2643 | mlist.add(key);
2644 | mkeys.put(part, mlist);
2645 | }
2646 | Long r = 0l;
2647 | for (int part : mkeys.keySet()) {
2648 | List mlist = mkeys.get(part);
2649 | String[] delKeys = (String[]) mlist.toArray(new String[mlist
2650 | .size()]);
2651 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2652 | .getPool(part);
2653 | Jedis jedis = pool.getResource();
2654 | r += jedis.del(delKeys);
2655 | pool.returnResource(jedis);
2656 | }
2657 | return r;
2658 | } catch (Exception e) {
2659 | throw new ClusterOpException(e);
2660 | }
2661 | }
2662 |
2663 | public Long del(final List keys) {
2664 | String[] keyss = keys.toArray(new String[keys.size()]);
2665 | return del(keyss);
2666 | }
2667 |
2668 | public Long delBinary(final List keys) {
2669 | byte[][] delKeys = (byte[][]) keys.toArray(new byte[keys.size()][]);
2670 | return del(delKeys);
2671 | }
2672 |
2673 | public Long del(final byte[]... keys) {
2674 | try {
2675 | Map> mkeys = new HashMap>();
2676 | for (byte[] key : keys) {
2677 | int part = hashFunc.hash(key);
2678 | List mlist = new ArrayList();
2679 | if (mkeys.containsKey(part)) {
2680 | mlist = mkeys.get(part);
2681 | }
2682 | mlist.add(key);
2683 | mkeys.put(part, mlist);
2684 | }
2685 | Long r = 0l;
2686 | for (int part : mkeys.keySet()) {
2687 | List mlist = mkeys.get(part);
2688 | byte[][] delKeys = (byte[][]) mlist.toArray(new byte[mlist
2689 | .size()][]);
2690 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2691 | .getPool(part);
2692 | Jedis jedis = pool.getResource();
2693 | r += jedis.del(delKeys);
2694 | pool.returnResource(jedis);
2695 | }
2696 | return r;
2697 | } catch (Exception e) {
2698 | throw new ClusterOpException(e);
2699 | }
2700 | }
2701 |
2702 | public List mget(final List keys) {
2703 | String[] keyss = keys.toArray(new String[keys.size()]);
2704 | return mget(keyss);
2705 | }
2706 |
2707 | public List mget(final String... keys) {
2708 | try {
2709 | List ret = new ArrayList(keys.length);
2710 | for (int g = 0; g < keys.length; g++) {
2711 | ret.add(null);
2712 | }
2713 | int[][] shadow = new int[partsLen][keys.length];
2714 | int[] count = new int[partsLen];
2715 | List> mkeys = new ArrayList>(partsLen);
2716 | List> mvals = new ArrayList>(partsLen);
2717 | for (int h = 0; h < partsLen; h++) {
2718 | mkeys.add(new ArrayList(keys.length));
2719 | mvals.add(new ArrayList(keys.length));
2720 | }
2721 | for (int j = 0; j < keys.length; j++) {
2722 | int part = hashFunc.hash(keys[j]);
2723 | mkeys.get(part).add(keys[j]);
2724 | shadow[part][count[part]] = j;
2725 | count[part]++;
2726 | }
2727 | for (int i = 0; i < partsLen; i++) {
2728 | if (!mkeys.get(i).isEmpty()) {
2729 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2730 | .getPool(i);
2731 | Jedis jedis = pool.getResource();
2732 | String[] mgkeys = mkeys.get(i).toArray(
2733 | new String[mkeys.get(i).size()]);
2734 | mvals.set(i, jedis.mget(mgkeys));
2735 | pool.returnResource(jedis);
2736 | }
2737 | }
2738 | for (int k = 0; k < partsLen; k++) {
2739 | for (int m = 0; m < count[k]; m++) {
2740 | ret.set(shadow[k][m], mvals.get(k).get(m));
2741 | }
2742 | }
2743 | return ret;
2744 | } catch (Exception e) {
2745 | throw new ClusterOpException(e);
2746 | }
2747 | }
2748 |
2749 | public List mgetBinary(final List keys) {
2750 | byte[][] mgkeys = (byte[][]) keys.toArray(new byte[keys.size()][]);
2751 | return mget(mgkeys);
2752 | }
2753 |
2754 | public List mget(final byte[]... keys) {
2755 | try {
2756 | List ret = new ArrayList(keys.length);
2757 | for (int g = 0; g < keys.length; g++) {
2758 | ret.add(null);
2759 | }
2760 | int[][] shadow = new int[partsLen][keys.length];
2761 | int[] count = new int[partsLen];
2762 | List> mkeys = new ArrayList>(partsLen);
2763 | List> mvals = new ArrayList>(partsLen);
2764 | for (int h = 0; h < partsLen; h++) {
2765 | mkeys.add(new ArrayList(keys.length));
2766 | mvals.add(new ArrayList(keys.length));
2767 | }
2768 | for (int j = 0; j < keys.length; j++) {
2769 | int part = hashFunc.hash(keys[j]);
2770 | mkeys.get(part).add(keys[j]);
2771 | shadow[part][count[part]] = j;
2772 | count[part]++;
2773 | }
2774 | for (int i = 0; i < partsLen; i++) {
2775 | if (!mkeys.get(i).isEmpty()) {
2776 | RedisClusterPool pool = (RedisClusterPool) this.poolsObj
2777 | .getPool(i);
2778 | Jedis jedis = pool.getResource();
2779 | byte[][] mgkeys = (byte[][]) mkeys.get(i).toArray(
2780 | new byte[mkeys.get(i).size()][]);
2781 | mvals.set(i, jedis.mget(mgkeys));
2782 | pool.returnResource(jedis);
2783 | }
2784 | }
2785 | for (int k = 0; k < partsLen; k++) {
2786 | for (int m = 0; m < count[k]; m++) {
2787 | ret.set(shadow[k][m], mvals.get(k).get(m));
2788 | }
2789 | }
2790 | return ret;
2791 | } catch (Exception e) {
2792 | throw new ClusterOpException(e);
2793 | }
2794 | }
2795 |
2796 | public Long mset(final Map keysvalues) {
2797 | try {
2798 | Long ret = 0L;
2799 | List