├── .gitignore
├── .travis.yml
├── CONTRIBUTING.md
├── LICENSE
├── Makefile.am
├── NOTICE
├── OSSMETADATA
├── README.md
├── bin
├── core_affinity.sh
├── kill_dynomite.sh
└── launch_dynomite.sh
├── build.sh
├── conf
├── dynomite.pem
├── dynomite.yml
├── dynomite_dns_single.yml
├── dynomite_florida_single.yml
├── mc_single.yml
├── node1.yml
├── node2.yml
├── recon_iv.pem
├── recon_key.pem
├── redis_dc1.yml
├── redis_dc2.yml
├── redis_node1.yml
├── redis_node2.yml
├── redis_rack1_node.yml
├── redis_rack2_node.yml
├── redis_rack3_node.yml
└── redis_single.yml
├── configure.ac
├── contrib
├── Makefile.am
├── fmemopen.c
├── fmemopen.h
├── murmur3
│ ├── README.md
│ ├── makefile
│ ├── murmur3.c
│ └── murmur3.h
├── yaml-0.1.4.tar.gz
└── yaml-0.1.4
│ └── .gitignore
├── docker
├── Dockerfile
├── HOWTO.md
└── scripts
│ └── startup.sh
├── docs
├── dyn_protocol.txt
└── florida.md
├── images
├── dynomite-emblem.png
└── dynomite-logo.png
├── init
├── README.md
├── systemd_environment__dynomite
├── systemd_service_rhel__dynomite.service
├── systemd_service_ubuntu__dynomite.service
└── upstart_ubuntu__dynomite
├── m4
└── .gitignore
├── man
└── dynomite.8
├── notes
├── c-styleguide.txt
├── debug.txt
├── kqueue.pdf
├── memcache.txt
├── recommendation.md
├── redis.md
└── socket.txt
├── scripts
├── Florida
│ ├── florida.js
│ ├── package.json
│ └── seeds.list
├── dynomite-manager
│ ├── bash-alias
│ ├── cassandra
│ ├── dynomite-manager
│ ├── kill_redis.sh
│ └── launch_nfredis.sh
├── dynomite
│ ├── dyn_mc_test.py
│ ├── dyn_redis_purge.py
│ ├── dyn_redis_test.py
│ ├── dynomite-test.sh
│ ├── generate_yamls.py
│ ├── multi_get.sh
│ ├── pipelined_read.sh
│ ├── pipelined_write.sh
│ └── redis-check.sh
├── memcache
│ └── populate_memcached.sh
├── redis
│ ├── dyno_redis_bgrewriteaof.sh
│ ├── dyno_redis_rss_healing.sh
│ └── redis-check.py
└── runall.sh
├── src
├── Makefile.am
├── dyn_array.c
├── dyn_array.h
├── dyn_asciilogo.h
├── dyn_cbuf.h
├── dyn_client.c
├── dyn_client.h
├── dyn_conf.c
├── dyn_conf.h
├── dyn_connection.c
├── dyn_connection.h
├── dyn_connection_internal.c
├── dyn_connection_internal.h
├── dyn_connection_pool.c
├── dyn_connection_pool.h
├── dyn_core.c
├── dyn_core.h
├── dyn_crypto.c
├── dyn_crypto.h
├── dyn_dict.c
├── dyn_dict.h
├── dyn_dict_msg_id.c
├── dyn_dict_msg_id.h
├── dyn_dnode_client.c
├── dyn_dnode_client.h
├── dyn_dnode_msg.c
├── dyn_dnode_msg.h
├── dyn_dnode_peer.c
├── dyn_dnode_peer.h
├── dyn_dnode_proxy.c
├── dyn_dnode_proxy.h
├── dyn_dnode_request.c
├── dyn_gossip.c
├── dyn_gossip.h
├── dyn_histogram.c
├── dyn_histogram.h
├── dyn_log.c
├── dyn_log.h
├── dyn_mbuf.c
├── dyn_mbuf.h
├── dyn_message.c
├── dyn_message.h
├── dyn_node_snitch.c
├── dyn_node_snitch.h
├── dyn_proxy.c
├── dyn_proxy.h
├── dyn_queue.h
├── dyn_rbtree.c
├── dyn_rbtree.h
├── dyn_request.c
├── dyn_response.c
├── dyn_response_mgr.c
├── dyn_response_mgr.h
├── dyn_ring_queue.c
├── dyn_ring_queue.h
├── dyn_server.c
├── dyn_server.h
├── dyn_setting.c
├── dyn_setting.h
├── dyn_signal.c
├── dyn_signal.h
├── dyn_stats.c
├── dyn_stats.h
├── dyn_string.c
├── dyn_string.h
├── dyn_task.c
├── dyn_task.h
├── dyn_test.c
├── dyn_types.c
├── dyn_types.h
├── dyn_util.c
├── dyn_util.h
├── dyn_vnode.c
├── dyn_vnode.h
├── dynomite.c
├── entropy
│ ├── Makefile.am
│ ├── dyn_entropy.h
│ ├── dyn_entropy_rcv.c
│ ├── dyn_entropy_snd.c
│ └── dyn_entropy_util.c
├── event
│ ├── Makefile.am
│ ├── dyn_epoll.c
│ ├── dyn_event.h
│ ├── dyn_evport.c
│ └── dyn_kqueue.c
├── hashkit
│ ├── .dirstamp
│ ├── Makefile.am
│ ├── dyn_crc16.c
│ ├── dyn_crc32.c
│ ├── dyn_fnv.c
│ ├── dyn_hashkit.c
│ ├── dyn_hashkit.h
│ ├── dyn_hsieh.c
│ ├── dyn_jenkins.c
│ ├── dyn_ketama.c
│ ├── dyn_md5.c
│ ├── dyn_modula.c
│ ├── dyn_murmur.c
│ ├── dyn_murmur3.c
│ ├── dyn_one_at_a_time.c
│ ├── dyn_random.c
│ ├── dyn_token.c
│ └── dyn_token.h
├── proto
│ ├── Makefile.am
│ ├── dyn_memcache.c
│ ├── dyn_proto.h
│ ├── dyn_proto_repair.h
│ ├── dyn_redis.c
│ └── dyn_redis_repair.c
├── seedsprovider
│ ├── Makefile.am
│ ├── dyn_dns.c
│ ├── dyn_florida.c
│ └── dyn_seeds_provider.h
└── tools
│ ├── Makefile.am
│ └── dyn_hash_tool.c
├── test
├── cluster_generator.py
├── dual_run.py
├── dyno_cluster.py
├── dyno_node.py
├── func_test.py
├── ip_util.py
├── kill_cluster.py
├── load.py
├── no_quorum_request.yaml
├── node.py
├── redis_node.py
├── run_loop.py
├── safe_quorum_request.yaml
├── start_cluster.py
├── supplemental.sh
└── utils.py
└── travis.sh
/.gitignore:
--------------------------------------------------------------------------------
1 | # IDE
2 | .idea
3 | CMakeLists.txt
4 |
5 | # Compiled Object files
6 | *.lo
7 | *.o
8 |
9 | # Compiled Dynamic libraries
10 | *.so
11 |
12 | # Compiled Static libraries
13 | *.la
14 | *.a
15 |
16 | # Compiled misc
17 | *.dep
18 | *.gcda
19 | *.gcno
20 | *.gcov
21 | *.pyc
22 |
23 | # Packages
24 | *.tar.gz
25 | *.tar.bz2
26 |
27 | # Logs
28 | *.log
29 |
30 | # Temporary
31 | *.swp
32 | *.~
33 | *~
34 | *.project
35 | *.cproject
36 | nbproject/*
37 | TAGS
38 | TODO
39 |
40 | # Core and executable
41 | core*
42 | nutcracker
43 | dynomite
44 | dynomite-test
45 | dyno-hash-tool
46 |
47 | # extracted yaml
48 | !/contrib/yaml-0.1.4.tar.gz
49 |
50 | # Autotools
51 | .deps
52 | .libs
53 |
54 | /aclocal.m4
55 | /autom4te.cache
56 | /stamp-h1
57 | /autoscan.log
58 | /libtool
59 |
60 | /config/config.guess
61 | /config/config.sub
62 | /config/depcomp
63 | /config/install-sh
64 | /config/ltmain.sh
65 | /config/missing
66 | /config
67 |
68 | /config.h
69 | /config.h.in
70 | /config.h.in~
71 | /config.log
72 | /config.status
73 | /configure.scan
74 | /configure
75 |
76 | Makefile
77 | Makefile.in
78 | /Debug
79 | /src/.dirstamp
80 | /src/test
81 | /src/tools
82 | /src/dynomite-test
83 | cscope.out
84 | /src/cscope.out
85 | /src/.dirstamp
86 | /src/tools/dyn_hash_tool
87 | /src/tools/dynomite-hash-tool
88 | /test/*.pyc
89 | /test/_binaries/*
90 | /test/_binaries
91 | /test/conf
92 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: c
2 | script: bash ./travis.sh
3 |
4 | before_install:
5 | - sudo apt remove --purge python3-pip
6 | - curl -O https://bootstrap.pypa.io/pip/3.5/get-pip.py
7 | - sudo -E python3 get-pip.py
8 | - sudo -E python3 -m pip install --upgrade "pip < 22.3"
9 |
10 | addons:
11 | apt:
12 | update: true
13 | packages:
14 | - python3.9
15 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to Dynomite
2 |
3 | Documentation for Dynomite is available in the following locations:
4 | - Dynomite docs
5 | - [wiki]( https://github.com/Netflix/dynomite/wiki )
6 |
7 | The Dynomite team is following the Gitflow workflow. The active development branch is [dev](https://github.com/Netflix/dynomite/tree/dev), the stable branch is [master](https://github.com/Netflix/dynomite/tree/master).
8 |
9 | Contributions will be accepted to the [dev](https://github.com/Netflix/dynomite/tree/dev) only.
10 |
11 |
12 | ## How to provide a patch for a new feature
13 |
14 | 1. If it is a major feature, please create an [Issue]( https://github.com/Netflix/dynomite/issues ) and discuss with the project leaders.
15 |
16 | 2. If in step 1 you get an acknowledgment from the project leaders, use the
17 | following procedure to submit a patch:
18 |
19 | a. Fork Dynomite on github ( http://help.github.com/fork-a-repo/ )
20 |
21 | b. Create a topic branch (git checkout -b my_branch)
22 |
23 | c. Push to your branch (git push origin my_branch)
24 |
25 | d. Initiate a pull request on github ( http://help.github.com/en/articles/creating-a-pull-request/ )
26 |
27 | e. Done :)
28 |
29 | For minor fixes just open a pull request to the [dev]( https://github.com/Netflix/dynomite/tree/dev ) branch on Github.
30 |
31 | ## Questions
32 |
33 | If you have questions or want to report a bug please create an [Issue]( https://github.com/Netflix/dynomite/issues ) or chat with us on [Gitter](https://gitter.im/Netflix/dynomite?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
34 |
35 |
36 |
--------------------------------------------------------------------------------
/Makefile.am:
--------------------------------------------------------------------------------
1 | MAINTAINERCLEANFILES = Makefile.in aclocal.m4 configure config.h.in config.h.in~ stamp-h.in
2 |
3 | ACLOCAL_AMFLAGS = -I m4
4 |
5 | SUBDIRS = contrib src
6 |
7 | dist_man_MANS = man/dynomite.8
8 |
9 | EXTRA_DIST = README.md NOTICE LICENSE conf scripts notes
10 |
--------------------------------------------------------------------------------
/OSSMETADATA:
--------------------------------------------------------------------------------
1 | osslifecycle=active
2 |
--------------------------------------------------------------------------------
/bin/core_affinity.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Set core affinity for redis and dynomite processes
3 | #
4 |
5 | # Requires setting the EC2 Instance type as ENV variable.
6 | # If Dynomite is used outside of AWS environment,
7 | # the core affinity script can be configured accordingly.
8 | echo "$EC2_INSTANCE_TYPE"
9 |
10 | if [ "$EC2_INSTANCE_TYPE" == "r3.xlarge" ]; then
11 | dynomite_pid=`pgrep -f $DYN_DIR/bin/dynomite`
12 | echo "dynomite pid: $dynomite_pid"
13 | taskset -pac 2,3 $dynomite_pid
14 |
15 | redis_pid=`ps -ef | grep 22122 | grep redis | awk -F' ' '{print $2}'`
16 | echo "redis pid: $redis_pid"
17 | taskset -pac 1 $redis_pid
18 |
19 | else
20 |
21 | dynomite_pid=`pgrep -f $DYN_DIR/bin/dynomite`
22 | echo "dynomite pid: $dynomite_pid"
23 | taskset -pac 2,5,6 $dynomite_pid
24 |
25 | redis_pid=`ps -ef | grep 22122 | grep redis | awk -F' ' '{print $2}'`
26 | echo "redis pid: $redis_pid"
27 | taskset -pac 3,7 $redis_pid
28 |
29 | fi
30 |
31 |
--------------------------------------------------------------------------------
/bin/kill_dynomite.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | cmd="pkill -f /apps/dynomite/bin/dynomite"
4 | if [ $USER != "root" ];then
5 | exec $cmd
6 | else
7 | su $userowner -c "$cmd"
8 | fi
9 |
10 |
--------------------------------------------------------------------------------
/bin/launch_dynomite.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 |
4 | # ** This check has to be after the redis check above.
5 | # Quit if Dynomite is already running, rather than
6 | # throwing errors from this script.
7 | if (ps -ef | grep '[/]apps/dynomite/bin/dynomite'); then
8 | logger -s "Dynomite already running, no need to restart it again."
9 | exit 0
10 | fi
11 |
12 | DYN_DIR=/apps/dynomite
13 | LOG_DIR=/logs/system/dynomite
14 | CONF_DIR=$DYN_DIR/conf
15 |
16 |
17 | declare -x `stat --printf "userowner=%U\ngroupowner=%G\n" "$0"`
18 |
19 | if [ ! -d "$LOG_DIR" ]; then
20 | sudo mkdir -p $LOG_DIR
21 | sudo chown -R $userowner:$groupowner $LOG_DIR
22 | fi
23 |
24 | #save the previous log
25 | if [ -e $LOG_DIR/dynomite.log ]; then
26 | mv $LOG_DIR/dynomite.log $LOG_DIR/dynomite-$(date +%Y%m%d_%H%M%S).log
27 | fi
28 |
29 | echo "MBUF_SIZE=$MBUF_SIZE"
30 | if [ -z "$MBUF_SIZE" ]; then
31 | echo "MBUF_SIZE is empty. Use default value 16K"
32 | MBUF_SIZE=16384
33 | fi
34 |
35 | echo "ALLOC_MSGS=$ALLOC_MSGS"
36 | if [ -z "$ALLOC_MSGS" ]; then
37 | #** Requires setting the EC2 Instance type as ENV variable
38 | # If Dynomite is used outside of AWS environment the
39 | # following can be used as ideas on how much memory Dynomite
40 | # should take.
41 |
42 | # Message allocation based on the instance type
43 | # 2GB for Florida + 85% for Redis (rest available for OS)
44 | if [ "$EC2_INSTANCE_TYPE" == "r3.xlarge" ]; then
45 | # r3.xlarge: 30.5GB RAM (2.5GB available)
46 | ALLOC_MSGS=100000
47 | elif [ "$EC2_INSTANCE_TYPE" == "r3.2xlarge" ]; then
48 | # r3.2xlarge: 61GB RAM (7.15GB available)
49 | ALLOC_MSGS=300000
50 | elif [ "$EC2_INSTANCE_TYPE" == "r3.4xlarge" ]; then
51 | # r3.4xlarge: 122GB RAM (16.3GB available)
52 | ALLOC_MSGS=800000
53 | elif [ "$EC2_INSTANCE_TYPE" == "r3.8xlarge" ]; then
54 | # r3.8xlarge: 244GB RAM (34.19GB available)
55 |         # Dynomite upper threshold is 1M
56 | ALLOC_MSGS=1000000
57 | fi
58 | echo "Instance Type: $EC2_INSTANCE_TYPE --> Allocated messages: $ALLOC_MSGS"
59 | fi
60 |
61 |
62 | # note that we do not use 'su - username .... ' , because we want to keep the env settings that we have done so far
63 | cmd="$DYN_DIR/bin/dynomite -d -c $CONF_DIR/dynomite.yml -m$MBUF_SIZE -M$ALLOC_MSGS --output=$LOG_DIR/dynomite.log "
64 |
65 |
66 | if [ $USER != "root" ];then
67 | exec $cmd
68 | else
69 | su $userowner -c "$cmd"
70 | fi
71 |
72 | sleep 1
73 |
74 | sudo $DYN_DIR/bin/core_affinity.sh
75 |
76 |
--------------------------------------------------------------------------------
/build.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #
4 | # Dear Mac user, remember to setup your development environment. Install XCode
5 | # then run the following commands:
6 | #
7 | # xcode-select --install
8 | # brew install cmake autoconf libtool gcc automake openssl
9 | # brew link openssl --force
10 | #
11 |
12 | OS=`uname -s`
13 |
14 | if [ $OS == "Darwin" ] ; then
15 | SSL_LIBDIR=`pkg-config --variable=libdir openssl`
16 | SSL_INCLUDEDIR=`pkg-config --variable=includedir openssl`
17 | fi
18 |
19 | #make clean
20 |
21 | autoreconf -fvi
22 |
23 | if [ $OS == "Darwin" ] ; then
24 | ./configure --enable-debug=yes LDFLAGS="-L${SSL_LIBDIR}" CPPFLAGS="-I${SSL_INCLUDEDIR}"
25 | else
26 | ./configure --enable-debug=full
27 | fi
28 |
29 | make
30 |
--------------------------------------------------------------------------------
/conf/dynomite.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN RSA PRIVATE KEY-----
2 | MIICWwIBAAKBgQCjijNAnd1/uFlrCtfO83Qo5idWbqLpyfQdKHd1CRE9AliA8pyM
3 | u9XqdyEbMJ229PPPWhlKui0moVcj1J8w/ZBzLFEQtqf+93Ytv9YR4tktyv0kWTnf
4 | yUzp9D74WHL2/lwfi0Nv+jSIWg5aQRqMzRsdErTwk+iaSuuZ/+3Jbp9JxwIDAQAB
5 | AoGAd3s70MTFlE+SfMMyQo/Z2Ru1t88jFV2oDTmIdShHwOQa1zLpr2R4eFHLDTtq
6 | rsWt71srSU5WOHf92z63g6ptwpxixb+HWIF49Ke0vtMUs+ah/E0jV9EyAwFmdACh
7 | Yxa/dwCTkl4O3q8/Lm1gXExpeif9PUtenIFU8Va2l1s87WECQQDV33ZLWatbAUQz
8 | qLXsRIOiSDAhTXyuAFX2I9HyKfAGxaOz7U8QYZYJVf/vmubWERHZhe8TurusYuEZ
9 | onh4zPuRAkEAw8CxzxKjGZB75Wz2+k9Qbkb6+HS4VBCVPHScwE2+kXnI28l+ATro
10 | MDE0F7VfYA2Cv8xDeBBFhR/rki/5MmhT1wJAMWa2il0iCZlXSZ90tih1R2zpQSgG
11 | 3qJRONO5UPiC6u2IDK2KD7yazXlB7vSW0WeL+fTH7oS5iO4mFKfEsBKZIQJAAZth
12 | WkvS3vDas4VzeZaRGHaotMVwhfAk6XwjlUiOGe+gAH3k9Omg4lRlExTAFa/GtQBe
13 | RWwMxgz/CBIwv4MvvQJAZxZuxzN7D5VhJlkL1IEXnPHgE+RgR0vTAQaqnSa9PNcv
14 | gAiXFRKkMHeAkHUIsZ382C5zzjUGQrCD8IrvpAwXBg==
15 | -----END RSA PRIVATE KEY-----
16 |
--------------------------------------------------------------------------------
/conf/dynomite.yml:
--------------------------------------------------------------------------------
1 | dyn_o_mite:
2 | listen: 127.0.0.1:8102
3 | dyn_listen: 127.0.0.1:8101
4 | tokens: '101134286'
5 | servers:
6 | - 127.0.0.1:22122:1
7 | data_store: 0
8 | mbuf_size: 16384
9 | max_msgs: 300000
10 |
11 |
--------------------------------------------------------------------------------
/conf/dynomite_dns_single.yml:
--------------------------------------------------------------------------------
1 | dyn_o_mite:
2 | datacenter: dc
3 | rack: rack1
4 | dyn_listen: 0.0.0.0:8101
5 | dyn_seed_provider: dns_provider
6 | listen: 0.0.0.0:8102
7 | servers:
8 | - 127.0.0.1:6379:1
9 | tokens: '0'
10 | data_store: 0
11 | stats_listen: 0.0.0.0:22222
12 |
--------------------------------------------------------------------------------
/conf/dynomite_florida_single.yml:
--------------------------------------------------------------------------------
1 | dyn_o_mite:
2 | datacenter: dc
3 | rack: rack1
4 | dyn_listen: 0.0.0.0:8101
5 | dyn_seed_provider: florida_provider
6 | listen: 0.0.0.0:8102
7 | servers:
8 | - 127.0.0.1:6379:1
9 | tokens: '0'
10 | secure_server_option: datacenter
11 | pem_key_file: conf/dynomite.pem
12 | data_store: 0
13 | stats_listen: 0.0.0.0:22222
14 |
--------------------------------------------------------------------------------
/conf/mc_single.yml:
--------------------------------------------------------------------------------
1 | dyn_o_mite:
2 | dyn_listen: 127.0.0.1:8101
3 | listen: 127.0.0.1:8102
4 | servers:
5 | - 127.0.0.1:22122:1
6 | tokens: 437425602
7 | data_store: 1
8 | stats_listen: 0.0.0.0:22222
9 |
--------------------------------------------------------------------------------
/conf/node1.yml:
--------------------------------------------------------------------------------
1 | dyn_o_mite:
2 | datacenter: dc1
3 | rack: rack1
4 | listen: 0.0.0.0:8102
5 | dyn_listen: 0.0.0.0:8101
6 | dyn_seeds:
7 | - 127.0.0.1:8113:rack1:dc2:101134286
8 | dyn_seed_provider: simple_provider
9 | tokens: '101134286'
10 | servers:
11 | - 127.0.0.1:11371:1
12 | data_store: 0
13 | stats_listen: 0.0.0.0:33331
14 | preconnect: true
15 |
--------------------------------------------------------------------------------
/conf/node2.yml:
--------------------------------------------------------------------------------
1 | dyn_o_mite:
2 | datacenter: dc2
3 | rack: rack1
4 | listen: 0.0.0.0:8114
5 | dyn_listen: 0.0.0.0:8113
6 | dyn_seeds:
7 | - 127.0.0.1:8101:rack1:dc1:101134286
8 | dyn_seed_provider: simple_provider
9 | tokens: '101134286'
10 | servers:
11 | - 127.0.0.1:11370:1
12 | data_store: 0
13 | stats_listen: 0.0.0.0:33333
14 | preconnect: true
15 |
--------------------------------------------------------------------------------
/conf/recon_iv.pem:
--------------------------------------------------------------------------------
1 | 01234567890123456
2 |
--------------------------------------------------------------------------------
/conf/recon_key.pem:
--------------------------------------------------------------------------------
1 | 01234567890123456
2 |
--------------------------------------------------------------------------------
/conf/redis_dc1.yml:
--------------------------------------------------------------------------------
1 | dyn_o_mite:
2 | datacenter: dc1
3 | rack: rack1
4 | dyn_listen: 127.0.0.1:8101
5 | dyn_seeds:
6 | - 127.0.0.2:8101:rack2:dc2:1383429731
7 | listen: 127.0.0.1:8102
8 | servers:
9 | - 127.0.0.1:22121:1
10 | tokens: '12345678'
11 | secure_server_option: datacenter
12 | pem_key_file: conf/dynomite.pem
13 | data_store: 0
14 | stats_listen: 0.0.0.0:22221
15 | datastore_connections: 3
16 | local_peer_connections: 3
17 | remote_peer_connections: 3
18 |
--------------------------------------------------------------------------------
/conf/redis_dc2.yml:
--------------------------------------------------------------------------------
1 | dyn_o_mite:
2 | datacenter: dc2
3 | rack: rack2
4 | dyn_listen: 127.0.0.2:8101
5 | dyn_seeds:
6 | - 127.0.0.1:8101:rack1:dc1:12345678
7 | listen: 127.0.0.2:8102
8 | servers:
9 | - 127.0.0.1:22122:1
10 | tokens: '1383429731'
11 | secure_server_option: datacenter
12 | pem_key_file: conf/dynomite.pem
13 | data_store: 0
14 | stats_listen: 0.0.0.0:22222
15 | datastore_connections: 3
16 | local_peer_connections: 3
17 | remote_peer_connections: 3
18 |
--------------------------------------------------------------------------------
/conf/redis_node1.yml:
--------------------------------------------------------------------------------
1 | dyn_o_mite:
2 | datacenter: dc
3 | rack: rack
4 | dyn_listen: 127.0.0.1:8101
5 | dyn_seeds:
6 | - 127.0.0.2:8101:rack:dc:1383429731
7 | listen: 127.0.0.1:8102
8 | servers:
9 | - 127.0.0.1:22122:1
10 | tokens: '12345678'
11 | secure_server_option: datacenter
12 | pem_key_file: conf/dynomite.pem
13 | data_store: 0
14 | stats_listen: 0.0.0.0:22222
15 |
--------------------------------------------------------------------------------
/conf/redis_node2.yml:
--------------------------------------------------------------------------------
1 | dyn_o_mite:
2 | datacenter: dc
3 | rack: rack
4 | dyn_listen: 127.0.0.2:8101
5 | dyn_seeds:
6 | - 127.0.0.1:8101:rack:dc:12345678
7 | listen: 127.0.0.2:8102
8 | servers:
9 | - 127.0.0.1:22123:1
10 | tokens: '1383429731'
11 | secure_server_option: datacenter
12 | pem_key_file: conf/dynomite.pem
13 | data_store: 0
14 | stats_listen: 0.0.0.0:22223
15 |
--------------------------------------------------------------------------------
/conf/redis_rack1_node.yml:
--------------------------------------------------------------------------------
1 | dyn_o_mite:
2 | datacenter: dc
3 | rack: rack1
4 | dyn_listen: 127.0.0.1:8101
5 | dyn_seeds:
6 | - 127.0.0.2:8101:rack2:dc:1383429731
7 | - 127.0.0.3:8101:rack3:dc:1383429731
8 | listen: 127.0.0.1:8102
9 | servers:
10 | - 127.0.0.1:22121:1
11 | tokens: '1383429731'
12 | secure_server_option: datacenter
13 | pem_key_file: conf/dynomite.pem
14 | data_store: 0
15 | read_consistency : DC_SAFE_QUORUM
16 | write_consistency : DC_SAFE_QUORUM
17 | stats_listen: 0.0.0.0:22221
18 |
--------------------------------------------------------------------------------
/conf/redis_rack2_node.yml:
--------------------------------------------------------------------------------
1 | dyn_o_mite:
2 | datacenter: dc
3 | rack: rack2
4 | dyn_listen: 127.0.0.2:8101
5 | dyn_seeds:
6 | - 127.0.0.1:8101:rack1:dc:1383429731
7 | - 127.0.0.3:8101:rack3:dc:1383429731
8 | listen: 127.0.0.2:8102
9 | servers:
10 | - 127.0.0.1:22122:1
11 | tokens: '1383429731'
12 | secure_server_option: datacenter
13 | pem_key_file: conf/dynomite.pem
14 | data_store: 0
15 | read_consistency : DC_SAFE_QUORUM
16 | write_consistency : DC_SAFE_QUORUM
17 | stats_listen: 0.0.0.0:22222
18 |
--------------------------------------------------------------------------------
/conf/redis_rack3_node.yml:
--------------------------------------------------------------------------------
1 | dyn_o_mite:
2 | datacenter: dc
3 | rack: rack3
4 | dyn_listen: 127.0.0.3:8101
5 | dyn_seeds:
6 | - 127.0.0.1:8101:rack1:dc:1383429731
7 | - 127.0.0.2:8101:rack2:dc:1383429731
8 | listen: 127.0.0.3:8102
9 | servers:
10 | - 127.0.0.1:22123:1
11 | tokens: '1383429731'
12 | secure_server_option: datacenter
13 | pem_key_file: conf/dynomite.pem
14 | data_store: 0
15 | read_consistency : DC_SAFE_QUORUM
16 | write_consistency : DC_SAFE_QUORUM
17 | stats_listen: 0.0.0.0:22223
18 |
--------------------------------------------------------------------------------
/conf/redis_single.yml:
--------------------------------------------------------------------------------
1 | dyn_o_mite:
2 | dyn_listen: 0.0.0.0:8101
3 | data_store: 0
4 | listen: 0.0.0.0:8102
5 | dyn_seed_provider: simple_provider
6 | servers:
7 | - 127.0.0.1:22122:1
8 | tokens: 437425602
9 | stats_listen: 0.0.0.0:22222
10 |
--------------------------------------------------------------------------------
/contrib/Makefile.am:
--------------------------------------------------------------------------------
1 | SUBDIRS = yaml-0.1.4
2 |
3 | EXTRA_DIST = yaml-0.1.4.tar.gz
4 |
--------------------------------------------------------------------------------
/contrib/fmemopen.c:
--------------------------------------------------------------------------------
1 | //
2 | // Copyright 2011-2014 NimbusKit
3 | // Originally ported from https://github.com/ingenuitas/python-tesseract/blob/master/fmemopen.c
4 | //
5 | // Licensed under the Apache License, Version 2.0 (the "License");
6 | // you may not use this file except in compliance with the License.
7 | // You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing, software
12 | // distributed under the License is distributed on an "AS IS" BASIS,
13 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | // See the License for the specific language governing permissions and
15 | // limitations under the License.
16 | //
17 |
18 | #include <stdio.h>
19 | #include <stdlib.h>
20 | #include <string.h>
21 | #include "fmemopen.h"
22 |
23 | struct fmem {
24 | size_t pos;
25 | size_t size;
26 | char *buffer;
27 | };
28 | typedef struct fmem fmem_t;
29 |
30 | static int readfn(void *handler, char *buf, int size) {
31 | fmem_t *mem = handler;
32 | size_t available = mem->size - mem->pos;
33 |
34 | if (size > available) {
35 | size = available;
36 | }
37 | memcpy(buf, mem->buffer + mem->pos, sizeof(char) * size);
38 | mem->pos += size;
39 |
40 | return size;
41 | }
42 |
43 | static int writefn(void *handler, const char *buf, int size) {
44 | fmem_t *mem = handler;
45 | size_t available = mem->size - mem->pos;
46 |
47 | if (size > available) {
48 | size = available;
49 | }
50 | memcpy(mem->buffer + mem->pos, buf, sizeof(char) * size);
51 | mem->pos += size;
52 |
53 | return size;
54 | }
55 |
56 | static fpos_t seekfn(void *handler, fpos_t offset, int whence) {
57 | size_t pos;
58 | fmem_t *mem = handler;
59 |
60 | switch (whence) {
61 | case SEEK_SET: {
62 | if (offset >= 0) {
63 | pos = (size_t)offset;
64 | } else {
65 | pos = 0;
66 | }
67 | break;
68 | }
69 | case SEEK_CUR: {
70 | if (offset >= 0 || (size_t)(-offset) <= mem->pos) {
71 | pos = mem->pos + (size_t)offset;
72 | } else {
73 | pos = 0;
74 | }
75 | break;
76 | }
77 | case SEEK_END: pos = mem->size + (size_t)offset; break;
78 | default: return -1;
79 | }
80 |
81 | if (pos > mem->size) {
82 | return -1;
83 | }
84 |
85 | mem->pos = pos;
86 | return (fpos_t)pos;
87 | }
88 |
89 | static int closefn(void *handler) {
90 | free(handler);
91 | return 0;
92 | }
93 |
94 | FILE *fmemopen(void *buf, size_t size, const char *mode) {
95 | // This data is released on fclose.
96 | fmem_t* mem = (fmem_t *) malloc(sizeof(fmem_t));
97 |
98 | // Zero-out the structure.
99 | memset(mem, 0, sizeof(fmem_t));
100 |
101 | mem->size = size;
102 | mem->buffer = buf;
103 |
104 | // funopen's man page: https://developer.apple.com/library/mac/#documentation/Darwin/Reference/ManPages/man3/funopen.3.html
105 | return funopen(mem, readfn, writefn, seekfn, closefn);
106 | }
107 |
--------------------------------------------------------------------------------
/contrib/fmemopen.h:
--------------------------------------------------------------------------------
1 | //
2 | // Copyright 2011-2014 NimbusKit
3 | // Originally ported from https://github.com/ingenuitas/python-tesseract/blob/master/fmemopen.c
4 | //
5 | // Licensed under the Apache License, Version 2.0 (the "License");
6 | // you may not use this file except in compliance with the License.
7 | // You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing, software
12 | // distributed under the License is distributed on an "AS IS" BASIS,
13 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | // See the License for the specific language governing permissions and
15 | // limitations under the License.
16 | //
17 |
18 | #ifndef FMEMOPEN_H_
19 | #define FMEMOPEN_H_
20 |
21 | #if defined __cplusplus
22 | extern "C" {
23 | #endif
24 |
25 | /**
26 | * A BSD port of the fmemopen Linux method using funopen.
27 | *
28 | * man docs for fmemopen:
29 | * http://linux.die.net/man/3/fmemopen
30 | *
31 | * man docs for funopen:
32 | * https://developer.apple.com/library/mac/#documentation/Darwin/Reference/ManPages/man3/funopen.3.html
33 | *
34 | * This method is ported from ingenuitas' python-tesseract project.
35 | *
36 | * You must call fclose on the returned file pointer or memory will be leaked.
37 | *
38 | * @param buf The data that will be used to back the FILE* methods. Must be at least
39 | * @c size bytes.
40 | * @param size The size of the @c buf data.
41 | * @param mode The permitted stream operation modes.
42 | * @return A pointer that can be used in the fread/fwrite/fseek/fclose family of methods.
43 | * If a failure occurred NULL will be returned.
44 | * @ingroup NimbusMemoryMappping
45 | */
46 | FILE *fmemopen(void *buf, size_t size, const char *mode);
47 |
48 | #ifdef __cplusplus
49 | }
50 | #endif
51 |
52 | #endif // #ifndef FMEMOPEN_H_
53 |
--------------------------------------------------------------------------------
/contrib/murmur3/README.md:
--------------------------------------------------------------------------------
1 | C port of Murmur3 hash
2 | ==============
3 |
4 | This is a port of the [Murmur3](http://code.google.com/p/smhasher/wiki/MurmurHash3) hash function. Murmur3 is a non-cryptographic hash, designed to be fast and excellent-quality for making things like hash tables or bloom filters. This is a port of the original C++ code, designed for Visual Studio, into standard C that gcc can compile efficiently.
5 |
6 | How to use it
7 | -----------
8 |
9 | Just compile and link your program with `murmur3.c`, and be sure to include `murmur3.h` to get the function prototypes. There are three hash functions:
10 |
11 | void MurmurHash3_x86_32 (const void *key, int len, uint32_t seed, void *out);
12 |
13 | void MurmurHash3_x86_128(const void *key, int len, uint32_t seed, void *out);
14 |
15 | void MurmurHash3_x64_128(const void *key, int len, uint32_t seed, void *out);
16 |
17 | All three of these functions have the same interface: you give them `key`, a pointer to the data you wish to hash; `len`, the length in bytes; `seed`, an arbitrary seed number which you can use to tweak the hash, and `out`, a pointer to a buffer big enough to hold the hash's output value.
18 |
19 | The hash functions differ in both their internal mechanisms and in their outputs. They are specialized for different use cases:
20 |
21 | **MurmurHash3_x86_32** has the lowest throughput, but also the lowest latency. If you're making a hash table that usually has small keys, this is probably the one you want to use on 32-bit machines. It has a 32-bit output.
22 |
23 |
24 | **MurmurHash3_x86_128** is also designed for 32-bit systems, but produces a 128-bit output, and has about 30% higher throughput than the previous hash. Be warned, though, that its latency for a single 16-byte key is about 86% longer!
25 |
26 | **MurmurHash3_x64_128** is the best of the lot, if you're using a 64-bit machine. Its throughput is 250% higher than MurmurHash3_x86_32, but it has roughly the same latency. It has a 128-bit output.
27 |
28 | The hash functions are designed to work efficiently on x86 processors; in particular, they make some assumptions about the endianness of the processor, and about the speed of unaligned reads. If you have problems running this code on non-x86 architectures, it should be possible to modify it to work correctly and efficiently -- I just don't have access to those machines for testing. The code in `murmur3.c` is pretty straightforward, and shouldn't be too hard to alter.
29 |
30 | There is an example program, `example.c`, which you can look at and play with. You can build it with the makefile.
31 |
32 | License and contributing
33 | --------------------
34 |
35 | All this code is in the public domain. Murmur3 was created by Austin Appleby, and the C port and general tidying up was done by Peter Scott. If you'd like to contribute something, I would love to add your name to this list.
36 |
--------------------------------------------------------------------------------
/contrib/murmur3/makefile:
--------------------------------------------------------------------------------
1 | CFLAGS = -O3 -Wall
2 |
3 | all: example
4 | example: murmur3.o
5 |
6 | clean:
7 | rm -rf *.o
8 |
--------------------------------------------------------------------------------
/contrib/murmur3/murmur3.h:
--------------------------------------------------------------------------------
1 | //-----------------------------------------------------------------------------
2 | // MurmurHash3 was written by Austin Appleby, and is placed in the
3 | // public domain. The author hereby disclaims copyright to this source
4 | // code.
5 |
6 | #ifndef _MURMURHASH3_H_
7 | #define _MURMURHASH3_H_
8 |
9 | #include <stdint.h>
10 |
11 | //-----------------------------------------------------------------------------
12 |
13 | void MurmurHash3_x86_32 (const void *key, int len, uint32_t seed, void *out);
14 |
15 | void MurmurHash3_x86_128(const void *key, int len, uint32_t seed, void *out);
16 |
17 | void MurmurHash3_x64_128(const void *key, int len, uint32_t seed, void *out);
18 |
19 | //-----------------------------------------------------------------------------
20 |
21 | #endif // _MURMURHASH3_H_
22 |
--------------------------------------------------------------------------------
/contrib/yaml-0.1.4.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Netflix/dynomite/2046b05e27f6bc55dded5e82131a8001449e5485/contrib/yaml-0.1.4.tar.gz
--------------------------------------------------------------------------------
/contrib/yaml-0.1.4/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything
2 | *
3 |
4 | # Except me
5 | !.gitignore
6 |
--------------------------------------------------------------------------------
/docker/Dockerfile:
--------------------------------------------------------------------------------
1 | ##########################################################################
2 | # Dockerfile to build Dynomite container images with Redis as the backend
3 | # Based on Ubuntu
4 | ##########################################################################
5 | # Copyright 2015 Netflix, Inc.
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 |
19 | ##########################################################################
20 |
21 | # Set the base image to Ubuntu
22 | FROM ubuntu
23 |
24 | # File Author / Maintainer
25 | MAINTAINER Ioannis Papapanagiotou - dynomite@netflix.com
26 |
27 | # Update the repository sources list and Install package Build Essential
28 | RUN apt-get update && \
29 | export DEBIAN_FRONTEND=noninteractive && \
30 | apt-get install -y \
31 | autoconf \
32 | build-essential \
33 | dh-autoreconf \
34 | git \
35 | libssl-dev \
36 | libtool \
37 | software-properties-common \
38 | redis-server \
39 | tcl8.5
40 |
41 | # Clone the Dynomite Git
42 | RUN git clone https://github.com/Netflix/dynomite.git
43 | RUN echo 'Git repo has been cloned in your Docker VM'
44 |
45 | RUN echo 'copy file'
46 | COPY scripts/startup.sh dynomite/startup.sh
47 | RUN echo 'done copying file'
48 |
49 | # Move to working directory
50 | WORKDIR dynomite/
51 |
52 | # Autoreconf
53 | RUN autoreconf -fvi \
54 | && ./configure --enable-debug=log \
55 | && CFLAGS="-ggdb3 -O0" ./configure --enable-debug=full \
56 | && make \
57 | && make install
58 |
59 | ##################### INSTALLATION ENDS #####################
60 |
61 | # Expose the peer port
62 | RUN echo 'Exposing peer port 8101'
63 | EXPOSE 8101
64 |
65 | # Expose the underlying Redis port
66 | RUN echo 'Exposing Redis port 22122'
67 | EXPOSE 22122
68 |
69 | # Expose the stats/admin port
70 | RUN echo 'Exposing stats/admin port 22222'
71 | EXPOSE 22222
72 |
73 | # Default port to access Dynomite
74 | RUN echo 'Exposing client port for Dynomite 8102'
75 | EXPOSE 8102
76 |
77 |
78 | # Setting overcommit for Redis to be able to do BGSAVE/BGREWRITEAOF
79 | RUN sysctl vm.overcommit_memory=1
80 |
81 | # Set the entry-point to be the startup script
82 | ENTRYPOINT ["/dynomite/startup.sh"]
83 |
--------------------------------------------------------------------------------
/docker/HOWTO.md:
--------------------------------------------------------------------------------
1 | # Build Image
2 |
3 | Create Dynomite plus Redis single server image with docker
4 |
5 | # Example: sudo docker build -t [name] .
6 | $ sudo docker build -t my_dynomite .
7 |
8 | # Running A Dynomite Instance
9 |
10 | Create a container running a Dynomite and Redis instance inside. To set the name, use the --name [name] option. If a name is not set, an alphanumeric ID will be assigned.
11 |
12 | $ sudo docker run --name my_dynomite_instance -i -t my_dynomite
13 |
14 | To list all containers
15 |
16 | $ sudo docker ps
17 |
18 | Enjoy!
19 |
--------------------------------------------------------------------------------
/docker/scripts/startup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #Start redis server on 22122
4 | redis-server --port 22122 &
5 |
6 | src/dynomite --conf-file=conf/redis_single.yml -v5
7 |
--------------------------------------------------------------------------------
/docs/dyn_protocol.txt:
--------------------------------------------------------------------------------
1 | Dyn message Protocol:
2 |
3 | I. Specification:
4 |
5 | 1. Structure format
6 |
7 |
8 |
9 |
10 |
11 |
12 | *
13 |
14 | *
15 |
16 |
17 | 2. Field descriptions
18 | a. MagicNumber: it is always 2014
19 | b. MessageId: sequence id of a message
20 | c. TypeId: please see the code for the list.
21 | d. BitField: indicate whether compression and/or encryption. Right most bit indicates encryption.
22 | The 2nd bit from the right indicates a compressed message.
23 | e. Version: version of the msg. This usually depends on the version of the dynomite.
24 | This is needed during upgrading a live cluster with mixed versions.
25 | f. Same-dc: bit to indicate whether the communication is inter-dcs.
26 | We should later exchange the node's metadata, such as rack, dc, and ip.
27 |
28 | 3. Example:
29 | This is a dynomite message that carries a Redis' "get foo" request:
30 |
31 | "2014 1344 5 1 1 0\r\n*4 minh\r\n*2\r\n$3\r\nget\r\n$3\r\nfoo\r\n"
32 |
33 | where "*2\r\n$3\r\nget\r\n$3\r\nfoo\r\n" is Redis' payload for "get foo" cmd.
34 |
35 |
36 | II. Bit field for encryption/decryption
37 |
38 |
39 | III. Compression
40 | We currently don't support compression but it should be similar to the way how
41 | encryption/decryption works. We will support/implement this feature in the near future
42 | if there is a demand for it.
43 |
44 | IV. Payload size
45 | Will fill in soon
46 |
47 |
--------------------------------------------------------------------------------
/docs/florida.md:
--------------------------------------------------------------------------------
1 | # Florida
2 |
3 | ## Florida Multi-Cluster Solution
4 |
5 | Let's say you want to have multiple clusters so you need to have different Florida URLs providing different seeds for each cluster. In such a case, the following environment variables can be used to customize the behavior of Dynomite without requiring a custom build (via Make).
6 |
7 | - `DYNOMITE_FLORIDA_IP`
8 | - `DYNOMITE_FLORIDA_PORT`
9 | - `DYNOMITE_FLORIDA_REQUEST`
10 |
11 | The Florida server only reads the environment variables one time when the server is started. If the environment variables are not present (NULL) the code will use the macros or CFLAGS values. This is great because it allows us to use the same binary all the time; later, when the code is deployed into ASGARD, each deployment can simply use different user data, since it is just a matter of exporting different values to the AMI/Linux instance. There is a minor fix of YAML syntax as well.
12 |
13 | So for instance someone can have in the `/etc/profile`
14 |
15 | ```bash
16 | export DYNOMITE_FLORIDA_PORT=8080
17 | export DYNOMITE_FLORIDA_IP="127.0.0.1"
18 | export DYNOMITE_FLORIDA_REQUEST="GET /florida/cluster1/get_seeds.txt HTTP/1.0
19 | Host: 127.0.0.1
20 | User-Agent: HTMLGET 1.0
21 |
22 | "
23 | ```
24 |
25 | and for another box just
26 |
27 | ```bash
28 | export DYNOMITE_FLORIDA_PORT=8080
29 | export DYNOMITE_FLORIDA_IP="127.0.0.1"
30 | export DYNOMITE_FLORIDA_REQUEST="GET /florida/cluster2/get_seeds.txt HTTP/1.0
31 | Host: 127.0.0.1
32 | User-Agent: HTMLGET 1.0
33 |
34 | "
35 | ```
36 |
37 | Even pass this through ASGARD via user_data.
38 |
39 | Then build dynomite normally like:
40 |
41 | ```bash
42 | sudo autoreconf -fvi ; sudo ./configure --enable-debug=log ; sudo make;
43 | ```
44 |
45 | And Run:
46 |
47 | ```bash
48 | sudo --preserve-env src/dynomite -c conf/dynomite_florida_single.yml
49 | ```
50 |
51 | And it works :-)
52 |
53 | ## florida.js
54 |
55 | `florida.js` is an http server that returns a list of seed nodes via a REST API.
56 |
57 | ### Command
58 |
59 | ```bash
60 | node florida.js [file] [debug]
61 | ```
62 |
63 | **file** must be a path to a `seeds.list` file. The default `seeds.list` file is `/etc/dynomite/seeds.list`.
64 |
65 | **debug** is written as the string "debug" (without the quotes).
66 |
67 | ### Run in debug mode
68 |
69 | You can run `florida.js` in debug mode (i.e. with messages logged to the console) with the following command.
70 |
71 | ```bash
72 | cd scripts/Florida
73 |
74 | npm run debug
75 | ```
76 |
77 | ### Run with a custom seeds file
78 |
79 | ```bash
80 | cd scripts/Florida
81 |
82 | node florida.js ./seeds.list
83 | ```
84 |
85 | ### Run with a custom seeds file in debug mode
86 |
87 | ```bash
88 | cd scripts/Florida
89 |
90 | node florida.js ./seeds.list debug
91 | ```
92 |
--------------------------------------------------------------------------------
/images/dynomite-emblem.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Netflix/dynomite/2046b05e27f6bc55dded5e82131a8001449e5485/images/dynomite-emblem.png
--------------------------------------------------------------------------------
/images/dynomite-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Netflix/dynomite/2046b05e27f6bc55dded5e82131a8001449e5485/images/dynomite-logo.png
--------------------------------------------------------------------------------
/init/README.md:
--------------------------------------------------------------------------------
1 | # README
2 |
3 | The `init` directory is the location for all System V init, Upstart, systemd, etc. initialization scripts.
4 |
5 | ## systemd on RHEL 7
6 |
7 | Create the `dynomite` user.
8 |
9 | ```bash
10 | mkdir -p /usr/share/dynomite
11 |
12 | mkdir /var/run/dynomite
13 |
14 | useradd -r -M -c "Dynomite server" -s /sbin/nologin -d /usr/share/dynomite dynomite
15 |
16 | chown -R dynomite:dynomite /usr/share/dynomite
17 |
18 | chown -R dynomite:dynomite /var/run/dynomite
19 | ```
20 |
21 | Install the Dynomite service file and the associated sysconfig file.
22 |
23 | ```bash
24 | cp init/systemd_environment__dynomite /etc/sysconfig/dynomite
25 |
26 | cp init/systemd_service_rhel__dynomite.service /usr/lib/systemd/system/dynomite.service
27 |
28 | systemctl daemon-reload
29 |
30 | systemctl enable dynomite
31 |
32 | systemctl status dynomite
33 | ```
34 |
35 | ## systemd on Ubuntu 15.10
36 |
37 | Create the `dynomite` user.
38 |
39 | ```bash
40 | mkdir -p /usr/share/dynomite
41 |
42 | mkdir /var/run/dynomite
43 |
44 | useradd -r -M -c "Dynomite server" -s /sbin/nologin -d /usr/share/dynomite dynomite
45 |
46 | chown -R dynomite:dynomite /usr/share/dynomite
47 |
48 | chown -R dynomite:dynomite /var/run/dynomite
49 | ```
50 |
51 | Install the Dynomite service file and the associated sysconfig file.
52 |
53 | ```bash
54 | cp init/systemd_environment__dynomite /etc/default/dynomite
55 |
56 | cp init/systemd_service_ubuntu__dynomite.service /lib/systemd/system/dynomite.service
57 |
58 | systemctl daemon-reload
59 |
60 | systemctl enable dynomite
61 |
62 | systemctl status dynomite
63 | ```
64 |
--------------------------------------------------------------------------------
/init/systemd_environment__dynomite:
--------------------------------------------------------------------------------
1 | DYNOMITE_CONF=/etc/dynomite/dynomite.yml
2 |
--------------------------------------------------------------------------------
/init/systemd_service_rhel__dynomite.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Dynomite server
3 | ; Services that should be started before Dynomite
4 | After=network.target nss-lookup.target time-sync.target
5 | Documentation=man:dynomite(8)
6 |
7 | [Service]
8 | User=dynomite
9 | ; Default environment variable values
10 | Environment=DYNOMITE_CONF=/etc/dynomite/dynomite.yaml
11 | Type=forking
12 | ; Environment vars to customize startup
13 | EnvironmentFile=-/etc/sysconfig/dynomite
14 | ExecStart=/usr/local/sbin/dynomite -d -c $DYNOMITE_CONF -p /var/run/dynomite/dynomite.pid
15 | ; Restart dynomite every 100ms
16 | Restart=always
17 | PIDFile=/var/run/dynomite/dynomite.pid
18 |
19 | [Install]
20 | WantedBy=multi-user.target
21 |
--------------------------------------------------------------------------------
/init/systemd_service_ubuntu__dynomite.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Dynomite server
3 | ; Services that should be started before Dynomite
4 | After=network.target nss-lookup.target time-sync.target
5 | Documentation=man:dynomite(8)
6 |
7 | [Service]
8 | User=dynomite
9 | ; Default environment variable values
10 | Environment=DYNOMITE_CONF=/etc/dynomite/dynomite.yaml
11 | Type=forking
12 | ; Environment vars to customize startup
13 | EnvironmentFile=-/etc/default/dynomite
14 | ExecStart=/usr/local/sbin/dynomite -d -c $DYNOMITE_CONF -p /var/run/dynomite/dynomite.pid
15 | ; Restart dynomite every 100ms
16 | Restart=always
17 | PIDFile=/var/run/dynomite/dynomite.pid
18 |
19 | [Install]
20 | WantedBy=multi-user.target
21 |
--------------------------------------------------------------------------------
/m4/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything
2 | *
3 |
4 | # Except me
5 | !.gitignore
6 |
--------------------------------------------------------------------------------
/man/dynomite.8:
--------------------------------------------------------------------------------
1 | .TH DYNOMITE 8 "November 23, 2016" "v0.5.9"
2 | .SH NAME
3 | dynomite - a generic dynamo implementation for different key/value storage engines.
4 | .SH SYNOPSIS
5 | .B dynomite \-\-help
6 | .br
7 | .B dynomite \-\-version
8 | .br
9 | .B dynomite
10 | .RI "[\"
11 | .B "-c"
12 | .RI "conf-file]"
13 | .br
14 | .B dynomite
15 | .RB [\| \-?hVdDt \|]
16 | .RB [\| \-v
17 | .IR verbosity-level \|]
18 | .RB [\| \-o
19 | .IR output-file \|]
20 | .RB [\| -c
21 | .IR conf-file \|]
22 | .RB [\-s
23 | .IR stats-port \|]
24 | .RB [\-a
25 | .IR stats-addr \|]
26 | .RB [\-i
27 | .IR stats-interval \|]
28 | .RB [\-p
29 | .IR pid-file \|]
30 | .RB [\-m
31 | .IR mbuf-size \|]
32 | .RB [\-M
33 | .IR max-alloc-messages \|]
34 | .SH DESCRIPTION
35 | .B Dynomite
36 | is a thin, distributed Dynamo layer for different storage engines and protocols. Dynomite provides sharding and multi-data center replication. It has a shared nothing architecture with no single point of failure (SPOF) that delivers high availability (HA) even when a server, rack or entire data center goes offline.
37 | .PP
38 | Redis is currently the primary backend and protocol supported by Dynomite, while support for Memcached is partially implemented. Future versions of Dynomite will support additional backends.
39 | .PP
40 | Dynomite provides the following functionality:
41 | .IP \[bu]
42 | Linear scalability
43 | .IP \[bu]
44 | High availability (HA)
45 | .IP \[bu]
46 | Shared nothing architecture with symmetric nodes
47 | .IP \[bu]
48 | Multi-data center (DC) replication
49 | .IP \[bu]
50 | Data replication and sharding
51 | .IP \[bu]
52 | Support for any Redis client plus a specialized Dyno client for Java
53 | .IP \[bu]
54 | Reduced connections to and lower connection overhead on backend storage engines via persistent connections
55 | .IP \[bu]
56 | Observability via easily accessible statistics
57 | .SH OPTIONS
58 | .TP
59 | .BR \-h ", " \-\-help
60 | Show help about dynomite and exit.
61 | .TP
62 | .BR \-V ", " \-\-version
63 | Show dynomite version and exit.
64 | .TP
65 | .BR \-t ", " \-\-test-conf
66 | Test configuration file for syntax errors and exit.
67 | .TP
68 | .BR \-g ", " \-\-gossip
69 | Enable gossip. (default: disable)
70 | .TP
71 | .BR \-d ", " \-\-daemonize
72 | Run dynomite as a daemon in the background.
73 | .TP
74 | .BR \-D ", " \-\-describe-stats
75 | Print statistics description and exit.
76 | .TP
77 | .BI \-v\ N \fR,\ \fB\-\-verbosity= N
78 | Set logging level to
79 | .IR N .
80 | (default: 5, min: 0, max: 11)
81 | .TP
82 | .BI \-o\ file \fR,\ \fB\-\-output= file
83 | Set logging file to
84 | .IR file .
85 | .TP
86 | .BI \-c\ file \fR,\ \fB\-\-conf-file= file
87 | Set the configuration file to
88 | .IR file .
89 | .TP
90 | .BI \-p\ file \fR,\ \fB\-\-pid-file= file
91 | Set the pid file to
92 | .IR file .
93 | .TP
94 | .BI \-m\ N \fR,\ \fB\-\-mbuf-size= N
95 | Set mbuf chunk size to
96 | .IR N
97 | bytes. (default: 16384)
98 | .TP
99 | .BI \-M\ N \fR,\ \fB\-\-max-msgx= N
100 | Set the maximum number of messages to allocate to
101 | .IR N .
102 | (default: 200000)
103 | .TP
104 | .BI \-x\ N \fR,\ \fB\-\-admin-operation= N
105 | Set the size of admin operation to
106 | .IR N .
107 | (default: 0)
108 | .SH SEE ALSO
109 | .BR memcached (8),
110 | .BR redis-server (1)
111 | .br
112 | .SH AUTHOR
113 | Dynomite is developed by Netflix, Inc. and the open source community.
114 |
--------------------------------------------------------------------------------
/notes/kqueue.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Netflix/dynomite/2046b05e27f6bc55dded5e82131a8001449e5485/notes/kqueue.pdf
--------------------------------------------------------------------------------
/notes/memcache.txt:
--------------------------------------------------------------------------------
1 | - ascii:
2 |
3 | - Storage Commands (set, add, replace, append, prepend, cas):
4 |
5 | set <key> <flags> <expiry> <datalen> [noreply]\r\n<data>\r\n
6 | add <key> <flags> <expiry> <datalen> [noreply]\r\n<data>\r\n
7 | replace <key> <flags> <expiry> <datalen> [noreply]\r\n<data>\r\n
8 | append <key> <flags> <expiry> <datalen> [noreply]\r\n<data>\r\n
9 | prepend <key> <flags> <expiry> <datalen> [noreply]\r\n<data>\r\n
10 |
11 | cas <key> <flags> <expiry> <datalen> <cas unique> [noreply]\r\n<data>\r\n
12 |
13 | where,
14 | - <flags>: uint32_t : data specific client side flags
15 | - <expiry>: uint32_t : expiration time (in seconds)
16 | - <datalen>: uint32_t : size of the data (in bytes)
17 | - <data>: uint8_t[]: data block
18 | - <cas unique>: uint64_t
19 |
20 | - Retrieval Commands (get, gets):
21 |
22 | get <key>\r\n
23 | get [<key> ]+\r\n
24 |
25 | gets <key>\r\n
26 | gets [<key> ]+\r\n
27 |
28 | - Delete Command (delete):
29 |
30 | delete <key> [noreply]\r\n
31 |
32 | - Arithmetic Commands (incr, decr):
33 |
34 | incr <key> <value> [noreply]\r\n
35 | decr <key> <value> [noreply]\r\n
36 |
37 | where,
38 | - <value>: uint64_t
39 |
40 | - Misc Commands (quit)
41 |
42 | quit\r\n
43 | flush_all [<delay>] [noreply]\r\n
44 | version\r\n
45 | verbosity <level> [noreply]\r\n
46 |
47 | - Statistics Commands
48 |
49 | stats\r\n
50 | stats <args>\r\n
51 |
52 | - Error Responses:
53 |
54 | ERROR\r\n
55 | CLIENT_ERROR [error]\r\n
56 | SERVER_ERROR [error]\r\n
57 |
58 | where,
59 | ERROR means client sent a non-existent command name
60 | CLIENT_ERROR means that command sent by the client does not conform to the protocol
61 | SERVER_ERROR means that there was an error on the server side that made processing of the command impossible
62 |
63 | - Storage Command Responses:
64 |
65 | STORED\r\n
66 | NOT_STORED\r\n
67 | EXISTS\r\n
68 | NOT_FOUND\r\n
69 |
70 | where,
71 | STORED indicates success.
72 | NOT_STORED indicates the data was not stored because condition for an add or replace wasn't met.
73 | EXISTS indicates that the item you are trying to store with a cas has been modified since you last fetched it.
74 | NOT_FOUND indicates that the item you are trying to store with a cas does not exist.
75 |
76 | - Delete Command Response:
77 |
78 | NOT_FOUND\r\n
79 | DELETED\r\n
80 |
81 | - Retrieval Responses:
82 |
83 | END\r\n
84 | VALUE <key> <flags> <datalen> [<cas unique>]\r\n<data>\r\nEND\r\n
85 | VALUE <key> <flags> <datalen> [<cas unique>]\r\n<data>\r\n[VALUE <key> <flags> <datalen> [<cas unique>]\r\n<data>\r\n]+\r\nEND\r\n
86 |
87 | - Arithmetic Responses:
88 |
89 | NOT_FOUND\r\n
90 | <value>\r\n
91 |
92 | where,
93 | - <value>: uint64_t : new key value after incr or decr operation
94 |
95 | - Statistics Response
96 | [STAT <name> <value>\r\n]+END\r\n
97 |
98 | - Misc Response
99 |
100 | OK\r\n
101 | VERSION <version>\r\n
102 |
103 | - Notes:
104 | - set always creates the mapping irrespective of whether it is present or not.
105 | - add, adds only if the mapping is not present
106 | - replace, only replaces if the mapping is present
107 | - append and prepend command ignore flags and expiry values
108 | - noreply instructs the server to not send the reply even if there is an error.
109 | - decr of 0 is 0, while incr of UINT64_MAX is 0
110 | - maximum length of the key is 250 characters
111 | - expiry of 0 means that item never expires, though it could be evicted from the cache
112 | - non-zero expiry is either unix time (# seconds since 01/01/1970) or,
113 | offset in seconds from the current time (< 60 x 60 x 24 x 30 seconds = 30 days)
114 | - expiry time is with respect to the server (not client)
115 | - <datalen> can be zero and when it is, the data block is empty.
116 |
117 | - Thoughts:
118 | - ascii protocol is easier to debug - think using strace or tcpdump to see
119 | protocol on the wire, Or using telnet or netcat or socat to build memcache
120 | requests and responses
121 | http://stackoverflow.com/questions/2525188/are-binary-protocols-dead
122 |
123 | - http://news.ycombinator.com/item?id=1712788
124 |
--------------------------------------------------------------------------------
/scripts/Florida/florida.js:
--------------------------------------------------------------------------------
1 | var http = require('http');
2 | var url = require('url');
3 | var fs = require('fs');
4 |
5 | // Settings
6 | var port = process.env.DYNOMITE_FLORIDA_PORT ?
7 | process.env.DYNOMITE_FLORIDA_PORT : 8080;
8 |
9 | var apiUrl = process.env.DYNOMITE_FLORIDA_REQUEST ?
10 | process.env.DYNOMITE_FLORIDA_REQUEST : '/REST/v1/admin/get_seeds';
11 |
12 | // Parse command line options
13 | var seedsFilePath = process.argv[2] && process.argv[2].length > 0 ?
14 | process.argv[2] : '/etc/dynomite/seeds.list';
15 | var enableDebug = process.argv[3] === 'debug' ? true : false;
16 |
17 | http.createServer(function(req, res) {
18 | var path = url.parse(req.url).pathname;
19 | enableDebug && console.log('Request: ' + path);
20 |
21 | res.writeHead(200, {'Content-Type': 'application/json'});
22 | if (path === apiUrl) {
23 | fs.readFile(seedsFilePath, 'utf-8', function(err, data) {
24 | if (err) console.log(err);
25 |
26 | var now = (new Date()).toJSON();
27 | var seeds = data.trim().replace(/\n/g, '|');
28 |
29 | enableDebug && console.log(now + ' - get_seeds [' + seeds + ']');
30 | res.write(seeds);
31 | res.end();
32 | });
33 | } else {
34 | res.end();
35 | }
36 | }).listen(port);
37 |
38 | console.log('Server is listening on ' + port);
39 |
--------------------------------------------------------------------------------
/scripts/Florida/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "florida.js",
3 | "version": "1.0.0",
4 | "description": "Service that returns list of seeds.",
5 | "keywords": [
6 | "Dynomite",
7 | "Florida",
8 | "Netflix",
9 | "Redis",
10 | "Memcached",
11 | "Dynamo"
12 | ],
13 | "main": "florida.js",
14 | "scripts": {
15 | "test": "echo \"Error: no test specified\" && exit 1",
16 | "debug": "node florida.js ./seeds.list debug"
17 | },
18 | "repository": {
19 | "type": "git",
20 | "url": "git+https://github.com/Netflix/dynomite.git"
21 | },
22 | "contributors": [
23 | {
24 | "name": "Diego Pacheco",
25 | "email": "diego.pacheco.it@gmail.com",
26 | "url": "https://github.com/diegopacheco"
27 | },
28 | {
29 | "name": "Akbar S. Ahmed",
30 | "email": "akbar501@gmail.com",
31 | "url": "https://grockdoc.com/@akbar501"
32 | }
33 | ],
34 | "license": "Apache-2.0",
35 | "bugs": {
36 | "url": "https://github.com/Netflix/dynomite/issues"
37 | },
38 | "homepage": "https://github.com/Netflix/dynomite#readme"
39 | }
40 |
--------------------------------------------------------------------------------
/scripts/Florida/seeds.list:
--------------------------------------------------------------------------------
1 | 192.168.6.41:8101:rc1:dc1:2147483647
2 | 192.168.6.42:8101:rc2:dc1:0
3 | 192.168.6.43:8101:rc2:dc1:2147483647
4 |
--------------------------------------------------------------------------------
/scripts/dynomite-manager/bash-alias:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | alias cls=clear
4 | alias dlog='tail -f -n 2000 /logs/system/dynomite/dynomite.log'
5 | alias dmlog='tail -f -n 2000 /logs/system/dynomite-manager/dynomite-manager.log'
6 | alias rlog='tail -f -n 2000 /var/log/redis_6379.log'
7 |
8 |
--------------------------------------------------------------------------------
/scripts/dynomite-manager/cassandra:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # chkconfig: 2345 95 20
3 | # description: This script does some stuff
4 | # processname: java
5 |
6 | start() {
7 | echo "Starting cassandra..."
8 | export JAVA_HOME=/home/ec2-user/jdk1.8.0_45
9 | export JRE_HOME=/home/ec2-user/jdk1.8.0_45/jre
10 | export PATH=$PATH:/home/ec2-user/jdk1.8.0_45/bin:/home/ec2-user/jdk1.8.0_45/jre/bin
11 |
12 | cd /home/ec2-user/apache-cassandra-2.1.14
13 | bin/cassandra start &
14 | }
15 |
16 | stop() {
17 | echo "stop"
18 | PID=`ps aux | grep cassandra | grep -v grep | awk '{print $2}'`
19 | if [[ "" != "$PID" ]]; then
20 | echo "killing $PID"
21 | sudo kill -9 $PID
22 | fi
23 | }
24 |
25 | case "$1" in start)
26 | start
27 | ;;
28 | stop)
29 | stop
30 | ;;
31 | *)
32 |
33 | echo $"Usage: $0 {start|stop}"
34 | RETVAL=1
35 | esac
36 | exit 0
37 |
38 |
39 |
40 |
--------------------------------------------------------------------------------
/scripts/dynomite-manager/dynomite-manager:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # chkconfig: 2345 95 20
3 | # description: This script does some stuff
4 | # processname: java
5 |
6 | export JAVA_HOME=/home/ec2-user/jdk1.8.0_45
7 | export JRE_HOME=/home/ec2-user/jdk1.8.0_45/jre
8 | export PATH=$PATH:/home/ec2-user/jdk1.8.0_45/bin:/home/ec2-user/jdk1.8.0_45/jre/bin
9 |
10 | export ASG_NAME="asg_dynomite"
11 | export EC2_REGION="us-west-2"
12 | export AUTO_SCALE_GROUP="asg_dynomite"
13 |
14 | start() {
15 | echo "Starting Dynomite Manager..."
16 | cd /home/ec2-user/dynomite-manager/dynomite-manager/
17 | /home/ec2-user/dynomite-manager/dynomite-manager/gradlew jettyRun > /logs/system/dynomite-manager/dynomite-manager.log &
18 | }
19 |
20 | stop() {
21 | echo "stoping Dynomite Manager... "
22 | PID=`ps -ef | grep gradlew | awk '{print $2}' ORS=' ' | awk '{print $1}'`
23 | if [[ "" != "$PID" ]]; then
24 | echo "killing $PID"
25 | sudo kill -9 $PID
26 | fi
27 | }
28 |
29 | debug() {
30 | echo "Starting Dynomite Manager for DEBUG..."
31 | cd /home/ec2-user/dynomite-manager/dynomite-manager/
32 | export GRADLE_OPTS="-Xdebug -Xrunjdwp:transport=dt_socket,address=8000,server=y,suspend=n"
33 | ./gradlew jettyRun &
34 | }
35 |
36 |
37 | case "$1" in
38 | "start")
39 | start
40 | ;;
41 | "debug")
42 | debug
43 | ;;
44 | "stop")
45 | stop
46 | ;;
47 | *)
48 |
49 | echo $"Usage: $0 {start|stop|debug}"
50 | RETVAL=1
51 | esac
52 | exit 0
53 |
54 |
55 |
56 |
--------------------------------------------------------------------------------
/scripts/dynomite-manager/kill_redis.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | sudo service redis_6379 stop
3 |
4 |
--------------------------------------------------------------------------------
/scripts/dynomite-manager/launch_nfredis.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | sudo service redis_6379 start
3 |
4 |
--------------------------------------------------------------------------------
/scripts/dynomite/dyn_mc_test.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | from optparse import OptionParser
4 | import configparser
5 | import logging
6 | import time
7 | import os
8 | import re
9 | import sys
10 | import errno
11 | from datetime import datetime
12 | from datetime import timedelta
13 | import threading
14 | import random
15 |
16 | from logging import debug, info, warning, error
17 |
18 |
19 | import memcache
20 |
21 |
22 | current_milli_time = lambda: int(round(time.time() * 1000))
23 |
24 |
25 | def main():
26 | parser = OptionParser(usage="usage: %prog [options] filename",
27 | version="%prog 1.0")
28 | parser.add_option("-t", "--threads",
29 | action="store",
30 | dest="th",
31 | default="1",
32 | help="Number of client threads")
33 | parser.add_option("-o", "--operation",
34 | action="store",
35 | dest="operation",
36 | default="write",
37 | help="Operation to perform: write, read, del, swrite (single write), sread (polling single read), and sdel")
38 | parser.add_option("-l", "--logfile",
39 | action="store",
40 | dest="logfle",
41 | default="/tmp/dynomite-test.log",
42 | help="log file location")
43 | parser.add_option("-H", "--host",
44 | action="store",
45 | dest="host",
46 | default="127.0.0.1",
47 | help="targe host ip")
48 | parser.add_option("-P", "--port",
49 | action="store",
50 | dest="port",
51 | default="8102",
52 | help="target port")
53 | parser.add_option("-S", "--skipkeys",
54 | action="store",
55 | dest="skipkeys",
56 | default="0",
57 | help="target port")
58 | parser.add_option("-n", "--numkeys",
59 | action="store",
60 | dest="numkeys",
61 | default="100",
62 | help="Number of keys\n")
63 |
64 | if len(sys.argv) == 1:
65 | print("Learn some usages: " + sys.argv[0] + " -h")
66 | sys.exit(1)
67 |
68 |
69 | (options, args) = parser.parse_args()
70 |
71 |
72 |
73 | #logger = logging.getLogger(log_name)
74 | #logger.setLevel(logging.DEBUG)
75 | #fh = logging.handlers.TimedRotatingFileHandler('/tmp/dynomite-test.log', when="midnight")
76 | #fh.setLevel(logging.DEBUG)
77 | #formatter = logging.Formatter('%(asctime)s: %(name)s: %(levelname)s: %(message)s')
78 | #fh.setFormatter(formatter)
79 | #logger.addHandler(fh)
80 |
81 | print(options)
82 |
83 | logging.basicConfig(level=logging.DEBUG,
84 | format='%(asctime)s %(levelname)s %(message)s',
85 | filename='/tmp/dynomite-test.log',
86 | filemode='w')
87 |
88 | #should do some try/catch but I am lazy now
89 |
90 | mc = memcache.Client([options.host + ':' + options.port], debug=0)
91 | numkeys = int(options.numkeys)
92 | start = int(options.skipkeys)
93 | end = int(options.numkeys)
94 | print('start: ' + str(start) + ' and end: ' + str(end))
95 |
96 | if 'write' == options.operation :
97 | for i in range(start, end ) :
98 | mc.set('key_' + str(i), 'value_' + str(i))
99 |
100 | elif 'read' == options.operation :
101 | error_count = 0
102 | for i in range(start, end ) :
103 | value = mc.get('key_' + str(i))
104 | if value is None:
105 | error_count = error_count + 1
106 | print('No value for key: ' + 'key_' + str(i))
107 | else :
108 | print('key_' + str(i) + ' has value : ' + value)
109 | print('Errour count: ' + str(error_count))
110 | elif 'mread' == options.operation :
111 | n = (end - start) / 10
112 | n = min(n, 10)
113 | print(n)
114 | keys = []
115 | i = 0
116 | while (i < n) :
117 | ran = random.randint(start, end-1)
118 | key = 'key_' + str(ran)
119 | if key not in keys :
120 | keys.append(key)
121 | i = i + 1
122 | print(keys)
123 |
124 | #values = mc.get_multi(['key_1', 'key_2', 'key_3'])
125 | while (len(keys) > 0) :
126 | values = mc.get_multi(keys)
127 | print(values)
128 | for key in values.keys():
129 | keys.remove(key)
130 |
131 |
132 |
133 | elif 'del' == options.operation :
134 | for i in range(start, end ) :
135 | mc.delete('key_' + str(i))
136 | elif 'swrite' == options.operation :
137 | mc.set('key_time', str(current_milli_time()))
138 | elif 'sread' == options.operation :
139 | is_stop = False
140 |
141 | while not is_stop:
142 | value = mc.get('key_time')
143 | if value != None :
144 | is_stop = True
145 |
146 | print('Estimated elapsed time : ' + str(current_milli_time() - int(value)))
147 |
148 | elif 'sdel' == options.operation :
149 | mc.delete('key_time')
150 |
151 |
152 | mc.disconnect_all()
153 |
154 |
# Script entry point: run the CLI test driver when executed directly.
if __name__ == '__main__':
    main()
157 |
158 |
--------------------------------------------------------------------------------
/scripts/dynomite/dyn_redis_purge.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | from optparse import OptionParser
4 | import configparser
5 | import logging
6 | import time
7 | import os
8 | import re
9 | import sys
10 | import errno
11 | from datetime import datetime
12 | from datetime import timedelta
13 | import threading
14 | import random
15 | import string
16 |
17 | from logging import debug, info, warning, error
18 |
19 | import redis
20 |
# --- Module-level configuration and shared state ---

# Default number of redis connections (not currently read by this script).
num_conn = 5
# Progress-dot frequency (not currently read by this script).
dot_rate = 10


def current_milli_time():
    """Return the current wall-clock time in whole milliseconds."""
    # Was a lambda assigned to a name (PEP 8 E731); a def is equivalent
    # and gives the function a proper __name__ for tracebacks.
    return int(round(time.time() * 1000))


# Lock available for synchronizing worker threads (acquire/release calls
# in OperationThread.run are currently commented out).
threadLock = threading.Lock()
# Live OperationThread instances; main() appends and joins them at exit.
threads = []
# Shared list of redis connections, grown by get_conns().
conns = []
27 |
28 |
class OperationThread(threading.Thread):
    """Worker thread that performs one purge operation against a node.

    Only the 'rebalance' operation is recognized; any other operation
    name makes run() a no-op after its startup message.
    """

    def __init__(self, threadID, name, options):
        """Store the thread id, display name, and parsed CLI options."""
        super().__init__()
        self.threadID = threadID
        self.name = name
        self.options = options
        self.filename = options.filename

    def run(self):
        """Announce startup, then dispatch on the requested operation."""
        opts = self.options
        print("Starting thread: " + self.name + ", filename: " + self.filename)

        # Lock acquire/release kept disabled, as in the original:
        # threads are intentionally allowed to run concurrently.
        #threadLock.acquire()

        if opts.operation == 'rebalance':
            rebalance_ops(self.filename, opts.host, opts.port, db=0)

        #threadLock.release()
54 |
55 |
56 |
def get_conns(host, port, db, num):
    """Open `num` StrictRedis connections to host:port/db and append them
    to the shared module-level `conns` list.

    Returns the shared list, which may already hold connections from
    earlier calls (the list is global and never cleared).
    """
    for _ in range(num):
        # Bug fix: the caller-supplied `db` was previously ignored and
        # every connection was hard-coded to db=0.
        conns.append(redis.StrictRedis(host, port, db=db))
    return conns
61 |
def generate_value(i, payload_prefix="payload"):
    """Return a synthetic payload string for key index `i`.

    Bug fix: `payload_prefix` was read as a module global that is never
    defined anywhere in this file, so every call raised NameError. It is
    now a parameter with a sensible default, which is backward-compatible
    for single-argument callers.
    """
    return payload_prefix + '_' + str(i)
64 |
65 |
def rebalance_ops(filename, host, port, db):
    """Delete every key listed (one per line) in `filename` from the
    redis server at host:port.

    Sleeps 1 second after every 5000 deletions to throttle load on the
    node, and reconnects transparently when the server answers with a
    ResponseError.
    """
    conns = get_conns(host, port, db, 2)
    r1 = conns[0]
    deleted = 0
    # Fix: use a context manager so the file handle is always closed
    # (open(...).readlines() leaked it), and iterate lazily instead of
    # slurping the whole key file into memory.
    with open(filename, 'r') as keyfile:
        for line in keyfile:
            print(line)
            key = line.strip('\n')
            if key == '':
                continue
            try:
                r1.delete(key)
                deleted += 1
                if deleted % 5000 == 0:
                    # Throttle so the purge does not overwhelm the node.
                    time.sleep(1)
            except redis.exceptions.ResponseError:
                print("reconnecting ...")
                r1 = redis.StrictRedis(host, port, db=0)
86 |
87 |
88 |
def main():
    """Parse CLI options and fan purge work out across worker threads.

    Spawns `--threads` OperationThread workers, each given the same
    parsed options, then joins them all before exiting.
    """
    parser = OptionParser(usage="usage: %prog [options] filename",
                          version="%prog 1.0")
    parser.add_option("-t", "--threads",
                      action="store",
                      dest="th",
                      default="1",
                      help="Number of client threads. Default is 1")
    parser.add_option("-o", "--operation",
                      action="store",
                      dest="operation",
                      default="rebalance",
                      help="Operation to perform: rebalance")
    parser.add_option("-l", "--logfile",
                      action="store",
                      dest="logfile",  # was misspelled "logfle"; unused internally
                      default="/tmp/dynomite-test.log",
                      help="log file location. Default is /tmp/dynomite-test.log")
    parser.add_option("-H", "--host",
                      action="store",
                      dest="host",
                      default="127.0.0.1",
                      help="target host ip. Default is 127.0.0.1")
    parser.add_option("-P", "--port",
                      action="store",
                      dest="port",
                      default="8102",
                      help="target port. Default is 8102")
    parser.add_option("-f", "--filename",
                      action="store",
                      dest="filename",
                      default="0",
                      # Fixed copy-pasted help text that said "target port".
                      help="file of keys to purge, one per line. Default is 0")

    if len(sys.argv) == 1:
        print("Learn some usages: " + sys.argv[0] + " -h")
        sys.exit(1)

    (options, args) = parser.parse_args()

    print(options)

    num_threads = int(options.th)

    # Start one worker per requested thread. (The original loop had an
    # if/else whose two branches were byte-identical; collapsed here.)
    for i in range(num_threads):
        thread = OperationThread(i, "Thread-" + str(i), options)
        thread.start()
        threads.append(thread)

    # Wait for every worker to finish before exiting.
    for t in threads:
        t.join()

    print()
151 |
152 |
# Script entry point: run the purge driver when executed directly.
if __name__ == '__main__':
    main()
155 |
156 |
--------------------------------------------------------------------------------
/scripts/dynomite/generate_yamls.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | '''
4 | script for generating dynomite yaml files for every node in a cluster.
This script should be run once per rack, for all nodes in the rack, so that the tokens are distributed equally.
6 | usage: