├── .gitignore ├── .gitmodules ├── AUTHORS ├── BTREEFORMAT ├── Capfile ├── ChangeLog ├── LICENSE ├── NEWS ├── NOTES ├── PROTOCOL ├── README.markdown ├── Rakefile ├── XHASHFORMAT ├── bin └── dynomite ├── c ├── bloom.c ├── bloom.h ├── bloom_drv.c ├── fnv.c ├── fnv.h ├── fnv_drv.c ├── murmur.c ├── murmur.h ├── murmur_drv.c └── ulimit_drv.c ├── conf ├── default.conf └── default.config ├── config.json ├── deps └── thrift │ ├── COPYING │ ├── LICENSE │ ├── Makefile │ ├── README │ ├── build │ ├── beamver │ ├── buildtargets.mk │ ├── colors.mk │ ├── docs.mk │ ├── mime.types │ ├── otp.mk │ ├── otp_subdir.mk │ └── raw_test.mk │ ├── include │ ├── thrift_constants.hrl │ └── thrift_protocol.hrl │ ├── src │ ├── Makefile │ ├── test_handler.erl │ ├── test_service.erl │ ├── thrift.app.src │ ├── thrift.appup.src │ ├── thrift_app.erl │ ├── thrift_base64_transport.erl │ ├── thrift_binary_protocol.erl │ ├── thrift_buffered_transport.erl │ ├── thrift_client.erl │ ├── thrift_disk_log_transport.erl │ ├── thrift_file_transport.erl │ ├── thrift_framed_transport.erl │ ├── thrift_http_transport.erl │ ├── thrift_processor.erl │ ├── thrift_protocol.erl │ ├── thrift_server.erl │ ├── thrift_service.erl │ ├── thrift_socket_server.erl │ ├── thrift_socket_transport.erl │ ├── thrift_sup.erl │ └── thrift_transport.erl │ └── vsn.mk ├── dist_config.json ├── doc ├── BLOOM_FORMAT.txt ├── dynomite.R ├── javascripts │ ├── excanvas.pack.js │ ├── jquery.flot.js │ ├── jquery.flot.pack.js │ └── jquery.js └── stats.html ├── ebin ├── .gitignore └── dynomite.app ├── elibs ├── block_server.erl ├── bloom.erl ├── bootstrap.erl ├── commands.erl ├── configuration.erl ├── dets_storage.erl ├── dict_storage.erl ├── dmerkle.erl ├── dmtree.erl ├── dummy_server.erl ├── dynomite.erl ├── dynomite_app.erl ├── dynomite_pb.erl ├── dynomite_prof.erl ├── dynomite_rpc.erl ├── dynomite_sup.erl ├── dynomite_thrift_client.erl ├── dynomite_thrift_service.erl ├── dynomite_web.erl ├── ets_storage.erl ├── fail_storage.erl ├── fnv.erl ├── 
fs_storage.erl ├── lib_misc.erl ├── load_gen.erl ├── mediator.erl ├── membership.erl ├── mnesia_storage.erl ├── murmur.erl ├── osmos_storage.erl ├── partitions.erl ├── rate.erl ├── socket_server.erl ├── stats_server.erl ├── storage_manager.erl ├── storage_server.erl ├── storage_server_sup.erl ├── stream.erl ├── sync_manager.erl ├── sync_server.erl ├── sync_server_sup.erl ├── tc_storage.erl ├── ulimit.erl ├── vector_clock.erl └── web_rpc.erl ├── escripts └── benchmark.escript ├── etest ├── .placeholder ├── bloom_test.erl ├── bootstrap_test.erl ├── configuration_test.erl ├── dmerkle_test.erl ├── dmtree_test.erl ├── fixtures │ └── dm_adjacentblocks.idx ├── lib_misc_test.erl ├── mediator_test.erl ├── membership_test.erl ├── mock.erl ├── mock_genserver.erl ├── partitions_test.erl ├── rate_test.erl ├── storage_manager_test.erl ├── storage_server_test.erl ├── stream_test.erl ├── sync_manager_test.erl ├── t.erl └── vector_clock_test.erl ├── gen-erl ├── dynomite_constants.hrl ├── dynomite_thrift.erl ├── dynomite_thrift.hrl ├── dynomite_types.erl └── dynomite_types.hrl ├── gen-rb ├── Dynomite.rb ├── dynomite_constants.rb └── dynomite_types.rb ├── if └── dynomite.thrift ├── include ├── chunk_size.hrl ├── common.hrl ├── config.hrl ├── couch_db.hrl ├── dmerkle.hrl └── profile.hrl ├── pylibs ├── dynomite │ ├── Dynomite.py │ ├── __init__.py │ ├── client.py │ ├── constants.py │ ├── thrift_client.py │ └── ttypes.py ├── setup.cfg ├── setup.py ├── test │ ├── basic_test.py │ ├── ec2 │ │ └── ec2_load_test.py │ └── functional │ │ ├── python_client.rst │ │ └── python_client_fixtures.py └── tools │ ├── dbench.py │ ├── dbench_thrift.py │ ├── dynomite-remote │ └── load_thrift.py ├── releases └── dynomite.rel ├── rlibs ├── analyze_bench.rb ├── cli │ ├── bench.rb │ ├── console.rb │ ├── leave.rb │ ├── membership.rb │ ├── production.rb │ ├── shared │ │ └── common.rb │ ├── start.rb │ ├── status.rb │ └── stop.rb ├── distributed_bench.rb ├── dynomite.rb ├── stress_test.rb └── thrift_bench.rb ├── 
rspecs ├── dynomite_spec.rb └── spec_helper.rb ├── test.py └── web ├── images └── dynomite_logo.png ├── index.html ├── javascripts ├── canvastext.js ├── color.js ├── domec.js ├── drawarrows.js ├── jquery-1.2.6.js ├── jquery.timer.js └── viz.js ├── load.html ├── partitions.html ├── stylesheets └── master.css └── sync.html /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | .\#* 3 | \#*\# 4 | erl_crash.dump 5 | *.beam 6 | deps/thrift/ebin 7 | *.pyc 8 | *.egg 9 | *.egg-info 10 | etest/log/* 11 | test.config 12 | doc/*_coverage.html 13 | servers.rb 14 | priv/*.so 15 | c/*.o 16 | build/* 17 | releases/dynomite-* 18 | *.pid 19 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "deps/mochiweb"] 2 | path = deps/mochiweb 3 | url = git://github.com/cliffmoon/mochiweb.git 4 | 5 | -------------------------------------------------------------------------------- /AUTHORS: -------------------------------------------------------------------------------- 1 | Authors of and contributors to dynomite. 2 | 3 | Cliff Moon 4 | Todd Lipcon 5 | Jason Pellerin 6 | Nick Gerakines 7 | Christopher Brown 8 | 9 | The plumbing for the couch storage engine is from the couchdb project: http://incubator.apache.org/couchdb/ 10 | 11 | MurmurHash2 is courtesy of Austin Appleby -------------------------------------------------------------------------------- /BTREEFORMAT: -------------------------------------------------------------------------------- 1 | Implementation of a btree based merkle tree. The original merkle tree would not scale out to a large number of keys due to the limitation of keeping the tree in main memory. 
Therefore we will try and store the 2 | 3 | btree file: 4 | file header: version:1, blocksize:32, freepointer:64, rootpointer:64, 5 | freekeys1:64, freekeys2:64, freekeys3:64, freekeys4:64, freekeys5:64 6 | reserved/bytes: 7 | header size: 85 8 | 9 | node:{0,m:32,[keyhash:32],[childhash:32,child:64]} 10 | 11 | leaf:{1,m:32,[keyhash:32,keypointer:64,valhash:32]} 12 | 13 | freenode:{2,nextpointer:64,0:(blocksize-9)} 14 | 15 | for nodes: 16 | blocksize = 1 + 4 + D*4 + (D+1)*12 17 | D = (blocksize-17)/16 18 | 19 | for leaves: 20 | blocksize = 5 + D*16 21 | D = (blocksize-5)/16 22 | 23 | 65521 24 | 25 | 26 | 27 | key file: 28 | 29 | [key\0,key\0,key\0] 30 | 31 | keys allocation 32 | 33 | l = 2**4 34 | 35 | m = 2**64 36 | 37 | 38 | 39 | header size = 85 + (√blocksize - 4)*8 -------------------------------------------------------------------------------- /Capfile: -------------------------------------------------------------------------------- 1 | #Define your servers in servers.rb like so: 2 | 3 | 4 | # role :dyn, "server1", 5 | # "server2", 6 | # "server3", 7 | # "server4" 8 | # 9 | # role :tester, "server5" 10 | 11 | load "servers.rb" 12 | 13 | set :storage, (ENV["STORAGE"] || "dets_storage") 14 | 15 | namespace :dynomite do 16 | desc <<-EOF 17 | Deploy dynomite via rsync, configure the data directories 18 | and launch one node per server. 
19 | EOF 20 | task :default, :roles => :dyn do 21 | deploy.rsync 22 | deploy.compile 23 | deploy.data_reset 24 | deploy.config 25 | deploy.start 26 | end 27 | 28 | task :bench, :roles => :tester do 29 | run "ruby dynomite/rlibs/distributed_bench.rb" 30 | end 31 | 32 | task :stop, :roles => :dyn do 33 | deploy.stop 34 | end 35 | 36 | namespace :deploy do 37 | task :rsync do 38 | username = ENV['USER'] 39 | 40 | execute_on_servers(options) do |servers| 41 | servers.each do |server| 42 | puts %Q(rsync -avz -e ssh "./" "#{username}@#{server}:dynomite" --exclude ".git" --exclude "etest/log") 43 | `rsync -avz -e ssh "./" "#{username}@#{server}:dynomite" --exclude ".git" --exclude "etest/log"` 44 | end 45 | end 46 | end 47 | 48 | task :compile do 49 | run "cd dynomite && rake clean && rake native default" 50 | end 51 | 52 | task :data_reset, :roles => :dyn do 53 | run "rm -rf /bfd/dyn-int-data dyn-int-log/*" 54 | run "mkdir -p /bfd/dyn-int-data" 55 | run "mkdir -p dyn-int-log" 56 | end 57 | 58 | task :config, :roles => :dyn do 59 | configuration = ENV["CONFIG"] || "dist_config.json" 60 | contents = File.read(configuration) 61 | put(contents, "./dynomite/config.json") 62 | end 63 | 64 | task :start, :roles => :dyn, :depends => [:data_reset, :config] do 65 | execute_on_servers(options) do |servers| 66 | first_server = servers.shift 67 | shortname = first_server.to_s.split('.').first 68 | first = sessions[first_server] 69 | rest = servers.map {|s| sessions[s]} 70 | puts shortname 71 | if ENV["SERVERS"] 72 | rest = rest[0...(ENV["SERVERS"].to_i-1)] 73 | end 74 | Command.process("./dynomite/bin/dynomite start -c config.json -l ~/dyn-int-log -d", [first], options.merge(:logger => logger)) 75 | sleep(3) 76 | Command.process("./dynomite/bin/dynomite start -c config.json -j dynomite@#{shortname} -l ~/dyn-int-log -d", rest, options.merge(:logger => logger)) 77 | end 78 | end 79 | 80 | task :stop, :roles => :dyn do 81 | run "./dynomite/bin/dynomite stop" 82 | end 83 | end 84 | 85 | 
end 86 | 87 | def put_file(path, remote_path, options = {}) 88 | execute_on_servers(options) do |servers| 89 | servers.each do |server| 90 | logger.info "uploading #{File.basename(path)} to #{server}" 91 | sftp = sessions[server].sftp 92 | sftp.connect unless sftp.state == :open 93 | sftp.put_file path, remote_path 94 | logger.debug "done uploading #{File.basename(path)} to #{server}" 95 | end 96 | end 97 | end -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2008, Powerset, Inc 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 5 | 6 | * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 7 | * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 8 | * Neither the name of Powerset, Inc nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 9 | 10 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- /NEWS: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/moonpolysoft/dynomite/a5618dcbe17b16cefdc9c567f27a1f4445aee005/NEWS -------------------------------------------------------------------------------- /NOTES: -------------------------------------------------------------------------------- 1 | - bad things that happen when nodes share a data dir: 2 | - nodes after the 2nd do not join correctly on startup 3 | - seeming instability in get/put as nodes join/leave? 4 | 5 | - missing tests for node joining? 6 | 7 | - after activating a new node in a group, got an error when getting a key: 8 | 9 | 10 | Error in process <0.774.0> on node 'b5@localhost' with exit value: {{case_clause,{exit,{{function_clause,[{vector_clock,resolve,[not_found,{[{'b1@localhost',1.227209e+09},{'b2@localhost',1.227212e+09},{'b3@localhost',1.227212e+09},{'b4@localhost',1.227212e+09}],[<<50 bytes>>]}]},{mediator,internal_get,2},{mediator,'-handle_call/3-fun-0-',3}]},{gen_server... 11 | 12 | 13 | - is gossip necessary? it's very busy, can we instead just use erlang node 14 | monitoring? 15 | 16 | 17 | ec2 baseline 18 | 19 | 4 clients on 1 server, 4 nodes: 20 | 21 | [root@domU-12-31-38-00-A1-D8 pylibs]# PYTHONPATH=. ./tools/dbench_thrift.py -n 1000 -c 4 22 | . . . . 
23 | 4 client(s) 1000 request(s) 288.8384120.3s 24 | get avg: 19.2921230.3ms mean: 7.6100830.3ms 99.9: 128.2091140.3ms 25 | put avg: 52.9174800.3ms mean: 44.9030400.3ms 99.9: 192.3902030.3ms 26 | 27 | 19 clients on 1 server, 4 nodes: 28 | 29 | [root@domU-12-31-38-00-A1-D8 pylibs]# PYTHONPATH=. ./tools/dbench_thrift.py -n 1000 -c 10 30 | . . . . . . . . . . 31 | 10 client(s) 1000 request(s) 2450.8665540.3s 32 | get avg: 69.8761190.3ms mean: 63.1911750.3ms 99.9: 479.4890880.3ms 33 | put avg: 175.2105370.3ms mean: 164.6809580.3ms 99.9: 581.4800260.3ms 34 | -------------------------------------------------------------------------------- /PROTOCOL: -------------------------------------------------------------------------------- 1 | The dynomite datastore protocol has two basic operations: get and put. 2 | 3 | request: 4 | get keylength key\n 5 | 6 | responses: 7 | fail reason\n 8 | not_found\n 9 | succ items ctx_length ctx (data_length data)+\n 10 | 11 | request: 12 | put keylength key ctx_length ctx data_length data\n 13 | 14 | responses: 15 | fail reason\n 16 | succ n_of_servers_stored\n 17 | 18 | request: 19 | has keylength key\n 20 | 21 | responses: 22 | fail reason\n 23 | yes n_of_servers_stored\n 24 | no n_of_servers_stored\n 25 | 26 | request: 27 | del keylength key\n 28 | 29 | responses: 30 | fail reason\n 31 | succ n_of_severs_deleted\n 32 | 33 | request: 34 | close\n 35 | 36 | response: 37 | close\n -------------------------------------------------------------------------------- /README.markdown: -------------------------------------------------------------------------------- 1 | NOTICE 2 | ======== 3 | 4 | If the lack of commits over the past year isn't a clue, this is your warning that Dynomite is a dead project and is no longer 5 | being maintained. At some point in the future my employer may allow me to push out the improvements that I've made 6 | since I was barred from pushing code publicly. 
But I would not count on it, and I would not recommend you use Dynomite 7 | for anything other than a functional design document on how to build a Dynamo clone. 8 | 9 | If you need an erlang Dynamo clone for production use I would recommend [Riak](http://riak.basho.com/). 10 | 11 | If you need something with a bigtable style data model then I recommend [Cassandra](http://cassandra.apache.org/). 12 | 13 | 14 | 15 | Dynomite 16 | ------- 17 | 18 | This is dynomite. It is a clone of the amazon dynamo key value store written in Erlang. 19 | 20 | [Amazon's Dynamo](http://www.allthingsdistributed.com/2007/10/amazons_dynamo.html) 21 | 22 | Documentation 23 | --- 24 | 25 | * [Introduction](http://wiki.github.com/cliffmoon/dynomite/home) 26 | * [Getting Started](http://wiki.github.com/cliffmoon/dynomite/getting-started) 27 | * [Clustering Guide](http://wiki.github.com/cliffmoon/dynomite/clustering-guide) 28 | 29 | IRC 30 | --- 31 | 32 | channel #dynomite on irc.freenode.net 33 | 34 | Mailing List 35 | --- 36 | 37 | [Dynomite Mailing List](http://groups.google.com/group/dynomite-users) 38 | 39 | TL;DR Getting Started 40 | ---- 41 | 42 | git clone git://github.com/cliffmoon/dynomite.git 43 | cd dynomite 44 | git submodule init 45 | git submodule update 46 | rake 47 | ./bin/dynomite start -c config.json 48 | 49 | -------------------------------------------------------------------------------- /XHASHFORMAT: -------------------------------------------------------------------------------- 1 | This is the file system layout for the xhash storage system. The implementation is based on the data structure described by Split Ordered Lists (Shalev, Shavit.) The basic data structure is a linked list in the data file. A hashed index into the linked list is maintained in a separate file. 
The main data file format looks like so: 2 | 3 | header 48 bytes: 4 | identifier: 2bytes = XD 5 | version:2bytes 6 | size:4bytes 7 | head-pointer: 8bytes 8 | reserved: 32bytes 9 | node header=18bytes: 10 | keyhash: 4bytes 11 | nextptr: 8bytes 12 | keysize: 2bytes 13 | datasize: 4bytes 14 | key: variable 15 | data: variable bytes 16 | 17 | and the index file: 18 | 19 | header 40 bytes: 20 | identifier: 2bytes = XI 21 | version:2bytes 22 | capacity:4bytes 23 | reserved: 32bytes 24 | table array: 25 | pointer: 8bytes -------------------------------------------------------------------------------- /bin/dynomite: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | 3 | require 'optparse' 4 | require 'digest/md5' 5 | 6 | def real_path(filename) 7 | if File.symlink?(filename) 8 | target = File.readlink(filename) 9 | File.expand_path(target[0, 1] == "/" ? 10 | target : File.join(File.dirname(filename), target)) 11 | else 12 | File.expand_path(filename) 13 | end 14 | end 15 | 16 | FILE = real_path(__FILE__) 17 | 18 | ROOT = File.expand_path(File.dirname(FILE) + "/..") 19 | COMMAND_DIR = File.join(ROOT, 'rlibs', 'cli') 20 | $:.unshift COMMAND_DIR 21 | COMMANDS = Dir[COMMAND_DIR + "/*.rb"].map { |fname| File.basename(fname).split(".")[0].downcase } 22 | 23 | Dir.chdir(ROOT) 24 | 25 | begin 26 | ARGV[0] ? command = ARGV[0].downcase : (raise "No command provided") 27 | COMMANDS.include?(command) ? (require command) : (raise "Invalid command") 28 | rescue => error 29 | puts "#{error.message}! 
Valid commands for dynomite are:\n#{COMMANDS.sort.join(", ")}" 30 | puts error.backtrace.join("\n") 31 | exit(1) 32 | end 33 | 34 | -------------------------------------------------------------------------------- /c/bloom.h: -------------------------------------------------------------------------------- 1 | 2 | typedef struct _bloom_data_t { 3 | uint32_t version; //0 4 | uint32_t m; //4 5 | uint64_t n; //8 6 | double e; // 7 | uint32_t k; // 8 | uint64_t keys; //3 9 | uint32_t seed; 10 | char reserved[64]; 11 | char bits[1]; 12 | } bloom_data_t; 13 | 14 | typedef struct _bloom_t { 15 | char *filename; 16 | int file; 17 | bloom_data_t data; 18 | } bloom_t; 19 | 20 | bloom_t *bloom_open(char *filename, long n, double e); 21 | void bloom_put(bloom_t* bloom, char *buff, int len); 22 | int bloom_has(bloom_t* bloom, char *buff, int len); 23 | void bloom_destroy(bloom_t* bloom); 24 | 25 | #define bloom_key_size(bloom) ((bloom)->data.keys) 26 | #define bloom_mem_size(bloom) ((bloom)->data.m / 8) -------------------------------------------------------------------------------- /c/fnv.c: -------------------------------------------------------------------------------- 1 | 2 | #include "fnv.h" 3 | 4 | #define FNV_PRIME 16777619 5 | #define MAX 4294967296 6 | 7 | unsigned int fnv_hash(const void* key, int length, unsigned int seed) { 8 | const unsigned char * data = (const unsigned char*) key; 9 | int n; 10 | unsigned int xord; 11 | unsigned int hash = seed; 12 | 13 | for(n=0; n < length; n++) { 14 | xord = hash ^ data[n]; 15 | hash = (xord * FNV_PRIME) % MAX; 16 | } 17 | return hash; 18 | } -------------------------------------------------------------------------------- /c/fnv.h: -------------------------------------------------------------------------------- 1 | 2 | 3 | unsigned int fnv_hash(const void* key, int length, unsigned int seed); -------------------------------------------------------------------------------- /c/fnv_drv.c: 
-------------------------------------------------------------------------------- 1 | 2 | #include "fnv.h" 3 | #include 4 | #include 5 | #include 6 | 7 | #define write_int32(i, s) {((char*)(s))[0] = (char)((i) >> 24) & 0xff; \ 8 | ((char*)(s))[1] = (char)((i) >> 16) & 0xff; \ 9 | ((char*)(s))[2] = (char)((i) >> 8) & 0xff; \ 10 | ((char*)(s))[3] = (char)((i) & 0xff);} 11 | 12 | static ErlDrvData init(ErlDrvPort port, char *cmd); 13 | static void stop(ErlDrvData handle); 14 | static void outputv(ErlDrvData handle, ErlIOVec *ev); 15 | static void send_hash(ErlDrvPort port, unsigned long hash); 16 | 17 | static ErlDrvData init(ErlDrvPort port, char *cmd) { 18 | return (ErlDrvData) port; 19 | } 20 | 21 | static void stop(ErlDrvData handle) { 22 | //noop 23 | } 24 | 25 | static void outputv(ErlDrvData handle, ErlIOVec *ev) { 26 | ErlDrvPort port = (ErlDrvPort) handle; 27 | ErlDrvTermData caller; 28 | SysIOVec *bin; 29 | int i, n, index = 0; 30 | unsigned long hash; 31 | unsigned long seed; 32 | //first piece of the iovec is the seed 33 | // printf("ev->size %d\n", ev->size); 34 | // printf("ev-vsize %d\n", ev->vsize); 35 | //apparently we start counting at 1 round here? 
36 | bin = &ev->iov[1]; 37 | // printf("bin->orig_size %d\n", bin->iov_len); 38 | // printf("bin->iov_base %s\n", bin->iov_base); 39 | ei_decode_version(bin->iov_base, &index, NULL); 40 | ei_decode_ulong(bin->iov_base, &index, &seed); 41 | hash = seed; 42 | if (index < bin->iov_len) { 43 | hash = fnv_hash(&bin->iov_base[index], bin->iov_len - index, hash); 44 | } 45 | for (i=2; ivsize; i++) { 46 | bin = &ev->iov[i]; 47 | hash = fnv_hash(bin->iov_base, bin->iov_len, hash); 48 | } 49 | caller = driver_caller(port); 50 | ErlDrvTermData spec[] = { 51 | ERL_DRV_UINT, hash 52 | }; 53 | driver_send_term(port, caller, spec, 2); 54 | } 55 | 56 | static int control(ErlDrvData data, unsigned int seed, char *buf, int len, char **rbuf, int rlen) { 57 | unsigned int hash; 58 | int index = 0; 59 | // printf("length %d\n", len); 60 | // printf("buf %s\n", buf); 61 | hash = fnv_hash(buf, len, seed); 62 | if (rlen < sizeof(hash)) { 63 | (*rbuf) = (char *) driver_realloc(*rbuf, sizeof(hash)); 64 | } 65 | write_int32(hash, *rbuf); 66 | return sizeof(hash); 67 | } 68 | 69 | static void send_hash(ErlDrvPort port, unsigned long hash) { 70 | ei_x_buff x; 71 | ei_x_new_with_version(&x); 72 | ei_x_encode_ulong(&x, hash); 73 | driver_output(port, x.buff, x.index); 74 | // printf("sent hash %d\n", hash); 75 | ei_x_free(&x); 76 | } 77 | 78 | static ErlDrvEntry fnv_driver_entry = { 79 | NULL, /* init */ 80 | init, 81 | stop, 82 | NULL, /* output */ 83 | NULL, /* ready_input */ 84 | NULL, /* ready_output */ 85 | "fnv_drv", /* the name of the driver */ 86 | NULL, /* finish */ 87 | NULL, /* handle */ 88 | control, /* control */ 89 | NULL, /* timeout */ 90 | outputv, /* outputv */ 91 | NULL, /* ready_async */ 92 | NULL, /* flush */ 93 | NULL, /* call */ 94 | NULL, /* event */ 95 | ERL_DRV_EXTENDED_MARKER, /* ERL_DRV_EXTENDED_MARKER */ 96 | ERL_DRV_EXTENDED_MAJOR_VERSION, /* ERL_DRV_EXTENDED_MAJOR_VERSION */ 97 | ERL_DRV_EXTENDED_MAJOR_VERSION, /* ERL_DRV_EXTENDED_MINOR_VERSION */ 98 | 
ERL_DRV_FLAG_USE_PORT_LOCKING /* ERL_DRV_FLAGs */ 99 | 100 | }; 101 | 102 | DRIVER_INIT(fnv_driver) { 103 | return &fnv_driver_entry; 104 | } -------------------------------------------------------------------------------- /c/murmur.c: -------------------------------------------------------------------------------- 1 | //----------------------------------------------------------------------------- 2 | // MurmurHash2, by Austin Appleby 3 | 4 | // Note - This code makes a few assumptions about how your machine behaves - 5 | 6 | // 1. We can read a 4-byte value from any address without crashing 7 | // 2. sizeof(int) == 4 8 | 9 | // And it has a few limitations - 10 | 11 | // 1. It will not work incrementally. 12 | // 2. It will not produce the same results on little-endian and big-endian 13 | // machines. 14 | 15 | unsigned int MurmurHash2 ( const void * key, int len, unsigned int seed ) 16 | { 17 | // 'm' and 'r' are mixing constants generated offline. 18 | // They're not really 'magic', they just happen to work well. 19 | 20 | const unsigned int m = 0x5bd1e995; 21 | const int r = 24; 22 | 23 | // Initialize the hash to a 'random' value 24 | 25 | unsigned int h = seed ^ len; 26 | 27 | // Mix 4 bytes at a time into the hash 28 | 29 | const unsigned char * data = (const unsigned char *)key; 30 | 31 | while(len >= 4) 32 | { 33 | unsigned int k = *(unsigned int *)data; 34 | 35 | k *= m; 36 | k ^= k >> r; 37 | k *= m; 38 | 39 | h *= m; 40 | h ^= k; 41 | 42 | data += 4; 43 | len -= 4; 44 | } 45 | 46 | // Handle the last few bytes of the input array 47 | 48 | switch(len) 49 | { 50 | case 3: h ^= data[2] << 16; 51 | case 2: h ^= data[1] << 8; 52 | case 1: h ^= data[0]; 53 | h *= m; 54 | }; 55 | 56 | // Do a few final mixes of the hash to ensure the last few 57 | // bytes are well-incorporated. 
58 | 59 | h ^= h >> 13; 60 | h *= m; 61 | h ^= h >> 15; 62 | 63 | return h; 64 | } 65 | -------------------------------------------------------------------------------- /c/murmur.h: -------------------------------------------------------------------------------- 1 | 2 | 3 | unsigned int MurmurHash2 ( const void * key, int len, unsigned int seed ); 4 | -------------------------------------------------------------------------------- /c/murmur_drv.c: -------------------------------------------------------------------------------- 1 | 2 | #include "murmur.h" 3 | #include 4 | #include 5 | #include 6 | 7 | 8 | static ErlDrvData init(ErlDrvPort port, char *cmd); 9 | static void stop(ErlDrvData handle); 10 | static void outputv(ErlDrvData handle, ErlIOVec *ev); 11 | static void send_hash(ErlDrvPort port, unsigned long hash); 12 | 13 | static ErlDrvData init(ErlDrvPort port, char *cmd) { 14 | return (ErlDrvData) port; 15 | } 16 | 17 | static void stop(ErlDrvData handle) { 18 | //noop 19 | } 20 | 21 | static void outputv(ErlDrvData handle, ErlIOVec *ev) { 22 | ErlDrvPort port = (ErlDrvPort) handle; 23 | SysIOVec *bin; 24 | int i, n, index = 0; 25 | unsigned long hash; 26 | unsigned long seed; 27 | //first piece of the iovec is the seed 28 | // printf("ev->size %d\n", ev->size); 29 | // printf("ev-vsize %d\n", ev->vsize); 30 | //apparently we start counting at 1 round here? 
31 | bin = &ev->iov[1]; 32 | // printf("bin->orig_size %d\n", bin->iov_len); 33 | // printf("bin->iov_base %s\n", bin->iov_base); 34 | ei_decode_version(bin->iov_base, &index, NULL); 35 | ei_decode_ulong(bin->iov_base, &index, &seed); 36 | hash = (unsigned int) seed; 37 | if (index < bin->iov_len) { 38 | hash = MurmurHash2(&bin->iov_base[index], bin->iov_len - index, hash); 39 | } 40 | // printf("hash %d\n", hash); 41 | for (i=2; ivsize; i++) { 42 | bin = &ev->iov[i]; 43 | // printf("bin->orig_size %d\n", bin->iov_len); 44 | hash = MurmurHash2(bin->iov_base, bin->iov_len, hash); 45 | // printf("hashed %d\n", i); 46 | } 47 | send_hash(port, hash); 48 | } 49 | 50 | static void send_hash(ErlDrvPort port, unsigned long hash) { 51 | ei_x_buff x; 52 | ei_x_new_with_version(&x); 53 | ei_x_encode_ulong(&x, hash); 54 | driver_output(port, x.buff, x.index); 55 | // printf("sent hash %d\n", hash); 56 | ei_x_free(&x); 57 | } 58 | 59 | static ErlDrvEntry murmur_driver_entry = { 60 | NULL, /* init */ 61 | init, 62 | stop, 63 | NULL, /* output */ 64 | NULL, /* ready_input */ 65 | NULL, /* ready_output */ 66 | "murmur_drv", /* the name of the driver */ 67 | NULL, /* finish */ 68 | NULL, /* handle */ 69 | NULL, /* control */ 70 | NULL, /* timeout */ 71 | outputv, /* outputv */ 72 | NULL, /* ready_async */ 73 | NULL, /* flush */ 74 | NULL, /* call */ 75 | NULL /* event */ 76 | }; 77 | 78 | DRIVER_INIT(murmur_driver) { 79 | return &murmur_driver_entry; 80 | } -------------------------------------------------------------------------------- /c/ulimit_drv.c: -------------------------------------------------------------------------------- 1 | #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__linux__) 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #define SET 's' 10 | #define GET 'g' 11 | 12 | static ErlDrvData init(ErlDrvPort port, char *cmd); 13 | static void output(ErlDrvData handle, char* buff, int len); 14 | static void stop(ErlDrvData handle); 
15 | static void send_errno(ErlDrvPort port, int code); 16 | static void send_rlimit(ErlDrvPort port, struct rlimit *limit); 17 | 18 | static ErlDrvData init(ErlDrvPort port, char *cmd) { 19 | return (ErlDrvData) port; 20 | } 21 | 22 | static void output(ErlDrvData handle, char* buff, int len) { 23 | ErlDrvPort port = (ErlDrvPort) handle; 24 | int index=1; 25 | struct rlimit limit; 26 | 27 | switch (buff[0]) { 28 | case SET: 29 | getrlimit(RLIMIT_NOFILE, &limit); 30 | ei_decode_version(buff, &index, NULL); 31 | ei_decode_long(buff, &index, &limit.rlim_cur); 32 | if (-1 == setrlimit(RLIMIT_NOFILE, &limit)) { 33 | send_errno(port, errno); 34 | return; 35 | } 36 | getrlimit(RLIMIT_NOFILE, &limit); 37 | send_rlimit(port, &limit); 38 | return; 39 | case GET: 40 | getrlimit(RLIMIT_NOFILE, &limit); 41 | send_rlimit(port, &limit); 42 | return; 43 | } 44 | } 45 | 46 | static void stop(ErlDrvData handle) { 47 | 48 | } 49 | 50 | static void send_rlimit(ErlDrvPort port, struct rlimit *limit) { 51 | ei_x_buff x; 52 | ei_x_new_with_version(&x); 53 | ei_x_encode_tuple_header(&x, 2); 54 | ei_x_encode_long(&x, limit->rlim_cur); 55 | ei_x_encode_long(&x, limit->rlim_max); 56 | driver_output(port, x.buff, x.index); 57 | ei_x_free(&x); 58 | } 59 | 60 | static void send_errno(ErlDrvPort port, int code) { 61 | ei_x_buff x; 62 | char *msg = erl_errno_id(code); 63 | ei_x_new_with_version(&x); 64 | ei_x_encode_tuple_header(&x, 2); 65 | ei_x_encode_atom(&x, "error"); 66 | ei_x_encode_atom(&x, msg); 67 | driver_output(port, x.buff, x.index); 68 | ei_x_free(&x); 69 | } 70 | 71 | static ErlDrvEntry ulimit_driver_entry = { 72 | NULL, /* init */ 73 | init, 74 | stop, 75 | output, /* output */ 76 | NULL, /* ready_input */ 77 | NULL, /* ready_output */ 78 | "ulimit_drv", /* the name of the driver */ 79 | NULL, /* finish */ 80 | NULL, /* handle */ 81 | NULL, /* control */ 82 | NULL, /* timeout */ 83 | NULL, /* outputv */ 84 | NULL, /* ready_async */ 85 | NULL, /* flush */ 86 | NULL, /* call */ 87 | 
NULL /* event */ 88 | }; 89 | 90 | DRIVER_INIT(ulimit_driver) { 91 | return &ulimit_driver_entry; 92 | } 93 | 94 | #endif -------------------------------------------------------------------------------- /conf/default.conf: -------------------------------------------------------------------------------- 1 | %% rotating log and minimal tty 2 | [{sasl, [ 3 | {sasl_error_logger, false}, 4 | {errlog_type, error}, 5 | {error_logger_mf_dir,"/p/log/dynomite"}, 6 | {error_logger_mf_maxbytes,52428800}, % 50 MB 7 | {error_logger_mf_maxfiles, 10} 8 | ]}]. 9 | -------------------------------------------------------------------------------- /conf/default.config: -------------------------------------------------------------------------------- 1 | %% rotating log and minimal tty 2 | [{sasl, [ 3 | {sasl_error_logger, false}, 4 | {errlog_type, error}, 5 | {error_logger_mf_dir,"/p/log/dynomite"}, 6 | {error_logger_mf_maxbytes,52428800}, % 50 MB 7 | {error_logger_mf_maxfiles, 10} 8 | ]}, 9 | {dynomite, [{web_port, 8081}]}]. 10 | -------------------------------------------------------------------------------- /config.json: -------------------------------------------------------------------------------- 1 | { 2 | "blocksize": 4096, 3 | "text_port": 11222, 4 | "thrift_port": 9200, 5 | "web_port": 8080, 6 | "buffered_writes": true, 7 | "directory": "/tmp/data", 8 | "storage_mod": "fs_storage", 9 | "n": 1, 10 | "r": 1, 11 | "w": 1, 12 | "q": 6, 13 | "cache": false, 14 | "cache_size": 128000 15 | } -------------------------------------------------------------------------------- /deps/thrift/COPYING: -------------------------------------------------------------------------------- 1 | Thrift Software License 2 | Copyright (c) 2006- Facebook, Inc. 
3 | 4 | Permission is hereby granted, free of charge, to any person or organization 5 | obtaining a copy of the software and accompanying documentation covered by 6 | this license (the "Software") to use, reproduce, display, distribute, 7 | execute, and transmit the Software, and to prepare derivative works of the 8 | Software, and to permit third-parties to whom the Software is furnished to 9 | do so, all subject to the following: 10 | 11 | The copyright notices in the Software and this entire statement, including 12 | the above license grant, this restriction and the following disclaimer, 13 | must be included in all copies of the Software, in whole or in part, and 14 | all derivative works of the Software, unless such copies or derivative 15 | works are solely in the form of machine-executable object code generated by 16 | a source language processor. 17 | 18 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 | FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT 21 | SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE 22 | FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, 23 | ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 24 | DEALINGS IN THE SOFTWARE. 25 | -------------------------------------------------------------------------------- /deps/thrift/LICENSE: -------------------------------------------------------------------------------- 1 | Thrift Software License 2 | Copyright (c) 2006- Facebook, Inc. 
3 | 4 | Permission is hereby granted, free of charge, to any person or organization 5 | obtaining a copy of the software and accompanying documentation covered by 6 | this license (the "Software") to use, reproduce, display, distribute, 7 | execute, and transmit the Software, and to prepare derivative works of the 8 | Software, and to permit third-parties to whom the Software is furnished to 9 | do so, all subject to the following: 10 | 11 | The copyright notices in the Software and this entire statement, including 12 | the above license grant, this restriction and the following disclaimer, 13 | must be included in all copies of the Software, in whole or in part, and 14 | all derivative works of the Software, unless such copies or derivative 15 | works are solely in the form of machine-executable object code generated by 16 | a source language processor. 17 | 18 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 | FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT 21 | SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE 22 | FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, 23 | ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 24 | DEALINGS IN THE SOFTWARE. 25 | -------------------------------------------------------------------------------- /deps/thrift/Makefile: -------------------------------------------------------------------------------- 1 | MODULES = \ 2 | src 3 | 4 | all clean docs: 5 | for dir in $(MODULES); do \ 6 | (cd $$dir; ${MAKE} $@); \ 7 | done 8 | 9 | install: all 10 | echo 'No install target, sorry.' 11 | 12 | check: all 13 | 14 | distclean: clean 15 | 16 | # Hack to make "make dist" work. 17 | # This should not work, but it appears to. 
18 | distdir: 19 | -------------------------------------------------------------------------------- /deps/thrift/README: -------------------------------------------------------------------------------- 1 | Example session using thrift_client: 2 | 3 | 118> f(), {ok, C} = thrift_client:start_link("localhost", 9090, thriftTest_thrif 4 | t). 5 | {ok,<0.271.0>} 6 | 119> thrift_client:call(C, testVoid, []). 7 | {ok,ok} 8 | 120> thrift_client:call(C, testVoid, [asdf]). 9 | {error,{bad_args,testVoid,[asdf]}} 10 | 121> thrift_client:call(C, testI32, [123]). 11 | {ok,123} 12 | 122> thrift_client:call(C, testAsync, [1]). 13 | {ok,ok} 14 | 123> catch thrift_client:call(C, testXception, ["foo"]). 15 | {error,{no_function,testXception}} 16 | 124> catch thrift_client:call(C, testException, ["foo"]). 17 | {ok,ok} 18 | 125> catch thrift_client:call(C, testException, ["Xception"]). 19 | {xception,1001,"This is an Xception"} 20 | 126> thrift_client:call(C, testException, ["Xception"]). 21 | 22 | =ERROR REPORT==== 24-Feb-2008::23:00:23 === 23 | Error in process <0.269.0> with exit value: {{nocatch,{xception,1001,"This is an 24 | Xception"}},[{thrift_client,call,3},{erl_eval,do_apply,5},{shell,exprs,6},{shel 25 | l,eval_loop,3}]} 26 | 27 | ** exited: {{nocatch,{xception,1001,"This is an Xception"}}, 28 | [{thrift_client,call,3}, 29 | {erl_eval,do_apply,5}, 30 | {shell,exprs,6}, 31 | {shell,eval_loop,3}]} ** 32 | -------------------------------------------------------------------------------- /deps/thrift/build/beamver: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # erlwareSys: otp/build/beamver,v 1.1 2002/02/14 11:45:20 hal Exp $ 4 | 5 | # usage: beamver 6 | # 7 | # if there's a usable -vsn() attribute, print it and exit with status 0 8 | # otherwise, print nothing and exit with status 1 9 | 10 | # From the Erlang shell: 11 | # 12 | # 5> code:which(acca_inets). 
13 | # "/home/martin/work/otp/releases//../../acca/ebin/.beam" 14 | # 15 | # 8> beam_lib:version(code:which()). 16 | # {ok,{,['$Id: beamver,v 1.1.1.1 2003/06/13 21:43:21 mlogan Exp $ ']}} 17 | 18 | # TMPFILE looks like this: 19 | # 20 | # io:format("hello ~p~n", 21 | # beam_lib:version("/home/hal/work/otp/acca/ebin/acca_inets.beam")]). 22 | 23 | TMPFILE=/tmp/beamver.$$ 24 | 25 | # exit with failure if we can't read the file 26 | test -f "$1" || exit 1 27 | BEAMFILE=\"$1\" 28 | 29 | cat > $TMPFILE <<_EOF 30 | io:format("~p~n", 31 | [beam_lib:version($BEAMFILE)]). 32 | _EOF 33 | 34 | # beam_result is {ok,{Module_name, Beam_version} or {error,beam_lib,{Reason}} 35 | beam_result=`erl -noshell \ 36 | -s file eval $TMPFILE \ 37 | -s erlang halt` 38 | 39 | rm -f $TMPFILE 40 | 41 | # sed regexes: 42 | # remove brackets and anything outside them 43 | # remove quotes and anything outside them 44 | # remove apostrophes and anything outside them 45 | # remove leading and trailing spaces 46 | 47 | case $beam_result in 48 | \{ok*) 49 | echo $beam_result | sed -e 's/.*\[\(.*\)].*/\1/' \ 50 | -e 's/.*\"\(.*\)\".*/\1/' \ 51 | -e "s/.*\'\(.*\)\'.*/\1/" \ 52 | -e 's/ *$//' -e 's/^ *//' 53 | exit 0 54 | ;; 55 | *) 56 | exit 1 57 | ;; 58 | esac 59 | 60 | -------------------------------------------------------------------------------- /deps/thrift/build/buildtargets.mk: -------------------------------------------------------------------------------- 1 | EBIN ?= ../ebin 2 | ESRC ?= . 
3 | EMULATOR = beam 4 | 5 | ERLC_WFLAGS = -W 6 | ERLC = erlc $(ERLC_WFLAGS) $(ERLC_FLAGS) 7 | ERL = erl -boot start_clean 8 | 9 | $(EBIN)/%.beam: $(ESRC)/%.erl 10 | $(ERLC) $(ERL_FLAGS) $(ERL_COMPILE_FLAGS) -o$(EBIN) $< 11 | 12 | .erl.beam: 13 | $(ERLC) $(ERL_FLAGS) $(ERL_COMPILE_FLAGS) -o$(dir $@) $< 14 | 15 | -------------------------------------------------------------------------------- /deps/thrift/build/colors.mk: -------------------------------------------------------------------------------- 1 | # Colors to assist visual inspection of make output. 2 | 3 | # Colors 4 | LGRAY=$$'\e[0;37m' 5 | DGRAY=$$'\e[1;30m' 6 | LGREEN=$$'\e[1;32m' 7 | LBLUE=$$'\e[1;34m' 8 | LCYAN=$$'\e[1;36m' 9 | LPURPLE=$$'\e[1;35m' 10 | LRED=$$'\e[1;31m' 11 | NO_COLOR=$$'\e[0m' 12 | DEFAULT=$$'\e[0m' 13 | BLACK=$$'\e[0;30m' 14 | BLUE=$$'\e[0;34m' 15 | GREEN=$$'\e[0;32m' 16 | CYAN=$$'\e[0;36m' 17 | RED=$$'\e[0;31m' 18 | PURPLE=$$'\e[0;35m' 19 | BROWN=$$'\e[0;33m' 20 | YELLOW=$$'\e[1;33m' 21 | WHITE=$$'\e[1;37m' 22 | 23 | BOLD=$$'\e[1;37m' 24 | OFF=$$'\e[0m' 25 | -------------------------------------------------------------------------------- /deps/thrift/build/docs.mk: -------------------------------------------------------------------------------- 1 | EDOC_PATH=../../../tools/utilities 2 | 3 | #single place to include docs from. 4 | docs: 5 | @mkdir -p ../doc 6 | @echo -n $${MY_BLUE:-$(BLUE)}; \ 7 | $(EDOC_PATH)/edoc $(APP_NAME); \ 8 | if [ $$? 
-eq 0 ]; then \ 9 | echo $${MY_LRED:-$(LRED)}"$$d Doc Failed"; \ 10 | fi; \ 11 | echo -n $(OFF)$(NO_COLOR) 12 | 13 | -------------------------------------------------------------------------------- /deps/thrift/build/mime.types: -------------------------------------------------------------------------------- 1 | 2 | application/activemessage 3 | application/andrew-inset 4 | application/applefile 5 | application/atomicmail 6 | application/dca-rft 7 | application/dec-dx 8 | application/mac-binhex40 hqx 9 | application/mac-compactpro cpt 10 | application/macwriteii 11 | application/msword doc 12 | application/news-message-id 13 | application/news-transmission 14 | application/octet-stream bin dms lha lzh exe class 15 | application/oda oda 16 | application/pdf pdf 17 | application/postscript ai eps ps 18 | application/powerpoint ppt 19 | application/remote-printing 20 | application/rtf rtf 21 | application/slate 22 | application/wita 23 | application/wordperfect5.1 24 | application/x-bcpio bcpio 25 | application/x-cdlink vcd 26 | application/x-compress Z 27 | application/x-cpio cpio 28 | application/x-csh csh 29 | application/x-director dcr dir dxr 30 | application/x-dvi dvi 31 | application/x-gtar gtar 32 | application/x-gzip gz 33 | application/x-hdf hdf 34 | application/x-httpd-cgi cgi 35 | application/x-koan skp skd skt skm 36 | application/x-latex latex 37 | application/x-mif mif 38 | application/x-netcdf nc cdf 39 | application/x-sh sh 40 | application/x-shar shar 41 | application/x-stuffit sit 42 | application/x-sv4cpio sv4cpio 43 | application/x-sv4crc sv4crc 44 | application/x-tar tar 45 | application/x-tcl tcl 46 | application/x-tex tex 47 | application/x-texinfo texinfo texi 48 | application/x-troff t tr roff 49 | application/x-troff-man man 50 | application/x-troff-me me 51 | application/x-troff-ms ms 52 | application/x-ustar ustar 53 | application/x-wais-source src 54 | application/zip zip 55 | audio/basic au snd 56 | audio/mpeg mpga mp2 57 | audio/x-aiff 
aif aiff aifc 58 | audio/x-pn-realaudio ram 59 | audio/x-pn-realaudio-plugin rpm 60 | audio/x-realaudio ra 61 | audio/x-wav wav 62 | chemical/x-pdb pdb xyz 63 | image/gif gif 64 | image/ief ief 65 | image/jpeg jpeg jpg jpe 66 | image/png png 67 | image/tiff tiff tif 68 | image/x-cmu-raster ras 69 | image/x-portable-anymap pnm 70 | image/x-portable-bitmap pbm 71 | image/x-portable-graymap pgm 72 | image/x-portable-pixmap ppm 73 | image/x-rgb rgb 74 | image/x-xbitmap xbm 75 | image/x-xpixmap xpm 76 | image/x-xwindowdump xwd 77 | message/external-body 78 | message/news 79 | message/partial 80 | message/rfc822 81 | multipart/alternative 82 | multipart/appledouble 83 | multipart/digest 84 | multipart/mixed 85 | multipart/parallel 86 | text/html html htm 87 | text/x-server-parsed-html shtml 88 | text/plain txt 89 | text/richtext rtx 90 | text/tab-separated-values tsv 91 | text/x-setext etx 92 | text/x-sgml sgml sgm 93 | video/mpeg mpeg mpg mpe 94 | video/quicktime qt mov 95 | video/x-msvideo avi 96 | video/x-sgi-movie movie 97 | x-conference/x-cooltalk ice 98 | x-world/x-vrml wrl vrml 99 | -------------------------------------------------------------------------------- /deps/thrift/build/otp_subdir.mk: -------------------------------------------------------------------------------- 1 | # Comment by tfee 2004-07-01 2 | # ========================== 3 | # This file is a mod of the stock OTP one. 4 | # The change allows make to stop when a compile error occurs. 5 | # This file needs to go into two places: 6 | # /usr/local/include/erlang 7 | # /opt/OTP_SRC/make 8 | # 9 | # where OTP_SRC is a symbolic link to a peer directory containing 10 | # the otp source, e.g. otp_src_R9C-2. 11 | # 12 | # After installing OTP, running sudo make install in otp/build 13 | # will push this file out to the two places listed above. 14 | # 15 | # The mod involves setting the shell variable $short_circuit, which we 16 | # introduce - ie it is not in the stock file. 
This variable is tested 17 | # to affect execution flow and is also returned to affect the flow in 18 | # the calling script (this one). The latter step is necessary because 19 | # of the recursion involved. 20 | # ===================================================================== 21 | 22 | 23 | # ``The contents of this file are subject to the Erlang Public License, 24 | # Version 1.1, (the "License"); you may not use this file except in 25 | # compliance with the License. You should have received a copy of the 26 | # Erlang Public License along with this software. If not, it can be 27 | # retrieved via the world wide web at http://www.erlang.org/. 28 | # 29 | # Software distributed under the License is distributed on an "AS IS" 30 | # basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See 31 | # the License for the specific language governing rights and limitations 32 | # under the License. 33 | # 34 | # The Initial Developer of the Original Code is Ericsson Utvecklings AB. 35 | # Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings 36 | # AB. All Rights Reserved.'' 37 | # 38 | # $Id: otp_subdir.mk,v 1.5 2004/07/12 15:12:23 jeinhorn Exp $ 39 | # 40 | # 41 | # Make include file for otp 42 | 43 | .PHONY: debug opt release docs release_docs tests release_tests \ 44 | clean depend 45 | 46 | # 47 | # Targets that don't affect documentation directories 48 | # 49 | debug opt release docs release_docs tests release_tests clean depend: prepare 50 | @set -e ; \ 51 | app_pwd=`pwd` ; \ 52 | if test -f vsn.mk; then \ 53 | echo "=== Entering application" `basename $$app_pwd` ; \ 54 | fi ; \ 55 | case "$(MAKE)" in *clearmake*) tflag="-T";; *) tflag="";; esac; \ 56 | short_circuit=0 ; \ 57 | for d in $(SUB_DIRECTORIES); do \ 58 | if [[ $$short_circuit = 0 ]]; then \ 59 | if test -f $$d/SKIP ; then \ 60 | echo "=== Skipping subdir $$d, reason:" ; \ 61 | cat $$d/SKIP ; \ 62 | echo "===" ; \ 63 | else \ 64 | if test ! 
-d $$d ; then \ 65 | echo "=== Skipping subdir $$d, it is missing" ; \ 66 | else \ 67 | xflag="" ; \ 68 | if test -f $$d/ignore_config_record.inf; then \ 69 | xflag=$$tflag ; \ 70 | fi ; \ 71 | (cd $$d && $(MAKE) $$xflag $@) ; \ 72 | if [[ $$? != 0 ]]; then \ 73 | short_circuit=1 ; \ 74 | fi ; \ 75 | fi ; \ 76 | fi ; \ 77 | fi ; \ 78 | done ; \ 79 | if test -f vsn.mk; then \ 80 | echo "=== Leaving application" `basename $$app_pwd` ; \ 81 | fi ; \ 82 | exit $$short_circuit 83 | 84 | prepare: 85 | echo 86 | -------------------------------------------------------------------------------- /deps/thrift/build/raw_test.mk: -------------------------------------------------------------------------------- 1 | # for testing erlang files directly. The set up for a 2 | # this type of test would be 3 | # files to test reside in lib//src and the test files which are 4 | # just plain erlang code reside in lib//test 5 | # 6 | # This color codes emitted while the tests run assume that you are using 7 | # a white-on-black display schema. If not, e.g. if you use a white 8 | # background, you will not be able to read the "WHITE" text. 9 | # You can override this by supplying your own "white" color, 10 | # which may in fact be black! You do this by defining an environment 11 | # variable named "MY_WHITE" and setting it to $'\e[0;30m' (which is 12 | # simply bash's way of specifying "Escape [ 0 ; 3 0 m"). 13 | # Similarly, you can set your versions of the standard colors 14 | # found in colors.mk. 15 | 16 | test: 17 | @TEST_MODULES=`ls *_test.erl`; \ 18 | trap "echo $(OFF)$(NO_COLOR); exit 1;" 1 2 3 6; \ 19 | for d in $$TEST_MODULES; do \ 20 | echo $${MY_GREEN:-$(GREEN)}"Testing File $$d" $${MY_WHITE:-$(WHITE)}; \ 21 | echo -n $${MY_BLUE:-$(BLUE)}; \ 22 | erl -name $(APP_NAME) $(TEST_LIBS) \ 23 | -s `basename $$d .erl` all -s init stop -noshell; \ 24 | if [ $$? 
-ne 0 ]; then \ 25 | echo $${MY_LRED:-$(LRED)}"$$d Test Failed"; \ 26 | fi; \ 27 | echo -n $(OFF)$(NO_COLOR); \ 28 | done 29 | 30 | -------------------------------------------------------------------------------- /deps/thrift/include/thrift_constants.hrl: -------------------------------------------------------------------------------- 1 | %%% Copyright (c) 2007- Facebook 2 | %%% Distributed under the Thrift Software License 3 | %%% 4 | %%% See accompanying file LICENSE or visit the Thrift site at: 5 | %%% http://developers.facebook.com/thrift/ 6 | 7 | %% TType 8 | -define(tType_STOP, 0). 9 | -define(tType_VOID, 1). 10 | -define(tType_BOOL, 2). 11 | -define(tType_BYTE, 3). 12 | -define(tType_DOUBLE, 4). 13 | -define(tType_I16, 6). 14 | -define(tType_I32, 8). 15 | -define(tType_I64, 10). 16 | -define(tType_STRING, 11). 17 | -define(tType_STRUCT, 12). 18 | -define(tType_MAP, 13). 19 | -define(tType_SET, 14). 20 | -define(tType_LIST, 15). 21 | 22 | % TMessageType 23 | -define(tMessageType_CALL, 1). 24 | -define(tMessageType_REPLY, 2). 25 | -define(tMessageType_EXCEPTION, 3). 26 | 27 | % TApplicationException 28 | -define(TApplicationException_Structure, 29 | {struct, [{1, string}, 30 | {2, i32}]}). 31 | 32 | -record('TApplicationException', {message, type}). 33 | 34 | -define(TApplicationException_UNKNOWN, 0). 35 | -define(TApplicationException_UNKNOWN_METHOD, 1). 36 | -define(TApplicationException_INVALID_MESSAGE_TYPE, 2). 37 | -define(TApplicationException_WRONG_METHOD_NAME, 3). 38 | -define(TApplicationException_BAD_SEQUENCE_ID, 4). 39 | -define(TApplicationException_MISSING_RESULT, 5). 40 | 41 | -------------------------------------------------------------------------------- /deps/thrift/include/thrift_protocol.hrl: -------------------------------------------------------------------------------- 1 | -ifndef(THRIFT_PROTOCOL_INCLUDED). 2 | -define(THRIFT_PROTOCOL_INCLUDED, yea). 3 | 4 | -record(protocol_message_begin, {name, type, seqid}). 
5 | -record(protocol_struct_begin, {name}). 6 | -record(protocol_field_begin, {name, type, id}). 7 | -record(protocol_map_begin, {ktype, vtype, size}). 8 | -record(protocol_list_begin, {etype, size}). 9 | -record(protocol_set_begin, {etype, size}). 10 | 11 | 12 | -endif. 13 | -------------------------------------------------------------------------------- /deps/thrift/src/Makefile: -------------------------------------------------------------------------------- 1 | # $Id: Makefile,v 1.3 2004/08/13 16:35:59 mlogan Exp $ 2 | # 3 | include ../build/otp.mk 4 | include ../build/colors.mk 5 | include ../build/buildtargets.mk 6 | 7 | # ---------------------------------------------------- 8 | # Application version 9 | # ---------------------------------------------------- 10 | 11 | include ../vsn.mk 12 | APP_NAME=thrift 13 | PFX=thrift 14 | VSN=$(THRIFT_VSN) 15 | 16 | # ---------------------------------------------------- 17 | # Install directory specification 18 | # WARNING: INSTALL_DIR the command to install a directory. 19 | # INSTALL_DST is the target directory 20 | # ---------------------------------------------------- 21 | INSTALL_DST = $(ERLANG_OTP)/lib/$(APP_NAME)-$(VSN) 22 | 23 | # ---------------------------------------------------- 24 | # Target Specs 25 | # ---------------------------------------------------- 26 | 27 | 28 | MODULES = $(shell find . -name \*.erl | sed 's:^\./::' | sed 's/\.erl//') 29 | MODULES_STRING_LIST = $(shell find . 
-name \*.erl | sed "s:^\./:':" | sed "s/\.erl/',/") 30 | 31 | HRL_FILES= 32 | INTERNAL_HRL_FILES= $(APP_NAME).hrl 33 | ERL_FILES= $(MODULES:%=%.erl) 34 | DOC_FILES=$(ERL_FILES) 35 | 36 | APP_FILE= $(APP_NAME).app 37 | APPUP_FILE= $(APP_NAME).appup 38 | 39 | APP_SRC= $(APP_FILE).src 40 | APPUP_SRC= $(APPUP_FILE).src 41 | 42 | APP_TARGET= $(EBIN)/$(APP_FILE) 43 | APPUP_TARGET= $(EBIN)/$(APPUP_FILE) 44 | 45 | BEAMS= $(MODULES:%=$(EBIN)/%.$(EMULATOR)) 46 | TARGET_FILES= $(BEAMS) $(APP_TARGET) $(APPUP_TARGET) 47 | 48 | WEB_TARGET=/var/yaws/www/$(APP_NAME) 49 | 50 | # ---------------------------------------------------- 51 | # FLAGS 52 | # ---------------------------------------------------- 53 | 54 | ERL_FLAGS += 55 | ERL_INCLUDE = -I../include -I../../fslib/include -I../../system_status/include 56 | ERL_COMPILE_FLAGS += $(ERL_INCLUDE) +native 57 | 58 | # ---------------------------------------------------- 59 | # Targets 60 | # ---------------------------------------------------- 61 | 62 | all debug opt: $(EBIN) $(TARGET_FILES) 63 | 64 | #$(EBIN)/rm_logger.beam: $(APP_NAME).hrl 65 | include ../build/docs.mk 66 | 67 | # Note: In the open-source build clean must not destroy the preloaded 68 | # beam files. 69 | clean: 70 | rm -f $(TARGET_FILES) 71 | rm -f *~ 72 | rm -f core 73 | rm -rf $(EBIN) 74 | rm -rf *html 75 | 76 | $(EBIN): 77 | mkdir $(EBIN) 78 | 79 | dialyzer: $(TARGET_FILES) 80 | dialyzer --src -r . 
$(ERL_INCLUDE) 81 | 82 | # ---------------------------------------------------- 83 | # Special Build Targets 84 | # ---------------------------------------------------- 85 | 86 | $(APP_TARGET): $(APP_SRC) ../vsn.mk $(BEAMS) 87 | sed -e 's;%VSN%;$(VSN);' \ 88 | -e 's;%PFX%;$(PFX);' \ 89 | -e 's;%APP_NAME%;$(APP_NAME);' \ 90 | -e 's;%MODULES%;%MODULES%$(MODULES_STRING_LIST);' \ 91 | $< > $<".tmp" 92 | sed -e 's/%MODULES%\(.*\),/\1/' \ 93 | $<".tmp" > $@ 94 | rm $<".tmp" 95 | 96 | $(APPUP_TARGET): $(APPUP_SRC) ../vsn.mk 97 | sed -e 's;%VSN%;$(VSN);' $< > $@ 98 | 99 | $(WEB_TARGET): ../markup/* 100 | rm -rf $(WEB_TARGET) 101 | mkdir $(WEB_TARGET) 102 | cp -r ../markup/ $(WEB_TARGET) 103 | cp -r ../skins/ $(WEB_TARGET) 104 | 105 | # ---------------------------------------------------- 106 | # Install Target 107 | # ---------------------------------------------------- 108 | 109 | install: all $(WEB_TARGET) 110 | # $(INSTALL_DIR) $(INSTALL_DST)/src 111 | # $(INSTALL_DATA) $(ERL_FILES) $(INSTALL_DST)/src 112 | # $(INSTALL_DATA) $(INTERNAL_HRL_FILES) $(INSTALL_DST)/src 113 | # $(INSTALL_DIR) $(INSTALL_DST)/include 114 | # $(INSTALL_DATA) $(HRL_FILES) $(INSTALL_DST)/include 115 | # $(INSTALL_DIR) $(INSTALL_DST)/ebin 116 | # $(INSTALL_DATA) $(TARGET_FILES) $(INSTALL_DST)/ebin 117 | -------------------------------------------------------------------------------- /deps/thrift/src/test_handler.erl: -------------------------------------------------------------------------------- 1 | -module(test_handler). 2 | 3 | -export([handle_function/2]). 4 | 5 | handle_function(add, Params = {A, B}) -> 6 | io:format("Got params: ~p~n", [Params]), 7 | {reply, A + B}. 8 | -------------------------------------------------------------------------------- /deps/thrift/src/test_service.erl: -------------------------------------------------------------------------------- 1 | -module(test_service). 2 | % 3 | % Test service definition 4 | 5 | -export([function_info/2]). 
6 | 7 | function_info(add, params_type) -> 8 | {struct, [{1, i32}, 9 | {2, i32}]}; 10 | function_info(add, reply_type) -> i32. 11 | -------------------------------------------------------------------------------- /deps/thrift/src/thrift.app.src: -------------------------------------------------------------------------------- 1 | %%% -*- mode:erlang -*- 2 | {application, %APP_NAME%, 3 | [ 4 | % A quick description of the application. 5 | {description, "Thrift bindings"}, 6 | 7 | % The version of the applicaton 8 | {vsn, "%VSN%"}, 9 | 10 | % All modules used by the application. 11 | {modules, [ 12 | %MODULES% 13 | ]}, 14 | 15 | % All of the registered names the application uses. This can be ignored. 16 | {registered, []}, 17 | 18 | % Applications that are to be started prior to this one. This can be ignored 19 | % leave it alone unless you understand it well and let the .rel files in 20 | % your release handle this. 21 | {applications, 22 | [ 23 | kernel, 24 | stdlib 25 | ]}, 26 | 27 | % OTP application loader will load, but not start, included apps. Again 28 | % this can be ignored as well. To load but not start an application it 29 | % is easier to include it in the .rel file followed by the atom 'none' 30 | {included_applications, []}, 31 | 32 | % configuration parameters similar to those in the config file specified 33 | % on the command line. can be fetched with gas:get_env 34 | {env, [ 35 | % If an error/crash occurs during processing of a function, 36 | % should the TApplicationException serialized back to the client 37 | % include the erlang backtrace? 38 | {exceptions_include_traces, true} 39 | ]}, 40 | 41 | % The Module and Args used to start this application. 42 | {mod, {thrift_app, []}} 43 | ] 44 | }. 45 | -------------------------------------------------------------------------------- /deps/thrift/src/thrift.appup.src: -------------------------------------------------------------------------------- 1 | {"%VSN%",[],[]}. 
2 | -------------------------------------------------------------------------------- /deps/thrift/src/thrift_app.erl: -------------------------------------------------------------------------------- 1 | %%%------------------------------------------------------------------- 2 | %%% File: thrift_app.erl 3 | %%% @author Cliff Moon <> [] 4 | %%% @copyright 2009 Cliff Moon 5 | %%% @doc 6 | %%% 7 | %%% @end 8 | %%% 9 | %%% @since 2009-04-04 by Cliff Moon 10 | %%%------------------------------------------------------------------- 11 | -module(thrift_app). 12 | -author(''). 13 | 14 | -behaviour(application). 15 | 16 | %% Application callbacks 17 | -export([start/2, stop/1]). 18 | 19 | %%==================================================================== 20 | %% Application callbacks 21 | %%==================================================================== 22 | %%-------------------------------------------------------------------- 23 | %% @spec start(Type, StartArgs) -> {ok, Pid} | 24 | %% {ok, Pid, State} | 25 | %% {error, Reason} 26 | %% @doc This function is called whenever an application 27 | %% is started using application:start/1,2, and should start the processes 28 | %% of the application. If the application is structured according to the 29 | %% OTP design principles as a supervision tree, this means starting the 30 | %% top supervisor of the tree. 31 | %% @end 32 | %%-------------------------------------------------------------------- 33 | start(_Type, StartArgs) -> 34 | case thrift_sup:start_link() of 35 | {ok, Pid} -> 36 | {ok, Pid}; 37 | Error -> 38 | Error 39 | end. 40 | 41 | %%-------------------------------------------------------------------- 42 | %% @spec stop(State) -> void() 43 | %% @doc This function is called whenever an application 44 | %% has stopped. It is intended to be the opposite of Module:start/2 and 45 | %% should do any necessary cleaning up. The return value is ignored. 
46 | %% @end 47 | %%-------------------------------------------------------------------- 48 | stop(_State) -> 49 | ok. 50 | 51 | %%==================================================================== 52 | %% Internal functions 53 | %%==================================================================== 54 | 55 | 56 | -------------------------------------------------------------------------------- /deps/thrift/src/thrift_base64_transport.erl: -------------------------------------------------------------------------------- 1 | -module(thrift_base64_transport). 2 | 3 | -behaviour(thrift_transport). 4 | 5 | %% API 6 | -export([new/1, new_transport_factory/1]). 7 | 8 | %% thrift_transport callbacks 9 | -export([write/2, read/2, flush/1, close/1]). 10 | 11 | %% State 12 | -record(b64_transport, {wrapped}). 13 | 14 | new(Wrapped) -> 15 | State = #b64_transport{wrapped = Wrapped}, 16 | thrift_transport:new(?MODULE, State). 17 | 18 | 19 | write(#b64_transport{wrapped = Wrapped}, Data) -> 20 | thrift_transport:write(Wrapped, base64:encode(iolist_to_binary(Data))). 21 | 22 | 23 | %% base64 doesn't support reading quite yet since it would involve 24 | %% nasty buffering and such 25 | read(#b64_transport{wrapped = Wrapped}, Data) -> 26 | {error, no_reads_allowed}. 27 | 28 | 29 | flush(#b64_transport{wrapped = Wrapped}) -> 30 | thrift_transport:write(Wrapped, <<"\n">>), 31 | thrift_transport:flush(Wrapped). 32 | 33 | 34 | close(Me = #b64_transport{wrapped = Wrapped}) -> 35 | flush(Me), 36 | thrift_transport:close(Wrapped). 37 | 38 | 39 | %%%% FACTORY GENERATION %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 40 | new_transport_factory(WrapFactory) -> 41 | F = fun() -> 42 | {ok, Wrapped} = WrapFactory(), 43 | new(Wrapped) 44 | end, 45 | {ok, F}. 
--------------------------------------------------------------------------------
/deps/thrift/src/thrift_disk_log_transport.erl:
--------------------------------------------------------------------------------
%%%-------------------------------------------------------------------
%%% File : thrift_disk_log_transport.erl
%%% Author : Todd Lipcon
%%% Description : Write-only Thrift transport outputting to disk_log
%%% Created : 22 Apr 2008 by Todd Lipcon
%%%
%%% Todo: this might be better off as a gen_server type of transport
%%% that handles stuff like group commit, similar to TFileTransport
%%% in cpp land
%%%-------------------------------------------------------------------
-module(thrift_disk_log_transport).

-behaviour(thrift_transport).

%% API
-export([new/2, new_transport_factory/2, new_transport_factory/3]).

%% thrift_transport callbacks
-export([read/2, write/2, force_flush/1, flush/1, close/1]).

%% state
-record(dl_transport, {log,
                       close_on_close = false,
                       sync_every = infinity,
                       sync_tref}).


%% Create a transport attached to an already open log.
%% If you'd like this transport to close the disk_log using disk_log:lclose()
%% when the transport is closed, pass a {close_on_close, true} tuple in the
%% Opts list.
new(LogName, Opts) when is_atom(LogName), is_list(Opts) ->
    State = parse_opts(Opts, #dl_transport{log = LogName}),

    State2 =
        case State#dl_transport.sync_every of
            N when is_integer(N), N > 0 ->
                %% BUG FIX: timer:apply_interval/4 takes the argument list
                %% as its fourth parameter.  Passing the bare record made
                %% the periodic force_flush call fail with a badarg when
                %% the timer fired; it must be wrapped in a list.
                {ok, TRef} = timer:apply_interval(N, ?MODULE, force_flush, [State]),
                State#dl_transport{sync_tref = TRef};
            _ -> State
        end,

    thrift_transport:new(?MODULE, State2).
%% Fold the proplist of options into the transport state.
parse_opts([], State) ->
    State;
parse_opts([{close_on_close, Bool} | Rest], State) when is_boolean(Bool) ->
    %% BUG FIX: recurse on Rest.  The original returned immediately here
    %% (and in the sync_every clause), silently discarding every option
    %% after the first one — cf. the correct recursive parse_opts in
    %% thrift_file_transport.erl.
    parse_opts(Rest, State#dl_transport{close_on_close = Bool});
parse_opts([{sync_every, Int} | Rest], State) when is_integer(Int), Int > 0 ->
    parse_opts(Rest, State#dl_transport{sync_every = Int}).


%%%% TRANSPORT IMPLENTATION %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%% disk_log_transport is write-only
read(_State, Len) ->
    {error, no_read_from_disk_log}.

%% Append Data (an iolist) to the log asynchronously, without a sync.
write(#dl_transport{log = Log}, Data) ->
    disk_log:balog(Log, erlang:iolist_to_binary(Data)).

%% Force an fsync of the log; invoked periodically by the timer set up
%% in new/2 when sync_every is configured.
force_flush(#dl_transport{log = Log}) ->
    error_logger:info_msg("~p syncing~n", [?MODULE]),
    disk_log:sync(Log).

flush(#dl_transport{log = Log, sync_every = SE}) ->
    %% NOTE(review): the record default for sync_every is 'infinity', not
    %% 'undefined', so this first clause can never match state produced by
    %% parse_opts — with no timer configured, flush never syncs.  Behavior
    %% preserved here; confirm whether 'infinity' was intended.
    case SE of
        undefined -> % no time-based sync
            disk_log:sync(Log);
        _Else -> % sync will happen automagically
            ok
    end.


%% On close, close the underlying log if we're configured to do so.
close(#dl_transport{close_on_close = false}) ->
    ok;
close(#dl_transport{log = Log}) ->
    disk_log:lclose(Log).


%%%% FACTORY GENERATION %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

new_transport_factory(Name, ExtraLogOpts) ->
    new_transport_factory(Name, ExtraLogOpts, [{close_on_close, true},
                                               {sync_every, 500}]).

new_transport_factory(Name, ExtraLogOpts, TransportOpts) ->
    F = fun() -> factory_impl(Name, ExtraLogOpts, TransportOpts) end,
    {ok, F}.

%% Open (or repair) the wrap log and hand it to new/2.
factory_impl(Name, ExtraLogOpts, TransportOpts) ->
    LogOpts = [{name, Name},
               {format, external},
               {type, wrap} |
               ExtraLogOpts],
    Log =
        case disk_log:open(LogOpts) of
            {ok, Log} ->
                Log;
            {repaired, Log, Info1, Info2} ->
                error_logger:info_msg("Disk log ~p repaired: ~p, ~p~n", [Log, Info1, Info2]),
                Log
        end,
    new(Log, TransportOpts).
%% deps/thrift/src/thrift_file_transport.erl (close/1 continues past this chunk)
-module(thrift_file_transport).
-author(todd@amiestreet.com).

-behaviour(thrift_transport).

-export([new_reader/1,
         new/1,
         new/2,
         write/2, read/2, flush/1, close/1]).

%% device       : an io_device() from file:open/2
%% should_close : close the device when the transport is closed
%% mode         : 'read' or 'write' — the transport is unidirectional
-record(t_file_transport, {device,
                           should_close = true,
                           mode = write}).

%%%% CONSTRUCTION %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%% Open Filename for reading and wrap it in a read-mode transport.
new_reader(Filename) ->
    case file:open(Filename, [read, binary, {read_ahead, 1024*1024}]) of
        {ok, IODevice} ->
            new(IODevice, [{should_close, true}, {mode, read}]);
        Error -> Error
    end.

new(Device) ->
    new(Device, []).

%% Device :: io_device()
%%
%% Device should be opened in raw and binary mode.
new(Device, Opts) when is_list(Opts) ->
    State = parse_opts(Opts, #t_file_transport{device = Device}),
    thrift_transport:new(?MODULE, State).


%% Parse options
parse_opts([{should_close, Bool} | Rest], State) when is_boolean(Bool) ->
    parse_opts(Rest, State#t_file_transport{should_close = Bool});
parse_opts([{mode, Mode} | Rest], State)
  when Mode =:= write;
       Mode =:= read ->
    parse_opts(Rest, State#t_file_transport{mode = Mode});
parse_opts([], State) ->
    State.


%%%% TRANSPORT IMPL %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

write(#t_file_transport{device = Device, mode = write}, Data) ->
    file:write(Device, Data);
write(_T, _D) ->
    {error, read_mode}.


read(#t_file_transport{device = Device, mode = read}, Len)
  when is_integer(Len), Len >= 0 ->
    file:read(Device, Len);
read(_T, _D) ->
    %% BUG FIX: reading from a write-mode transport previously reported
    %% {error, read_mode}, which describes the opposite failure; the
    %% transport is in *write* mode here.
    {error, write_mode}.

flush(#t_file_transport{device = Device, mode = write}) ->
    file:sync(Device).
%% deps/thrift/src/thrift_file_transport.erl (continued)

%% Close the underlying device only when should_close was configured.
close(#t_file_transport{device = Device, should_close = SC}) ->
    case SC of
        true ->
            file:close(Device);
        false ->
            ok
    end.

%% deps/thrift/src/thrift_service.erl
-module(thrift_service).

-export([behaviour_info/1]).

behaviour_info(callbacks) ->
    [{function_info, 2}];
%% FIX: answer 'undefined' for any other query, per the OTP
%% behaviour_info/1 convention, instead of raising function_clause.
behaviour_info(_Else) ->
    undefined.

%% deps/thrift/src/thrift_socket_transport.erl (factory section continues past this chunk)
-module(thrift_socket_transport).

-behaviour(thrift_transport).

-export([new/1,
         new/2,
         write/2, read/2, flush/1, close/1,

         new_transport_factory/3]).

%% socket       : a connected gen_tcp socket (passive, binary mode expected)
%% recv_timeout : ms to wait in gen_tcp:recv/3 before giving up
-record(data, {socket,
               recv_timeout=infinity}).

new(Socket) ->
    new(Socket, []).

%% Wrap Socket; honours an optional {recv_timeout, Ms} option.
new(Socket, Opts) when is_list(Opts) ->
    State =
        case lists:keysearch(recv_timeout, 1, Opts) of
            {value, {recv_timeout, Timeout}}
              when is_integer(Timeout), Timeout > 0 ->
                #data{socket=Socket, recv_timeout=Timeout};
            _ ->
                #data{socket=Socket}
        end,
    thrift_transport:new(?MODULE, State).

%% Data :: iolist()
write(#data{socket = Socket}, Data) ->
    gen_tcp:send(Socket, Data).

%% Read exactly Len bytes (passive socket).  On timeout the socket is
%% closed before the error is returned, so callers cannot keep using a
%% half-dead connection.
read(#data{socket=Socket, recv_timeout=Timeout}, Len)
  when is_integer(Len), Len >= 0 ->
    case gen_tcp:recv(Socket, Len, Timeout) of
        Err = {error, timeout} ->
            error_logger:info_msg("read timeout: peer conn ~p", [inet:peername(Socket)]),
            gen_tcp:close(Socket),
            Err;
        Data -> Data
    end.

%% We can't really flush - everything is flushed when we write
flush(_) ->
    ok.

close(#data{socket = Socket}) ->
    gen_tcp:close(Socket).
%% deps/thrift/src/thrift_socket_transport.erl (continued)

%%%% FACTORY GENERATION %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%


%% Options accepted by new_transport_factory/3, collected from a
%% proplist in a single O(n) pass by parse_factory_options/2.
-record(factory_opts, {connect_timeout = infinity,
                       sockopts = [],
                       framed = false}).

parse_factory_options([], Acc) ->
    Acc;
parse_factory_options([{framed, Bool} | Rest], Acc) when is_boolean(Bool) ->
    parse_factory_options(Rest, Acc#factory_opts{framed = Bool});
parse_factory_options([{sockopts, SockOpts} | Rest], Acc) when is_list(SockOpts) ->
    parse_factory_options(Rest, Acc#factory_opts{sockopts = SockOpts});
parse_factory_options([{connect_timeout, TO} | Rest], Acc) when TO =:= infinity; is_integer(TO) ->
    parse_factory_options(Rest, Acc#factory_opts{connect_timeout = TO}).


%%
%% Generates a "transport factory" function - a fun which returns a
%% thrift_transport() instance.  This can be passed into a protocol
%% factory to generate a connection to a thrift server over a socket.
%%
new_transport_factory(Host, Port, Options) ->
    Parsed = parse_factory_options(Options, #factory_opts{}),
    {ok, fun() -> factory_connect(Host, Port, Parsed) end}.

%% Open the TCP connection and wrap it in a framed or buffered transport.
factory_connect(Host, Port, Opts) ->
    SockOpts = [binary,
                {packet, 0},
                {active, false},
                {nodelay, true} |
                Opts#factory_opts.sockopts],
    case catch gen_tcp:connect(Host, Port, SockOpts,
                               Opts#factory_opts.connect_timeout) of
        {ok, Sock} ->
            {ok, Raw} = thrift_socket_transport:new(Sock),
            wrap_transport(Raw, Opts#factory_opts.framed);
        Error ->
            Error
    end.

wrap_transport(Transport, true)  -> thrift_framed_transport:new(Transport);
wrap_transport(Transport, false) -> thrift_buffered_transport:new(Transport).
%% deps/thrift/src/thrift_sup.erl
%%%-------------------------------------------------------------------
%%% File:      thrift_sup.erl
%%% @author    Cliff Moon <> []
%%% @copyright 2009 Cliff Moon
%%% @doc  Empty top-level supervisor for the thrift application.
%%% @end
%%% @since 2009-04-04 by Cliff Moon
%%%-------------------------------------------------------------------
-module(thrift_sup).
-author('').

-behaviour(supervisor).

%% API
-export([start_link/0]).

%% Supervisor callbacks
-export([init/1]).

-define(SERVER, ?MODULE).

%%====================================================================
%% API functions
%%====================================================================
%%--------------------------------------------------------------------
%% @spec start_link() -> {ok,Pid} | ignore | {error,Error}
%% @doc Starts the supervisor, registered locally as ?SERVER.
%% @end
%%--------------------------------------------------------------------
start_link() ->
    supervisor:start_link({local, ?SERVER}, ?MODULE, []).

%%====================================================================
%% Supervisor callbacks
%%====================================================================
%% No static children: thrift servers are started by their users.
%% {one_for_all, 0, 1} — any crash (there are none to supervise)
%% would shut the supervisor down.
init([]) ->
    {ok, {{one_for_all, 0, 1}, []}}.

%%====================================================================
%% Internal functions
%%====================================================================

%% deps/thrift/src/thrift_transport.erl
-module(thrift_transport).

-export([behaviour_info/1]).

-export([new/2,
         write/2,
         read/2,
         flush/1,
         close/1
         ]).

%% Callbacks every transport implementation must provide.
behaviour_info(callbacks) ->
    [{read, 2},
     {write, 2},
     {flush, 1},
     {close, 1}
    ];
%% FIX: answer 'undefined' for any other query, per the OTP
%% behaviour_info/1 convention, instead of raising function_clause.
behaviour_info(_Else) ->
    undefined.

%% A transport is a callback module plus its opaque state.
-record(transport, {module, data}).

new(Module, Data) when is_atom(Module) ->
    {ok, #transport{module = Module,
                    data = Data}}.

%% Data :: iolist()
write(#transport{module = Module, data = State}, Data) ->
    Module:write(State, Data).

read(#transport{module = Module, data = State}, Len) when is_integer(Len) ->
    Module:read(State, Len).

flush(#transport{module = Module, data = Data}) ->
    Module:flush(Data).

close(#transport{module = Module, data = Data}) ->
    Module:close(Data).
---- deps/thrift/vsn.mk ----
THRIFT_VSN=0.1

---- dist_config.json ----
{
  "blocksize": 4096,
  "text_port": 11222,
  "thrift_port": 9200,
  "web_port": 8080,
  "buffered_writes": false,
  "directory": "/bfd/dyn-int-data",
  "storage_mod": "dets_storage",
  "n": 1,
  "r": 1,
  "w": 1,
  "q": 0,
  "cache": false,
  "cache_size": 104857600
}

---- doc/BLOOM_FORMAT.txt ----
BIG ENDIAN


VERSION:8/integer, N:64/integer, E:64/float,
M:32/integer, K:32/integer, Keys:64/integer,
Seed:32/integer, reserved:64 bytes,

BITS

---- doc/dynomite.R ----
# Load every tab-separated latency log under `path`, print summary stats
# and the overall request rate, then plot latency per host.
analyze_dynomite <- function(path) {
  cols <- c("time", "method", "latency", "key", "host")
  files <- list.files(path)
  data <- data.frame()
  for (file in files) {
    d <- read.table(paste(path, file, sep=""), header=FALSE, sep="\t", col.names = cols)
    data <- rbind(data, d)
  }
  print(summary(data))
  print("reqs/s")
  print(length(data$method) / (max(data$time) - min(data$time)))
  graph_dynomite(data)
}

# One latency-vs-time scatter plot per host, on a two-column grid.
graph_dynomite <- function(data) {
  hosts <- unique(data$host)
  par(mfrow = c(ceiling(length(hosts)/2), 2), mai = c(0.5,0.1,0.1,0))
  # BUG FIX: the original looped `for (i in hosts)` and then indexed
  # `hosts[i]`, using each *value* as an index, which yields NA for
  # non-integer host labels.  Iterate over the host values directly.
  for (h in hosts) {
    hostspec <- subset(data, data$host == h)
    plot(hostspec$time, hostspec$latency, main = h)
  }
}
-------------------------------------------------------------------------------- /ebin/.gitignore: -------------------------------------------------------------------------------- 1 | * 2 | -------------------------------------------------------------------------------- /ebin/dynomite.app: -------------------------------------------------------------------------------- 1 | {application, dynomite, 2 | [{description, "Dynomite Storage Node"}, 3 | {mod, {dynomite_app, []}}, 4 | {vsn, "?VERSION"}, 5 | {modules, 6 | [ 7 | block_server, bootstrap, commands, configuration, dets_storage, dict_storage, dmerkle, dynomite_rpc, 8 | dmtree, dynomite_pb, dummy_server, dynomite, dynomite_app, dynomite_prof, dynomite_sup, dynomite_thrift_service, dynomite_web, fail_storage, fnv, fs_storage, 9 | lib_misc, mediator, membership, mnesia_storage, murmur, partitions, rate, socket_server, stats_server, storage_manager, 10 | storage_server, storage_server_sup, stream, sync_manager, sync_server, sync_server_sup, tc_storage, ulimit, vector_clock, 11 | web_rpc 12 | ]}, 13 | {registered, []}, 14 | {applications, [kernel, stdlib, sasl, crypto, mochiweb, thrift]} 15 | ]}. -------------------------------------------------------------------------------- /elibs/bloom.erl: -------------------------------------------------------------------------------- 1 | %%%------------------------------------------------------------------- 2 | %%% File: bloom.erl 3 | %%% @author Cliff Moon [] 4 | %%% @copyright 2009 Cliff Moon 5 | %%% @doc 6 | %%% 7 | %%% @end 8 | %%% 9 | %%% @since 2009-04-18 by Cliff Moon 10 | %%%------------------------------------------------------------------- 11 | -module(bloom). 12 | -author('cliff@powerset.com'). 13 | 14 | %% API 15 | -export([start/3, put/2, has/2, mem_size/1, key_size/1, stop/1]). 16 | 17 | %% COMMANDS 18 | -define(SETUP, $s). 19 | -define(PUT, $p). 20 | -define(HAS, $h). 21 | -define(MEM_SIZE, $m). 22 | -define(KEY_SIZE, $k). 23 | 24 | -ifdef(TEST). 
-include("etest/bloom_test.erl").
-endif.

%%====================================================================
%% API
%%====================================================================

%% Load the port driver and initialise a bloom filter backed by
%% Filename, sized for N keys at error rate E.
start(Filename, N, E) ->
    case load_driver() of
        ok ->
            Port = open_port({spawn, 'bloom_drv'}, [binary]),
            port_command(Port, [?SETUP, term_to_binary({Filename, N, E})]),
            {ok, {bloom, Port}};
        {error, Err} ->
            {error, erl_ddll:format_error(Err)}
    end.

%% Insert Key (fire-and-forget; the driver sends no reply).
put({bloom, Port}, Key) ->
    port_command(Port, [?PUT, Key]).

%% Membership test; blocks until the driver replies.
has({bloom, Port}, Key) ->
    port_command(Port, [?HAS, Key]),
    receive_term(Port).

%% Bytes of memory used by the filter.
mem_size({bloom, Port}) ->
    port_command(Port, [?MEM_SIZE]),
    receive_term(Port).

%% Number of keys inserted so far.
key_size({bloom, Port}) ->
    port_command(Port, [?KEY_SIZE]),
    receive_term(Port).

stop({bloom, Port}) ->
    unlink(Port),
    port_close(Port).

%%====================================================================
%% Internal functions
%%====================================================================

%% Wait for one binary reply from the port and decode it.
receive_term(Port) ->
    receive
        {Port, {data, Bin}} -> binary_to_term(Bin)
    end.

load_driver() ->
    Dir = filename:join([filename:dirname(code:which(bloom)), "..", "priv"]),
    erl_ddll:load(Dir, "bloom_drv").
%% elibs/commands.erl
%%%-------------------------------------------------------------------
%%% File:      commands.erl
%%% @author    Cliff Moon []
%%% @copyright 2008 Cliff Moon
%%% @doc  Run a one-shot rpc:call described by -node/-m/-f/-a command
%%%       line flags and print the result.
%%% @end
%%% @since 2008-07-27 by Cliff Moon
%%%-------------------------------------------------------------------
-module(commands).
-author('cliff@powerset.com').

%% API
-export([start/0]).

%%====================================================================
%% API
%%====================================================================

%% Read node/module/function/args from the emulator flags, perform the
%% call, print the result, and give io a moment to flush before halt.
start() ->
    {Node, Module, Function, Arguments} = process_arguments([node, m, f, a]),
    Result = rpc:call(Node, Module, Function, Arguments),
    io:format("~p~n", [Result]),
    timer:sleep(10).

%% Resolve each flag to its value(s) and pack them into a tuple in the
%% same order the flags were requested.
process_arguments(Flags) ->
    list_to_tuple([flag_value(Flag) || Flag <- Flags]).

%% A flag with exactly one value yields that value; several values
%% yield a flat list; an absent flag yields [].
flag_value(Flag) ->
    case init:get_argument(Flag) of
        {ok, Lists} ->
            case lists:flatten([atomize(L) || L <- Lists]) of
                [Single] -> Single;
                Many     -> Many
            end;
        error ->
            []
    end.

%% Convert a (possibly nested) list of strings into atoms.
atomize([E|L]) when is_list(E) ->
    lists:map(fun atomize/1, [E|L]);
atomize(L) -> list_to_atom(L).
%% elibs/dets_storage.erl
%%%-------------------------------------------------------------------
%%% File:      dets_storage.erl
%%% @author    Cliff Moon <> []
%%% @copyright 2008 Cliff Moon
%%% @doc  dets-backed storage engine for dynomite.
%%% @end
%%% @since 2008-11-15 by Cliff Moon
%%%-------------------------------------------------------------------
-module(dets_storage).
-author('cliff@powerset.com').

%% API
-export([open/2, close/1, get/2, put/4, has_key/2, delete/2, fold/3]).

%% One stored entry: vector-clock context plus the list of values.
-record(row, {key, context, values}).

%%====================================================================
%% API
%%====================================================================

%% Open (or create) the dets table file under Directory.  keypos 2
%% makes #row.key the table key.
open(Directory, Name) ->
    ok = filelib:ensure_dir(Directory ++ "/"),
    TableName = list_to_atom(lists:concat([Name, '/', node()])),
    dets:open_file(TableName, [{file, lists:concat([Directory, "/storage.dets"])}, {keypos, 2}]).

close(Table) -> dets:close(Table).

%% Fold Fun over every {Key, Context, Values} triple in the table.
fold(Fun, Table, AccIn) when is_function(Fun) ->
    dets:foldl(fun(#row{key=Key,context=Context,values=Values}, Acc) ->
                       Fun({Key, Context, Values}, Acc)
               end, AccIn, Table).

put(Key, Context, Values, Table) ->
    case dets:insert(Table, [#row{key=Key,context=Context,values=Values}]) of
        ok -> {ok, Table};
        Failure -> Failure
    end.

get(Key, Table) ->
    case dets:lookup(Table, Key) of
        [] -> {ok, not_found};
        [#row{context=Context,values=Values}] -> {ok, {Context, Values}}
    end.

has_key(Key, Table) ->
    case dets:member(Table, Key) of
        true -> {ok, true};
        false -> {ok, false};
        Failure -> Failure
    end.

delete(Key, Table) ->
    case dets:delete(Table, Key) of
        ok -> {ok, Table};
        Failure -> Failure
    end.

%%====================================================================
%% Internal functions
%%====================================================================

%% elibs/dict_storage.erl — in-memory dict-backed storage engine
-module (dict_storage).
-export ([open/2, close/1, get/2, put/4, has_key/2, fold/3, delete/2, info/1]).

% we ignore the name, since it can't really help us.
open(_, _) -> {ok, dict:new()}.

% noop
close(_Table) -> ok.

info(Table) -> dict:fetch_keys(Table).

%% Fold Fun over every {Key, Context, Values} triple.
%% BUG FIX: the original matched {Context, [Value]} and unwrapped the
%% single value, which (a) crashed with function_clause on entries
%% holding more than one value (put/4 happily stores value lists) and
%% (b) was inconsistent with dets_storage:fold/3, which passes the
%% whole values list through to Fun.
fold(Fun, Table, AccIn) when is_function(Fun) ->
  dict:fold(fun(Key, {Context, Values}, Acc) ->
    Fun({Key, Context, Values}, Acc)
  end, AccIn, Table).

put(Key, Context, Value, Table) ->
  ToPut = if
    is_list(Value) -> Value;
    true -> [Value]
  end,
  {ok, dict:store(Key, {Context,ToPut}, Table)}.

get(Key, Table) ->
  case dict:find(Key, Table) of
    {ok, Value} -> {ok, Value};
    _ -> {ok, not_found}
  end.

has_key(Key, Table) ->
  {ok, dict:is_key(Key, Table)}.

delete(Key, Table) ->
  {ok, dict:erase(Key, Table)}.

%% elibs/dummy_server.erl (function bodies continue past this chunk)
-module(dummy_server).

-export([start_link/1, stop/1]).

-behavior(gen_server).
%% gen_server callbacks
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
         terminate/2, code_change/3]).
%% elibs/dummy_server.erl (continued) — a no-op gen_server used as a
%% stand-in when a subsystem (web/thrift) is disabled in the config.

start_link(Name) ->
    gen_server:start_link({local, Name}, ?MODULE, [], []).

stop(Server) ->
    gen_server:cast(Server, stop).

%%%%%% DUMMY GEN_SERVER %%%%%%%%%%%%%%%%%%%

init([]) ->
    {ok, undefined}.

%% Reports zero connections so stats collectors keep working.
handle_call(connections, _From, State) ->
    {reply, 0, State}.

handle_cast(stop, State) ->
    {stop, shutdown, State};
handle_cast(_Req, State) ->
    {noreply, State}.

handle_info(_Msg, State) ->
    {noreply, State}.

terminate(_Reason, _State) ->
    ok.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.

%% elibs/dynomite.erl (internal functions continue past this chunk)
-module(dynomite).

-export([start/0, running/1, running_nodes/0, pause_all_sync/0, start_all_sync/0]).

-include("../include/common.hrl").

%% Boot crypto plus the applications dynomite depends on, in order.
start() ->
    crypto:start(),
    load_and_start_apps([os_mon, thrift, mochiweb, dynomite]).

%% True when Node hosts a live 'membership' process, i.e. is a running
%% dynomite node.  Monitors the remote name and treats an immediate
%% 'DOWN' as "not running".
%% NOTE(review): the 1 ms receive window makes this a heuristic — a
%% slow network could report a live node as down; confirm acceptable.
running(Node) ->
    Ref = erlang:monitor(process, {membership, Node}),
    Alive = receive
                {'DOWN', Ref, _, _, _} -> false
            after 1 ->
                true
            end,
    erlang:demonitor(Ref),
    Alive.

running_nodes() ->
    [Node || Node <- nodes([this, visible]), dynomite:running(Node)].

%% Pause the sync servers on every running node.
pause_all_sync() ->
    lists:foreach(fun sync_server:pause/1, all_sync_servers()).

%% Resume the sync servers on every running node.
start_all_sync() ->
    lists:foreach(fun sync_server:play/1, all_sync_servers()).

%% Collect the loaded sync servers from all running nodes.
all_sync_servers() ->
    lists:flatten([rpc:call(Node, sync_manager, loaded, [])
                   || Node <- running_nodes()]).
%%==============================================================

%% Load and start each application in order; on any failure, log it,
%% give the logger a moment to flush, and halt the VM with status 1.
load_and_start_apps([]) ->
    ok;
load_and_start_apps([App|Apps]) ->
    case application:load(App) of
        ok ->
            case application:start(App) of
                ok -> load_and_start_apps(Apps);
                Err ->
                    ?infoFmt("error starting ~p: ~p~n", [App, Err]),
                    timer:sleep(10),
                    halt(1)
            end;
        Err ->
            ?infoFmt("error loading ~p: ~p~n", [App, Err]),
            %% BUG FIX: removed a dangling `Err,` expression that had no
            %% effect (halt/1 below never returns anyway).
            timer:sleep(10),
            halt(1)
    end.

%% Unexported debug helper: dump process/memory info to a file every
%% 5 seconds.  NOTE(review): the dump path is hard-coded to a personal
%% home directory; parameterize before relying on this.
collect_loop() ->
    process_flag(trap_exit, true),
    Filename = io_lib:format("/home/cliff/dumps/~w-dyn.dump", [lib_misc:now_int()]),
    sys_info(Filename),
    receive
        nothing -> ok
    after 5000 -> collect_loop()
    end.

%% Write process count, memory usage and per-process info to Filename.
sys_info(Filename) ->
    {ok, IO} = file:open(Filename, [write]),
    ok = io:format(IO, "count ~p~n", [erlang:system_info(process_count)]),
    ok = io:format(IO, "memory ~p~n", [erlang:memory()]),
    ok = file:write(IO, erlang:system_info(procs)),
    file:close(IO).

%% elibs/dynomite_app.erl (callback bodies continue past this chunk)
%%%-------------------------------------------------------------------
%%% File:      dynomite_app.erl
%%% @author    Cliff Moon []
%%% @copyright 2008 Cliff Moon
%%% @doc  OTP application callback module for dynomite.
%%% @end
%%% @since 2008-06-27 by Cliff Moon
%%%-------------------------------------------------------------------
-module(dynomite_app).
-author('cliff@powerset.com').

-behaviour(application).

-include("../include/config.hrl").
-include("../include/common.hrl").

%% Application callbacks
-export([start/2, stop/1]).
%%====================================================================
%% Application callbacks
%%====================================================================
%%--------------------------------------------------------------------
%% @spec start(Type, StartArgs) -> {ok, Pid} |
%%                                 {ok, Pid, State} |
%%                                 {error, Reason}
%% @doc Application start callback: records the OS pid if a pidfile is
%% configured, then starts the supervision tree from the configured
%% config file (optionally joining a seed node first).
%% @end
%%--------------------------------------------------------------------
start(_Type, []) ->
    write_pidfile(),
    case application:get_env(config) of
        {ok, ConfigFile} ->
            case filelib:is_file(ConfigFile) of
                true -> join_and_start(ConfigFile);
                false -> {error, ?fmt("~p does not exist.", [ConfigFile])}
            end;
        undefined ->
            {error, ?fmt("No config file given.", [])}
    end.

%% Record our OS pid when a pidfile location is configured; otherwise
%% do nothing.
write_pidfile() ->
    case application:get_env(pidfile) of
        {ok, Location} ->
            Pid = os:getpid(),
            ok = file:write_file(Location, list_to_binary(Pid));
        undefined ->
            ok
    end.

%%--------------------------------------------------------------------
%% @spec stop(State) -> void()
%% @doc Application stop callback: shut down the top supervisor.
%% @end
%%--------------------------------------------------------------------
stop({_, Sup}) ->
    exit(Sup, shutdown),
    ok.
%%====================================================================
%% Internal functions
%%====================================================================

%% Raise the file-descriptor soft limit to cover the partition map:
%% roughly two fds per partition plus ~100 for client connections.
verify_ulimit(#config{q=Q}) ->
    Partitions = (2 bsl (Q-1)),
    % this is our estimated max # of fd's 2 per partition and 100 for connections
    Needed = Partitions * 3 + 103,
    case ulimit:start() of
        {ok, Handle} ->
            alter_ulimit(Handle, Needed);
        {error, Msg} ->
            error_logger:error_msg("Could not load ulimit driver ~p~n", [Msg])
    end.

%% Bump the soft limit only when it is below Needed, then release the
%% ulimit driver handle.
alter_ulimit(Handle, Needed) ->
    case ulimit:getulimit(Handle) of
        {Soft, _Hard} when Soft < Needed ->
            error_logger:info_msg("Setting ulimit to ~p to match the partition map.~n", [Needed]),
            ulimit:setulimit(Handle, Needed);
        _Enough ->
            ok
    end,
    ulimit:stop(Handle).

%% If a seed node was given via the jointo env var, ping it before
%% starting the supervisor; refuse to start when it is unreachable.
join_and_start(ConfigFile) ->
    case application:get_env(jointo) of
        {ok, NodeName} ->
            ?infoFmt("attempting to contact ~p~n", [NodeName]),
            case net_adm:ping(NodeName) of
                pong ->
                    ?infoFmt("Connected to ~p~n", [NodeName]),
                    dynomite_sup:start_link(ConfigFile);
                pang ->
                    {error, ?fmt("Could not connect to ~p. Exiting.", [NodeName])}
            end;
        undefined ->
            dynomite_sup:start_link(ConfigFile)
    end.

%% elibs/dynomite_rpc.erl — an interface to dynomite
%% (put/has_key/delete/close/robustify continue past this chunk)

-module(dynomite_rpc).
-author('cliff@powerset.com').

-export([connect/1, get/2, put/4, has_key/2, delete/2, close/1]).

%% Ping the node; succeed only when it answers.
connect(Node) ->
    case net_adm:ping(Node) of
        pong -> {ok, Node};
        pang -> {error, "Cannot connect."}
    end.

%% Fetch Key via the mediator on Node, falling back to another running
%% node when Node is down (see robustify/2 below).
get(Node, Key) ->
    robustify(Node,
              fun(N) ->
                      case rpc:call(N, mediator, get, [Key]) of
                          {badrpc, Reason} -> {failure, Reason};
                          Result -> Result
                      end
              end).
%% Store Key with Context/Value via the mediator on Node (or fallback).
put(Node, Key, Context, Value) ->
    robustify(Node, mediator_call(put, [Key, Context, Value])).

has_key(Node, Key) ->
    robustify(Node, mediator_call(has_key, [Key])).

delete(Node, Key) ->
    robustify(Node, mediator_call(delete, [Key])).

%% BUG FIX: erlang:disconnect/1 does not exist — the BIF that severs a
%% distribution connection is erlang:disconnect_node/1, so close/1
%% previously crashed with undef.
close(Node) -> erlang:disconnect_node(Node).

%% Build a fun performing an rpc:call to the mediator on a node,
%% normalising {badrpc, _} into {failure, _}.
mediator_call(F, Args) ->
    fun(N) ->
            case rpc:call(N, mediator, F, Args) of
                {badrpc, Reason} -> {failure, Reason};
                Result -> Result
            end
    end.

%% Run Fun against Node; if a nodedown for Node is already pending,
%% retry once against any other running dynomite node.
robustify(Node, Fun) ->
    erlang:monitor_node(Node, true),
    R = receive
            {nodedown, Node} ->
                case dynomite:running_nodes() of
                    [] -> {failure, "No dynomite nodes available."};
                    [NextNode|_] -> Fun(NextNode)
                end
        after 0 ->
                Fun(Node)
        end,
    erlang:monitor_node(Node, false),
    R.

%% elibs/dynomite_thrift_client.erl (function bodies continue past this chunk)
%%%-------------------------------------------------------------------
%%% File:      dynomite_thrift_client.erl
%%% @author    Cliff Moon []
%%% @copyright 2009 Cliff Moon
%%% @doc  Thin wrapper around thrift_client for the dynomite service.
%%% @end
%%% @since 2009-03-15 by Cliff Moon
%%%-------------------------------------------------------------------
-module(dynomite_thrift_client).
-author('cliff@powerset.com').

%% API
-export([start_link/2, get/2, put/4, has/2, remove/2, stop/1]).
%%====================================================================
%% API
%%====================================================================

%% Open a thrift connection to Host:Port speaking the generated
%% dynomite_thrift service.
start_link(Host, Port) when is_integer(Port) ->
    thrift_client:start_link(Host, Port, dynomite_thrift).

get(C, Key) ->
    thrift_client:call(C, get, [Key]).

put(C, Key, Context, Value) ->
    thrift_client:call(C, put, [Key, Context, Value]).

remove(C, Key) ->
    thrift_client:call(C, remove, [Key]).

has(C, Key) ->
    %% BUG FIX: the thrift method is named "has" (dynomite_thrift_service
    %% exports has/1 and dispatches by method name); calling "hash" could
    %% never resolve against the generated service definition.
    thrift_client:call(C, has, [Key]).

stop(C) ->
    thrift_client:close(C).
%%====================================================================
%% Internal functions
%%====================================================================

%% elibs/dynomite_thrift_service.erl (handler bodies continue past this chunk)
-module(dynomite_thrift_service).

-export([start_link/0, stop/1,
         handle_function/2,

         % Internal
         put/3,
         get/1,
         has/1,
         remove/1
         ]).

-include("config.hrl").
-include("common.hrl").
-include("dynomite_types.hrl").

%%%%% EXTERNAL INTERFACE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%% Start the thrift socket server on the configured port, or a dummy
%% gen_server when thrift is disabled (no thrift_port in the config).
start_link() ->
    Config = configuration:get_config(),
    case Config#config.thrift_port of
        undefined -> dummy_server:start_link(dynomite_thrift);
        Port -> thrift_socket_server:start([
                    {port, Port},
                    {name, dynomite_thrift},
                    {service, dynomite_thrift},
                    {handler, ?MODULE},
                    {max, 200},
                    {socket_opts, [{recv_timeout, infinity}]}])
    end.
%% Stop the underlying thrift socket server.
stop(Server) ->
    thrift_socket_server:stop(Server),
    ok.


%%%%% THRIFT INTERFACE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%% Dispatch a thrift call to the local function of the same name.
%% A plain 'ok' means a void reply; any other value is wrapped as
%% {reply, Value} for the thrift processor.
handle_function(Function, Args) when is_atom(Function), is_tuple(Args) ->
    % ?infoFmt("handling thrift stuff in PID ~p~n", [self()]),
    case apply(?MODULE, Function, tuple_to_list(Args)) of
        ok -> ok;
        Reply -> {reply, Reply}
    end.

%% Store Key -> Data.  ContextData is either 'undefined' or an
%% erlang-term-encoded vector-clock context; an empty binary is treated
%% the same as 'undefined'.  Returns the write count N, or throws a
%% thrift failureException on mediator failure.
put(Key, ContextData, Data) when
    is_binary(Key),
    (ContextData =:= undefined orelse is_binary(ContextData)),
    is_binary(Data) ->
    Context = if
        ContextData =:= undefined -> undefined;
        erlang:byte_size(ContextData) > 0 -> binary_to_term(ContextData);
        true -> undefined
    end,
    case mediator:put(binary_to_list(Key), {self(), Context}, Data) of
        {ok, N} -> N;
        {failure, Reason} -> throw(#failureException{message = iolist_to_binary(Reason)})
    end.


%% Fetch Key.  Misses yield an empty result set; hits carry the
%% term-encoded context alongside the values; mediator failures throw.
get(Key) when is_binary(Key) ->
    case mediator:get(binary_to_list(Key)) of
        {ok, not_found} -> #getResult{results = []};
        {ok, {Context, Values}} ->
            #getResult{context = term_to_binary(Context),
                       results = Values};
        {failure, Error} ->
            throw(#failureException{message = iolist_to_binary(Error)})
    end.

%% Existence check; returns the count N from the mediator.
%% NOTE(review): any other mediator reply raises case_clause here —
%% presumably intentional so the processor reports an error; confirm.
has(Key) when is_binary(Key) ->
    case mediator:has_key(binary_to_list(Key)) of
        {ok, {Bool, N}} when is_boolean(Bool) ->
            N
    end.

%% Delete Key; returns the write count N (crashes on mediator failure).
remove(Key) when is_binary(Key) ->
    {ok, N} = mediator:delete(binary_to_list(Key)),
    N.
-------------------------------------------------------------------------------- /elibs/dynomite_web.erl: --------------------------------------------------------------------------------
%%%-------------------------------------------------------------------
%%% File: dynomite_web.erl
%%% @author Cliff Moon []
%%% @copyright 2008 Cliff Moon
%%% @doc Embedded mochiweb front end serving the static status pages
%%%      and the small JSON rpc endpoint that they use.
%%% @end
%%%
%%% @since 2008-08-12 by Cliff Moon
%%%-------------------------------------------------------------------
-module(dynomite_web).
-author('cliff@powerset.com').

%% API
-export([start_link/0, stop/0, loop/2]).

-include("../include/config.hrl").
-include("../include/common.hrl").


%%====================================================================
%% API
%%====================================================================

%% Start the web console when a web_port is configured; otherwise start
%% a placeholder server so the supervision tree shape is unchanged.
start_link() ->
  #config{web_port = WebPort} = configuration:get_config(),
  case WebPort of
    undefined ->
      dummy_server:start_link(dynomite_web);
    ListenPort ->
      DocRoot = find_web_dir(),
      Handler = fun(Request) -> dynomite_web:loop(Request, DocRoot) end,
      mochiweb_http:start([{name, ?MODULE}, {loop, Handler}, {port, ListenPort}])
  end.

stop() ->
  mochiweb_http:stop(?MODULE).
%% Serve GET requests: static files from DocRoot (index.html for
%% directories) and "/rpc/<method>/<arg>" JSON calls; everything else
%% answers 404.
loop(Req, DocRoot) ->
  "/" ++ Path = Req:get(path),
  % error_logger:info_msg("path: ~p~n", [Path]),
  case Req:get(method) of
    'GET' ->
      %% SECURITY FIX: refuse paths containing ".." segments so a
      %% request like GET /../../etc/passwd cannot escape DocRoot.
      case safe_path(Path) andalso filelib:is_file(DocRoot ++ "/" ++ Path) of
        true ->
          case filelib:is_file(DocRoot ++ "/" ++ Path ++ "/index.html") of
            true ->
              {ok, Data} = file:read_file(DocRoot ++ "/" ++ Path ++ "/index.html"),
              Req:ok({"text/html",Data});
            false ->
              {ok, Data} = file:read_file(DocRoot++"/"++Path),
              Req:ok({"text/html",Data})
          end;
        false ->
          case Path of
            "rpc/" ++ FName -> rpc_invoke(FName, Req);
            _ -> Req:not_found()
          end
      end;
    _ ->
      Req:not_found()
  end.
%%====================================================================
%% Internal functions
%%====================================================================

%% true when no path segment is "..", i.e. the request cannot traverse
%% above the document root.
safe_path(Path) ->
  not lists:member("..", string:tokens(Path, "/")).

%% Invoke web_rpc:Meth(Arg) for a path of the form "Meth/Arg" and answer
%% with the JSON-encoded result.
rpc_invoke(Path, Req) ->
  [Meth, Arg] = [list_to_atom(M) || M <- string:tokens(Path, "/")],
  Result = web_rpc:Meth(Arg),
  % error_logger:info_msg("invoking web_rpc:~p(~p) got ~p~n", [Meth, Arg, Result]),
  Req:ok({"application/json", mochijson:encode(Result)}).

%% NOTE(review): unused in this module as visible here; kept in case
%% other code includes this file.
get_option(Option, Options) ->
  {proplists:get_value(Option, Options), proplists:delete(Option, Options)}.

%% Prefer ../web next to the beam file; fall back to ../priv/web.
find_web_dir() ->
  case filelib:is_dir(WebDir1 = filename:join([filename:dirname(code:which(?MODULE)), "..", "web"])) of
    true -> WebDir1;
    false -> filename:join([filename:dirname(code:which(?MODULE)), "..", "priv", "web"])
  end.
-------------------------------------------------------------------------------- /elibs/ets_storage.erl: --------------------------------------------------------------------------------
%%%-------------------------------------------------------------------
%%% File: ets_storage.erl
%%% @author Cliff Moon <> []
%%% @copyright 2008 Cliff Moon
%%% @doc In-memory storage engine backed by a public ets set.
%%%
%%% @end
%%%
%%% @since 2008-11-15 by Cliff Moon
%%%-------------------------------------------------------------------
-module(ets_storage).
-author('cliff@powerset.com').

%% API
-export([open/2, close/1, get/2, put/4, has_key/2, delete/2, fold/3]).

-include("common.hrl").

%% Row layout; {keypos, 2} below makes #row.key the table key.
-record(row, {key, context, values}).

%%====================================================================
%% API
%%====================================================================

%% Open a table. The directory is created only for interface parity
%% with the disk-backed storage modules; the data lives in ets.
open(Directory, Name) ->
  ok = filelib:ensure_dir(Directory ++ "/"),
  TableName = list_to_atom(lists:concat([Name, '/', node()])),
  Tid = ets:new(TableName, [set, public, {keypos, 2}]),
  % ?infoFmt("table: ~p~n", [Tid]),
  {ok, Tid}.

%% BUG FIX: this used to call dets:close/1 on an ets tid, which can
%% never succeed. Delete the ets table to release its memory and return
%% ok like the other storage backends do.
close(Table) ->
  ets:delete(Table),
  ok.

fold(Fun, Table, AccIn) when is_function(Fun) ->
  ets:foldl(fun(#row{key=Key,context=Context,values=Values}, Acc) ->
      Fun({Key, Context, Values}, Acc)
    end, AccIn, Table).

put(Key, Context, Values, Table) ->
  % ?infoFmt("ets:insert(~p, ~p)~n", [Table, #row{key=Key,context=Context,values=Values}]),
  ets:insert(Table, #row{key=Key,context=Context,values=Values}),
  {ok, Table}.
get(Key, Table) ->
  case ets:lookup(Table, Key) of
    [] -> {ok, not_found};
    [#row{context=Context,values=Values}] -> {ok, {Context, Values}}
  end.

%% ets:member/2 only ever returns a boolean, so wrap it directly; the
%% previous third "Failure" clause was unreachable.
has_key(Key, Table) ->
  {ok, ets:member(Table, Key)}.

%% BUG FIX: ets:delete/2 returns true, never ok, so the old case always
%% fell into its catch-all and returned 'true' instead of {ok, Table}.
delete(Key, Table) ->
  true = ets:delete(Table, Key),
  {ok, Table}.

%%====================================================================
%% Internal functions
%%====================================================================

-------------------------------------------------------------------------------- /elibs/fail_storage.erl: --------------------------------------------------------------------------------
-module (fail_storage).
-export ([open/2, close/1, get/2, fold/3, put/4, has_key/2, delete/2]).

% we ignore the name, since it can't really help us.
open(_, _) -> {ok, dict:new()}.

% noop
close(_Table) -> ok.

fold(_, _, Acc) -> Acc.

put(_Key, _Context, _Value, _Table) ->
  {'EXIT', "Sysadmin accidentally destroyed pager with a large hammer."}.

get(_Key, _Table) ->
  {'EXIT', "Groundskeepers stole the root password"}.

has_key(_Key, _Table) ->
  {'EXIT', "piezo-electric interference!"}.

delete(_Key, _Table) ->
  {'EXIT', "Netscape has crashed"}.
-------------------------------------------------------------------------------- /elibs/fnv.erl: --------------------------------------------------------------------------------
%%%-------------------------------------------------------------------
%%% File: fnv.erl
%%% @author Cliff Moon <> []
%%% @copyright 2009 Cliff Moon
%%% @doc 32-bit FNV hashing via the fnv_drv linked-in port driver.
%%%
%%% @end
%%%
%%% @since 2009-01-29 by Cliff Moon
%%%-------------------------------------------------------------------
-module(fnv).
-author('cliff@powerset.com').

-include("../include/common.hrl").

%% 32-bit FNV offset basis used as the default seed.
-define(SEED, 2166136261).
%% API
-export([start/0, stop/1, hash/1, hash/2]).

%%====================================================================
%% API
%%====================================================================

%% Load the driver and spawn a process that owns a registered port.
start() ->
  case load_driver() of
    ok ->
      Pid = spawn_link(fun() ->
          P = open(),
          register(fnv_drv, P),
          loop(P)
        end),
      timer:sleep(1),
      {ok, Pid};
    {error, Err} ->
      Msg = erl_ddll:format_error(Err),
      {error, Msg}
  end.

stop(P) ->
  unlink(P),
  exit(P, die).

%% Hash with the default seed.
hash(Thing) ->
  hash(Thing, ?SEED).

% hash(Thing, Seed) when is_list(Thing) -> %assume io_list
%   P = get_or_open(),
%   port_command(P, [term_to_binary(Seed)] ++ Thing),
%   recv(P);

hash(Thing, Seed) when is_binary(Thing) ->
  P = get_or_open(),
  convert(port_control(P, Seed, Thing));
% recv(P);

hash(Thing, Seed) ->
  P = get_or_open(),
  convert(port_control(P, Seed, term_to_binary(Thing))).
% recv(P).

%%====================================================================
%% Internal functions
%%====================================================================
%% Keep the registered port alive; the owner process expects no messages.
loop(P) ->
  receive _ -> loop(P) end.

%% port_control/3 returns the hash bytes as a list. RECONSTRUCTED from a
%% garbled line: decode them as a single 32-bit unsigned integer
%% (the 32-bit width matches the FNV-32 offset basis used as ?SEED).
convert(List) ->
  <<Hash:32/unsigned-integer>> = list_to_binary(List),
  Hash.

get_or_open() ->
  case get(fnv_drv) of
    undefined ->
      load_driver(),
      P = open(),
      put(fnv_drv, P),
      P;
    P -> P
  end.

open() ->
  open_port({spawn, fnv_drv}, [binary]).
%% Drivers live in ../priv relative to the compiled beam.
load_driver() ->
  PrivDir = filename:join([filename:dirname(code:which(?MODULE)), "..", "priv"]),
  erl_ddll:load(PrivDir, "fnv_drv").

recv(Port) ->
  receive
    {Port, {data, Bin}} -> binary_to_term(Bin);
    Other -> Other
  end.
-------------------------------------------------------------------------------- /elibs/murmur.erl: --------------------------------------------------------------------------------
%%%-------------------------------------------------------------------
%%% File: murmur.erl
%%% @author Cliff Moon <> []
%%% @copyright 2009 Cliff Moon
%%% @doc Murmur hashing through the murmur_drv port driver.
%%%
%%% @end
%%%
%%% @since 2009-01-29 by Cliff Moon
%%%-------------------------------------------------------------------
-module(murmur).
-author('cliff@powerset.com').

-define(SEED, 1465243800).
%% API
-export([start/0, stop/1, hash/1, hash/2]).

%%====================================================================
%% API
%%====================================================================

%% Load the driver and open a port; returns the port or {error, Msg}.
start() ->
  case load_driver() of
    {error, Err} -> {error, erl_ddll:format_error(Err)};
    ok -> open()
  end.

stop(Port) ->
  unlink(Port),
  exit(Port, die).

%% Hash with the default seed.
hash(Thing) ->
  hash(Thing, ?SEED).

hash(Thing, Seed) when is_list(Thing) -> %assume io_list
  Port = get_or_open(),
  port_command(Port, [term_to_binary(Seed)] ++ Thing),
  recv(Port);

hash(Thing, Seed) when is_binary(Thing) ->
  Port = get_or_open(),
  port_command(Port, [term_to_binary(Seed), Thing]),
  recv(Port);

hash(Thing, Seed) ->
  Port = get_or_open(),
  port_command(Port, [term_to_binary(Seed), term_to_binary(Thing)]),
  recv(Port).
%%====================================================================
%% Internal functions
%%====================================================================
%% Fetch the cached port from the process dictionary, starting the
%% driver on first use.
get_or_open() ->
  case get(murmur) of
    undefined -> start();
    Port -> Port
  end.

open() ->
  Port = open_port({spawn, murmur_drv}, [binary]),
  put(murmur, Port),
  Port.

load_driver() ->
  PrivDir = filename:join([filename:dirname(code:which(?MODULE)), "..", "priv"]),
  erl_ddll:load(PrivDir, "murmur_drv").

recv(Port) ->
  receive
    {Port, {data, Bin}} -> binary_to_term(Bin)
  end.
-------------------------------------------------------------------------------- /elibs/storage_server_sup.erl: --------------------------------------------------------------------------------
%%%-------------------------------------------------------------------
%%% File: untitled.erl
%%% @author Cliff Moon []
%%% @copyright 2008 Cliff Moon
%%% @doc Supervisor for the per-partition storage servers.
%%%
%%% @end
%%%
%%% @since 2008-06-27 by Cliff Moon
%%%-------------------------------------------------------------------
-module(storage_server_sup).
-author('cliff@powerset.com').

-behaviour(supervisor).

%% API
-export([start_link/0, storage_servers/0]).

%% Supervisor callbacks
-export([init/1]).

-include("config.hrl").

-define(SERVER, ?MODULE).

%%====================================================================
%% API functions
%%====================================================================
%%--------------------------------------------------------------------
%% @spec start_link() -> {ok,Pid} | ignore | {error,Error}
%% @doc Starts the supervisor
%% @end
%%--------------------------------------------------------------------
start_link() ->
  supervisor:start_link({local, storage_server_sup}, storage_server_sup, []).
%% Pids of all live storage servers under this supervisor; children
%% without a running process ('undefined') are dropped.
storage_servers() ->
  [Pid || {_Id, Pid, _Type, _Modules} <- supervisor:which_children(storage_server_sup),
          Pid =/= undefined].

%%====================================================================
%% Supervisor callbacks
%%====================================================================
%%--------------------------------------------------------------------
%% @spec init(Args) -> {ok, {SupFlags, [ChildSpec]}} |
%%                     ignore |
%%                     {error, Reason}
%% @doc Children are attached dynamically; start with none and allow at
%% most 10 restarts per second under a one_for_one strategy.
%% @end
%%--------------------------------------------------------------------
init([]) ->
  {ok, {{one_for_one, 10, 1}, []}}.

%%====================================================================
%% Internal functions
%%====================================================================
-------------------------------------------------------------------------------- /elibs/stream.erl: --------------------------------------------------------------------------------
%%%-------------------------------------------------------------------
%%% File: stream.erl
%%% @author Cliff Moon []
%%% @copyright 2009 Cliff Moon
%%% @doc Chunked transfer of {Context, Values} payloads between processes.
%%%
%%% @end
%%%
%%% @since 2009-01-01 by Cliff Moon
%%%-------------------------------------------------------------------
-module(stream).
-author('cliff@powerset.com').

-include("chunk_size.hrl").

%% API
-export([send/3, recv/3]).

-ifdef(TEST).
-include("etest/stream_test.erl").
-endif.
%%====================================================================
%% API
%%====================================================================

%% Receive a streamed {Context, Values} payload sent by send/3 from Pid,
%% tagged with Ref. Returns {ok, {Context, Values}} or {error, timeout}
%% when any individual message takes longer than Timeout.
recv(Pid, Ref, Timeout) ->
  receive
    {Pid, Ref, {context, Context}} -> recv(Pid, Ref, Timeout, {Context, []})
  after
    Timeout -> {error, timeout}
  end.

recv(Pid, Ref, Timeout, {Context, Values}) ->
  receive
    {Pid, Ref, start_value} ->
      case recv_value(Pid, Ref, Timeout, <<>>) of
        {error, timeout} -> {error, timeout};
        Value -> recv(Pid, Ref, Timeout, {Context, [Value|Values]})
      end;
    {Pid, Ref, eof} ->
      {ok, {Context, lists:reverse(Values)}}
  after
    Timeout -> {error, timeout}
  end.

%% Accumulate the chunks of one value until end_value arrives.
%% RECONSTRUCTED: the accumulator pattern was garbled in the source;
%% each data chunk is appended to the binary built so far, mirroring
%% the chunking done by send_value/4 below.
recv_value(Pid, Ref, Timeout, Bin) ->
  receive
    {Pid, Ref, {data, Data}} -> recv_value(Pid, Ref, Timeout, <<Bin/binary, Data/binary>>);
    {Pid, Ref, end_value} -> Bin
  after
    Timeout -> {error, timeout}
  end.

%% Stream {Context, Values} to RemotePid in ?CHUNK_SIZE pieces.
send(RemotePid, Ref, {Context, Values}) ->
  RemotePid ! {self(), Ref, {context, Context}},
  send(RemotePid, Ref, Values);

send(RemotePid, Ref, []) ->
  RemotePid ! {self(), Ref, eof};

send(RemotePid, Ref, [Val|Values]) ->
  RemotePid ! {self(), Ref, start_value},
  send_value(RemotePid, Ref, Val, 0),
  send(RemotePid, Ref, Values).

send_value(RemotePid, Ref, Bin, Skip) when Skip >= byte_size(Bin) ->
  RemotePid ! {self(), Ref, end_value};

send_value(RemotePid, Ref, Bin, Skip) ->
  if
    (Skip + ?CHUNK_SIZE) > byte_size(Bin) ->
      <<_:Skip/binary, Chunk/binary>> = Bin;
    true ->
      <<_:Skip/binary, Chunk:?CHUNK_SIZE/binary, _/binary>> = Bin
  end,
  RemotePid ! {self(), Ref, {data, Chunk}},
  send_value(RemotePid, Ref, Bin, Skip + ?CHUNK_SIZE).

%%====================================================================
%% Internal functions
%%====================================================================

-------------------------------------------------------------------------------- /elibs/sync_server.erl: --------------------------------------------------------------------------------
%%%-------------------------------------------------------------------
%%% File: sync_server.erl
%%% @author Cliff Moon [http://www.powerset.com/]
%%% @copyright 2008 Cliff Moon
%%% @doc Periodically reconciles a partition's replicas.
%%%
%%% @end
%%%
%%% @since 2008-10-03 by Cliff Moon
%%%-------------------------------------------------------------------
-module(sync_server).
-author('cliff@powerset.com').

%% API
-export([start_link/2, pause/1, play/1, loop/1]).

-record(state, {name, partition, paused}).

%%====================================================================
%% API
%%====================================================================
%%--------------------------------------------------------------------
%% @spec start_link() -> {ok,Pid} | ignore | {error,Error}
%% @doc Starts the server
%% @end
%%--------------------------------------------------------------------
start_link(Name, Partition) ->
  Pid = proc_lib:spawn_link(fun() ->
      sync_server:loop(#state{name=Name,partition=Partition,paused=false})
    end),
  register(Name, Pid),
  {ok, Pid}.

pause(Server) ->
  Server ! pause.

play(Server) ->
  Server ! play.
%% Internal functions

%% Sleep between 1x and 1.5x of an hour (jittered), handling pause/play
%% control messages, then kick off a sync round unless paused.
loop(State = #state{name=_Name,partition=Partition,paused=Paused}) ->
  Timeout = round((random:uniform() * 0.5 + 1) * 3600000),
  Paused1 = receive
    pause -> true;
    play -> false
  after Timeout ->
    Paused
  end,
  %% BUG FIX: the sync decision previously consulted the stale 'Paused'
  %% flag, so a just-received pause message did not stop the round and a
  %% just-received play did not trigger one until the next wakeup.
  if
    Paused1 -> ok;
    true ->
      Nodes = membership:nodes_for_partition(Partition),
      (catch run_sync(Nodes, Partition))
  end,
  sync_server:loop(State#state{paused=Paused1}).

%% Nothing to reconcile when we are the only replica.
run_sync(Nodes, _) when length(Nodes) == 1 ->
  noop;

%% Diff then sync the storage of two randomly-chosen replicas, reporting
%% progress to the sync_manager.
run_sync(Nodes, Partition) ->
  [Master|_] = Nodes,
  [NodeA,NodeB|_] = lib_misc:shuffle(Nodes),
  StorageName = list_to_atom(lists:concat([storage_, Partition])),
  KeyDiff = storage_server:diff({StorageName, NodeA}, {StorageName, NodeB}),
  sync_manager:sync(Partition, Master, NodeA, NodeB, length(KeyDiff)),
  storage_server:sync({StorageName, NodeA}, {StorageName, NodeB}),
  sync_manager:done(Partition).
-------------------------------------------------------------------------------- /elibs/sync_server_sup.erl: --------------------------------------------------------------------------------
%%%-------------------------------------------------------------------
%%% File: untitled.erl
%%% @author Cliff Moon []
%%% @copyright 2008 Cliff Moon
%%% @doc Supervisor for the per-partition sync servers.
%%%
%%% @end
%%%
%%% @since 2008-06-27 by Cliff Moon
%%%-------------------------------------------------------------------
-module(sync_server_sup).
-author('cliff@powerset.com').

-behaviour(supervisor).

%% API
-export([start_link/0, sync_servers/0]).

%% Supervisor callbacks
-export([init/1]).

-include("config.hrl").

-define(SERVER, ?MODULE).
%%====================================================================
%% API functions
%%====================================================================
%%--------------------------------------------------------------------
%% @spec start_link() -> {ok,Pid} | ignore | {error,Error}
%% @doc Starts the supervisor
%% @end
%%--------------------------------------------------------------------
start_link() ->
  supervisor:start_link({local, sync_server_sup}, sync_server_sup, []).

%% Pids of the running sync servers; children without a live process
%% ('undefined') are skipped.
sync_servers() ->
  [Pid || {_Id, Pid, _Type, _Modules} <- supervisor:which_children(sync_server_sup),
          Pid =/= undefined].

%%====================================================================
%% Supervisor callbacks
%%====================================================================
%%--------------------------------------------------------------------
%% @spec init(Args) -> {ok, {SupFlags, [ChildSpec]}} |
%%                     ignore |
%%                     {error, Reason}
%% @doc Children are added dynamically; start empty under one_for_one
%% with at most 10 restarts per second.
%% @end
%%--------------------------------------------------------------------
init([]) ->
  {ok, {{one_for_one, 10, 1}, []}}.
%%====================================================================
%% Internal functions
%%====================================================================
-------------------------------------------------------------------------------- /elibs/tc_storage.erl: --------------------------------------------------------------------------------
%%%-------------------------------------------------------------------
%%% File: tc_storage.erl
%%% @author Cliff Moon []
%%% @copyright 2008 Cliff Moon
%%% @doc Tokyo Cabinet backed storage engine (via tcerl/tcbdbets).
%%%
%%% @end
%%%
%%% @since 2008-10-26 by Cliff Moon
%%%-------------------------------------------------------------------
-module(tc_storage).
-author('cliff@powerset.com').

%% API
-export([open/2, close/1, get/2, put/4, has_key/2, delete/2, fold/3]).

%% Row layout; {keypos, 2} below makes #row.key the b-tree key.
-record(row, {key, context, value}).

%%====================================================================
%% API
%%====================================================================

%% Open (creating if necessary) the b-tree file Directory/Name.
open(Directory, Name) ->
  ok = filelib:ensure_dir(Directory ++ "/"),
  DbFile = lists:concat([Directory, "/", Name]),
  case ensure_tcerl_start() of
    {error, Reason} ->
      {error, Reason};
    ok ->
      tcbdbets:open_file([{file, DbFile}, {type, ordered_set}, {keypos, 2}])
  end.

close(DBHandle) ->
  tcbdbets:close(DBHandle).

%% Look up a key; {ok, not_found} when absent.
get(Key, DBHandle) ->
  case tcbdbets:lookup(DBHandle, Key) of
    [] -> {ok, not_found};
    [#row{context=Context, value=Value}] -> {ok, {Context, Value}}
  end.
%% Store Value (normalised to a list) under Key, then force a sync.
put(Key, Context, Value, DBHandle) ->
  ToPut = case is_list(Value) of
    true -> Value;
    false -> [Value]
  end,
  case tcbdbets:insert(DBHandle, [#row{key=Key, context=Context, value=ToPut}]) of
    ok ->
      tcbdbets:sync(DBHandle),
      {ok, DBHandle};
    {error, Reason} -> {error, Reason}
  end.

has_key(Key, DBHandle) ->
  case tcbdbets:member(DBHandle, Key) of
    {error, Reason} -> {error, Reason};
    Bool when is_boolean(Bool) -> {ok, Bool}
  end.

delete(Key, DBHandle) ->
  case tcbdbets:delete(DBHandle, Key) of
    ok -> {ok, DBHandle};
    {error, Reason} -> {error, Reason}
  end.

%% Fold Fun over every {Key, Context, Value} triple in the database.
fold(Fun, DBHandle, AccIn) when is_function(Fun) ->
  Wrapper = fun(#row{key=Key, context=Context, value=Value}, Acc) ->
      Fun({Key, Context, Value}, Acc)
    end,
  tcbdbets:foldl(Wrapper, AccIn, DBHandle).

%%====================================================================
%% Internal functions
%%====================================================================
%% Start tcerl once; treat already_started as success.
ensure_tcerl_start() ->
  case tcerl:start() of
    ok -> ok;
    {error, {already_started, tcerl}} -> ok;
    {error, Reason} -> {error, Reason}
  end.
-------------------------------------------------------------------------------- /elibs/ulimit.erl: --------------------------------------------------------------------------------
%%%-------------------------------------------------------------------
%%% File: ulimit.erl
%%% @author Cliff Moon <> []
%%% @copyright 2009 Cliff Moon
%%% @doc Get/set process ulimits through the ulimit_drv port driver.
%%%
%%% @end
%%%
%%% @since 2009-01-28 by Cliff Moon
%%%-------------------------------------------------------------------
-module(ulimit).
-author('cliff@powerset.com').

%% Driver opcodes.
-define(GET, $g).
-define(SET, $s).

%% API
-export([start/0, stop/1, setulimit/2, getulimit/1]).
%%====================================================================
%% API
%%====================================================================

%% Load the driver and open its port; the handle is tagged with 'ulimit'.
start() ->
  case load_driver() of
    {error, Err} ->
      {error, erl_ddll:format_error(Err)};
    ok ->
      {ok, {ulimit, open_port({spawn, ulimit_drv}, [binary])}}
  end.

stop({ulimit, Port}) ->
  unlink(Port),
  exit(Port, die).

%% Set the limit to N via the driver.
setulimit({ulimit, Port}, N) when is_integer(N) ->
  port_command(Port, [?SET, term_to_binary(N)]),
  recv(Port).

%% Read the current limit from the driver.
getulimit({ulimit, Port}) ->
  port_command(Port, [?GET]),
  recv(Port).

%%====================================================================
%% Internal functions
%%====================================================================


load_driver() ->
  PrivDir = filename:join([filename:dirname(code:which(?MODULE)), "..", "priv"]),
  erl_ddll:load(PrivDir, "ulimit_drv").

recv(Port) ->
  receive
    {Port, {data, Bin}} -> binary_to_term(Bin)
  end.
-------------------------------------------------------------------------------- /elibs/vector_clock.erl: --------------------------------------------------------------------------------
-module (vector_clock).
-export ([create/1, truncate/1, increment/2, compare/2, resolve/2, merge/2]).

-ifdef(TEST).
-include("etest/vector_clock_test.erl").
-endif.

%% A clock is a list of {Node, Timestamp} pairs.
create(NodeName) -> [{NodeName, lib_misc:now_float()}].

%% Cap a clock at its 10 most recent entries.
truncate(Clock) when length(Clock) > 10 ->
  lists:nthtail(length(Clock) - 10, lists:keysort(2, Clock));

truncate(Clock) -> Clock.
%% Bump NodeName's entry to the current time, appending one if absent.
increment(NodeName, [{NodeName, _Stamp}|Rest]) ->
  [{NodeName, lib_misc:now_float()}|Rest];

increment(NodeName, [Entry|Rest]) ->
  [Entry|increment(NodeName, Rest)];

increment(NodeName, []) ->
  [{NodeName, lib_misc:now_float()}].

%% Pick the causally-newer {Clock, Values}; concurrent writes keep both
%% value sets under a merged clock.
resolve({ClockA, ValuesA}, {ClockB, ValuesB}) ->
  case compare(ClockA, ClockB) of
    less -> {ClockB, ValuesB};
    greater -> {ClockA, ValuesA};
    equal -> {ClockA, ValuesA};
    concurrent -> {merge(ClockA, ClockB), ValuesA ++ ValuesB}
  end;
resolve(not_found, {Clock, Values}) ->
  {Clock, Values};
resolve({Clock, Values}, not_found) ->
  {Clock, Values};
resolve(not_found, not_found) ->
  not_found.

%% Entry-wise maximum of two clocks, sorted by node name.
merge(ClockA, ClockB) ->
  merge([], ClockA, ClockB).

merge(Acc, [], ClockB) -> lists:keysort(1, Acc ++ ClockB);

merge(Acc, ClockA, []) -> lists:keysort(1, Acc ++ ClockA);

merge(Acc, [{Node, VersionA}|RestA], ClockB) ->
  case lists:keytake(Node, 1, ClockB) of
    {value, {Node, VersionB}, RestB} when VersionA > VersionB ->
      merge([{Node, VersionA}|Acc], RestA, RestB);
    {value, {Node, VersionB}, RestB} ->
      merge([{Node, VersionB}|Acc], RestA, RestB);
    false ->
      merge([{Node, VersionA}|Acc], RestA, ClockB)
  end.

%% Classify the causal relationship between two clocks.
compare(ClockA, ClockB) ->
  AltB = less_than(ClockA, ClockB),
  BltA = less_than(ClockB, ClockA),
  AeqB = equals(ClockA, ClockB),
  AccB = concurrent(ClockA, ClockB),
  if
    AltB -> less;
    BltA -> greater;
    AeqB -> equal;
    AccB -> concurrent
  end.
% ClockA is less than ClockB if and only if ClockA[z] <= ClockB[z] for all instances z and there
% exists an index z' such that ClockA[z'] < ClockB[z']
less_than(ClockA, ClockB) ->
  Dominated = lists:all(fun({Node, VersionA}) ->
      case lists:keysearch(Node, 1, ClockB) of
        {value, {_NodeB, VersionB}} -> VersionA =< VersionB;
        false -> false
      end
    end, ClockA),
  StrictSomewhere = lists:any(fun({NodeA, VersionA}) ->
      case lists:keysearch(NodeA, 1, ClockB) of
        {value, {_NodeB, VersionB}} -> VersionA /= VersionB;
        false -> true
      end
    end, ClockA),
  %length takes care of the case when clockA is shorter than B
  Dominated and (StrictSomewhere or (length(ClockA) < length(ClockB))).

%% Same set of {Node, Version} pairs on both sides.
equals(ClockA, ClockB) ->
  SameEntries = lists:all(fun({NodeA, VersionA}) ->
      lists:any(fun(EntryB) ->
          case EntryB of
            {NodeA, VersionA} -> true;
            _ -> false
          end
        end, ClockB)
    end, ClockA),
  SameEntries and (length(ClockA) == length(ClockB)).

%% Neither ordered one way or the other, nor equal.
concurrent(ClockA, ClockB) ->
  not (less_than(ClockA, ClockB) or less_than(ClockB, ClockA) or equals(ClockA, ClockB)).
-------------------------------------------------------------------------------- /escripts/benchmark.escript: --------------------------------------------------------------------------------
#!/usr/bin/env escript
%%! +K true +A 128 +P 60000 -smp enable -sname benchmark -pa ./ebin -pa ./deps/thrift/ebin -setcookie e841d215484685567858aaec4d25af2f
-mode(compile).

%% Benchmark settings accumulated from the command line.
-record(config, {hosts=[],concurrency=20,ratio=0.5,logdir="bench_log",size=100,keyspace=100000,method=thrift}).
%% Entry point: parse the command line, run the load generator, and wait
%% for its exit notification.
main(Options) ->
  net_kernel:start([benchmark, shortnames]),
  erlang:set_cookie(node(), e841d215484685567858aaec4d25af2f),
  code:add_pathsa(["./ebin", "./deps/thrift/ebin"]),
  process_flag(trap_exit, true),
  case parse_options(Options) of
    {error, Msg} -> usage(Msg);
    Config -> load_gen:run(Config)
  end,
  receive
    {ok, Val} ->
      io:format("died with ~p~n", [Val]),
      Val
  end.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Options and what have you

parse_options(Opts) ->
  parse_options(Opts, #config{}).

parse_options([], Config) -> Config;

%% ROBUSTNESS FIX: a trailing option with no argument used to crash with
%% a function_clause; report it through the usage message instead.
parse_options([Name], _Config) ->
  {error, "missing argument for option " ++ Name};

parse_options([Name, Arg | Opts], Config) ->
  parse_options(Opts, set_option(longform(Name), Arg, Config)).

%% Map short flags onto their long forms.
longform("-h") -> "--host";
longform("-c") -> "--concurrency";
longform("-r") -> "--ratio";
longform("-l") -> "--log";
longform("-s") -> "--size";
longform("-k") -> "--keyspace";
longform("-m") -> "--method";
longform(N) -> N.

set_option("--host", Arg, Config = #config{hosts=Hosts}) ->
  Config#config{hosts=[Arg|Hosts]};

set_option("--concurrency", Arg, Config) ->
  Config#config{concurrency=list_to_integer(Arg)};

set_option("--ratio", Arg, Config) ->
  Config#config{ratio=list_to_float(Arg)};

set_option("--log", Arg, Config) ->
  Config#config{logdir=Arg};

set_option("--size", Arg, Config) ->
  Config#config{size=list_to_integer(Arg)};

set_option("--keyspace", Arg, Config) ->
  Config#config{keyspace=list_to_integer(Arg)};

set_option("--method", Arg, Config) ->
  Config#config{method=list_to_atom(Arg)}.

%% Print the error plus full usage and exit non-zero.
usage(Msg) ->
  io:format("error: ~p", [Msg]),
  io:format("Usage: distributed_bench [options]~n"),
  io:format(" -h, --host [HOST] Add another host to test against. Should add the whole cluster.~n"),
  io:format(" -c, --concurrency [INT] the concurrency level for the test. How many clients to start.~n"),
  io:format(" -r, --ratio [R] the ratio of gets to puts. 0.0 means all gets, 1.0 means all puts.~n"),
  io:format(" -l, --log [LOGDIR] Where the instances should log their raw performance data.~n"),
  io:format(" -s, --size [SIZE] The size of the values to use, in bytes.~n"),
  io:format(" -k, --keyspace [KEYSPACE] The integer size of the keyspace.~n"),
  io:format(" -m, --method [METHOD] The method of contacting the server (thrift, rpc).~n"),
  halt(1).
-------------------------------------------------------------------------------- /etest/.placeholder: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/moonpolysoft/dynomite/a5618dcbe17b16cefdc9c567f27a1f4445aee005/etest/.placeholder -------------------------------------------------------------------------------- /etest/bloom_test.erl: --------------------------------------------------------------------------------
-include_lib("eunit/include/eunit.hrl").

%% Smoke test: a key that was put is found, an absent key is not.
simple_bloom_test() ->
  file:delete(data_file()),
  {ok, Bloom} = bloom:start(data_file(), 10000, 0.001),
  bloom:put(Bloom, "wut"),
  ?assertEqual(true, bloom:has(Bloom, "wut")),
  ?assertEqual(false, bloom:has(Bloom, "fuck")),
  bloom:stop(Bloom).

%% Every inserted key must be reported present (no false negatives).
insert_many_things_test() ->
  file:delete(data_file()),
  {ok, Bloom} = bloom:start(data_file(), 10000, 0.001),
  Keys = lists:map(fun(_N) ->
      Key = "Key" ++ float_to_list(random:uniform()),
      bloom:put(Bloom, Key),
      Key
    end, lists:seq(1, 10000)),
  lists:foreach(fun(Key) ->
      ?assert(bloom:has(Bloom, Key))
    end, Keys),
  bloom:stop(Bloom).
%% Inserts 10k random keys, then probes 10k keys that were never inserted
%% and checks the measured false-positive rate stays under the configured
%% bound (0.1%).
false_positive_error_rate_test() ->
    file:delete(data_file()),
    {ok, Filter} = bloom:start(data_file(), 10000, 0.001),
    Insert = fun(_) ->
        bloom:put(Filter, "Key" ++ float_to_list(random:uniform()))
    end,
    lists:foreach(Insert, lists:seq(1, 10000)),
    Probes = [bloom:has(Filter, "butt" ++ float_to_list(random:uniform()))
              || _ <- lists:seq(1, 10000)],
    FalsePositives = [Hit || Hit <- Probes, Hit == true],
    FPRate = length(FalsePositives) / 10000,
    ?debugFmt("false positives: ~p", [length(FalsePositives)]),
    ?debugFmt("false positives: ~p", [FPRate]),
    ?debugFmt("mem size ~p", [bloom:mem_size(Filter)]),
    ?assert(FPRate < 0.001),
    ?assertEqual(10000, bloom:key_size(Filter)),
    bloom:stop(Filter).

%% Writes 10k keys, stops the filter, reopens it from its data file, and
%% verifies both the false-positive bound and that every original key is
%% still reported present (true-positive rate exactly 1.0).
persist_test() ->
    file:delete(data_file()),
    {ok, Filter} = bloom:start(data_file(), 10000, 0.001),
    MakeKey = fun(_) ->
        Key = "Key" ++ float_to_list(random:uniform()),
        bloom:put(Filter, Key),
        Key
    end,
    Keys = lists:map(MakeKey, lists:seq(1, 10000)),
    ?debugMsg("got keys"),
    bloom:stop(Filter),
    ?debugMsg("stopping bloom"),
    {ok, Reopened} = bloom:start(data_file(), 10000, 0.001),
    ?debugMsg("restarted"),
    Probes = [bloom:has(Reopened, "butt" ++ float_to_list(random:uniform()))
              || _ <- lists:seq(1, 10000)],
    FalsePositives = [Hit || Hit <- Probes, Hit == true],
    FPRate = length(FalsePositives) / 10000,
    ?debugFmt("false positives: ~p", [length(FalsePositives)]),
    ?debugFmt("false positives: ~p", [FPRate]),
    ?debugFmt("mem size ~p", [bloom:mem_size(Reopened)]),
    ?assert(FPRate < 0.001),
    ?assertEqual(10000, bloom:key_size(Reopened)),
    TruePositives = [Hit || Hit <- [bloom:has(Reopened, Key) || Key <- Keys],
                            Hit == true],
    ?debugFmt("true positives: ~p", [length(TruePositives)]),
    ?debugFmt("keys ~p", [length(Keys)]),
    TPRate = length(TruePositives) / 10000,
    ?assertEqual(1.0, TPRate),
    bloom:stop(Reopened).
66 | 67 | priv_dir() -> 68 | Dir = filename:join(t:config(priv_dir), "data"), 69 | filelib:ensure_dir(filename:join(Dir, "bloom")), 70 | Dir. 71 | 72 | data_file() -> 73 | filename:join(priv_dir(), "bloom"). 74 | 75 | data_file(N) -> 76 | filename:join(priv_dir(), "bloom" ++ integer_to_list(N)). -------------------------------------------------------------------------------- /etest/bootstrap_test.erl: -------------------------------------------------------------------------------- 1 | -include_lib("eunit/include/eunit.hrl"). 2 | 3 | relative_path_test() -> 4 | Dir = "/blah/blaa/bloo/blee/1/", 5 | File = "/bleep/bloop/blop/blorp/1/file.idx", 6 | ?assertEqual("/blah/blaa/bloo/blee/1/file.idx", relative_path(Dir, File)). 7 | 8 | simple_send_test_() -> 9 | {timeout, 120, ?_test(test_simple_send())}. 10 | 11 | test_simple_send() -> 12 | process_flag(trap_exit, true), 13 | test_cleanup(), 14 | test_setup(), 15 | Ref = make_ref(), 16 | Receiver = spawn_link(fun() -> receive_bootstrap(priv_dir("b"), Ref) end), 17 | send_bootstrap(priv_dir("a"), Receiver, Ref), 18 | ?assertEqual(file:read_file(data_file("a")), file:read_file(data_file("b"))). 19 | 20 | test_cleanup() -> 21 | rm_rf_dir(priv_dir("a")), 22 | rm_rf_dir(priv_dir("b")). 23 | 24 | test_setup() -> 25 | ok = crypto:start(), 26 | {ok, IO} = file:open(data_file("a"), [raw, binary, write]), 27 | lists:foreach(fun(_) -> 28 | D = << << X:8 >> || X <- lists:seq(1, 1024) >>, 29 | file:write(IO, D) 30 | end, lists:seq(1, 100)), 31 | ok = file:close(IO). 32 | 33 | priv_dir(Root) -> 34 | Dir = filename:join([t:config(priv_dir), "bootstrap", Root, "1"]), 35 | filelib:ensure_dir(filename:join([Dir, "bootstrap"])), 36 | Dir. 37 | 38 | data_file(Root) -> 39 | filename:join([priv_dir(Root), "bootstrap"]). 40 | 41 | rm_rf_dir(Dir) -> 42 | ?cmd("rm -rf " ++ Dir). 
-------------------------------------------------------------------------------- /etest/configuration_test.erl: -------------------------------------------------------------------------------- 1 | -include_lib("eunit/include/eunit.hrl"). 2 | 3 | config_parsing_test() -> 4 | JSON = "{\"blocksize\": 4096,\"text_port\": 11222,\"thrift_port\": 9200,\"web_port\": 8080,\"directory\": \"/Users/cliff/data/tmp\",\"storage_mod\": \"dets_storage\",\"n\": 1,\"r\": 1,\"w\": 1,\"q\": 6}", 5 | Config = decode_json(mochijson:decode(JSON)), 6 | ?assertEqual(#config{ 7 | blocksize=4096, 8 | text_port=11222, 9 | thrift_port=9200, 10 | web_port=8080, 11 | directory="/Users/cliff/data/tmp", 12 | storage_mod="dets_storage", 13 | n=1, 14 | r=1, 15 | w=1, 16 | q=6}, Config). 17 | 18 | config_merge_test() -> 19 | Remote = #config{blocksize=20,storage_mod="dets_storage",n=3,r=3,w=3,q=4}, 20 | Local = #config{ 21 | blocksize=4096, 22 | text_port=11222, 23 | thrift_port=9200, 24 | web_port=8080, 25 | directory="/Users/cliff/data/tmp", 26 | storage_mod="derp_storage", 27 | n=1, 28 | r=1, 29 | w=1, 30 | q=6}, 31 | Merged = merge_configs(Remote, Local), 32 | ?assertEqual(#config{ 33 | blocksize=20, 34 | text_port=11222, 35 | thrift_port=9200, 36 | web_port=8080, 37 | directory="/Users/cliff/data/tmp", 38 | storage_mod="dets_storage", 39 | n=3, 40 | r=3, 41 | w=3, 42 | q=4}, Merged). -------------------------------------------------------------------------------- /etest/dmtree_test.erl: -------------------------------------------------------------------------------- 1 | -include_lib("eunit/include/eunit.hrl"). 2 | 3 | deserialize_node_test() -> 4 | NodeBin = <<0:8, 2:32, 5 | 1:32, 2:32, 0:32, 0:32, 6 | 3:32, 4:64, 5:32, 6:64, 7:32, 8:64, 0:32, 0:64, 0:32, 0:64>>, 7 | Node = deserialize(NodeBin, 4, 20), 8 | #node{m=2,children=Children,keys=Keys,offset=20} = Node, 9 | [{3, 4}, {5, 6}, {7, 8}] = Children, 10 | [1, 2] = Keys. 
%% dmtree_test: (de)serialization round trips for btree nodes/leaves,
%% blocksize helper macros, and reopening a dmtree file from disk.

%% A leaf binary (tag 1) with m=2 deserializes into its {KeyHash, Offset,
%% ValueHash}-style 3-tuples; trailing zero padding is ignored.
deserialize_leaf_test() ->
    LeafBin = <<1:8, 2:32,
                1:32, 2:64, 3:32,
                4:32, 5:64, 6:32,
                0:352>>,
    Leaf = deserialize(LeafBin, 4, 20),
    #leaf{m=2,values=Values,offset=20} = Leaf,
    [{1, 2, 3}, {4, 5, 6}] = Values.

%% Serializing a node (tag 0) zero-pads the unused key/child slots.
serialize_node_test() ->
    Bin = serialize(#node{
        m=2,
        keys=[1, 2],
        children=[{3, 4}, {5, 6}, {7, 8}]
    }, 81),
    error_logger:info_msg("node bin ~p~n", [Bin]),
    Bin = <<0:8, 2:32,
            1:32, 2:32, 0:32, 0:32,
            3:32, 4:64, 5:32, 6:64, 7:32, 8:64, 0:192>>.

%% Serializing a leaf (tag 1) lays values out flat and zero-pads the rest.
serialize_leaf_test() ->
    Bin = serialize(#leaf{
        m=2,
        values=[{1, 2, 3}, {4, 5, 6}, {7, 8, 9}]
    }, 81),
    error_logger:info_msg("leaf bin ~p~n", [Bin]),
    Bin = <<1:8, 2:32,
            1:32, 2:64, 3:32,
            4:32, 5:64, 6:32,
            7:32, 8:64, 9:32,
            0:224>>.

%% serialize/2 followed by deserialize/3 must reproduce the node exactly.
node_round_trip_test() ->
    Node = #node{
        m=2,
        keys=[1, 2],
        children=[{4, 5}, {6, 7}, {8, 9}],
        offset = 0
    },
    Node = deserialize(serialize(Node, 81), 4, 0).

%% Same round-trip property for leaves.
leaf_round_trip_test() ->
    Leaf = #leaf{
        m=2,
        values=[{1, 2, 3}, {4, 5, 6}],
        offset=0
    },
    Leaf = deserialize(serialize(Leaf, 81), 4, 0).

pointers_for_blocksize_test() ->
    ?assertEqual(5, ?pointers_from_blocksize(256)),
    ?assertEqual(1, ?pointers_from_blocksize(16)).

pointer_for_size_test() ->
    ?assertEqual(1, ?pointer_for_size(14, 4096)).

size_for_pointer_test() ->
    ?assertEqual(16, ?size_for_pointer(1)),
    ?assertEqual(256, ?size_for_pointer(5)).

%% Closing a tree and reopening its data file must preserve the root and
%% the kfpointers free-list state.
open_and_reopen_test() ->
    {ok, Pid} = dmtree:start_link(data_file(), 4096),
    Root = root(Pid),
    State = state(Pid),
    dmtree:stop(Pid),
    {ok, P2} = dmtree:start_link(data_file(), 4096),
    ?assertEqual(Root, root(P2)),
    S2 = state(P2),
    ?assertEqual(State#dmtree.kfpointers, S2#dmtree.kfpointers),
    %% BUG FIX: the original called dmtree:stop(Pid) here, but Pid was
    %% already stopped above -- the reopened process P2 was leaked.
    dmtree:stop(P2).
82 | 83 | adjacent_blocks_test() -> 84 | {ok, Pid} = dmtree:start_link(fixture("dm_adjacentblocks.idx"), 4096), 85 | dmtree:delete_key(4741, "afknf", Pid), 86 | dmtree:stop(Pid). 87 | 88 | fixture_dir() -> 89 | filename:join(t:config(test_dir), "fixtures"). 90 | 91 | fixture(Name) -> % need to copy the fixture for repeatability 92 | file:copy(filename:join(fixture_dir(), Name), data_file(Name)), 93 | data_file(Name). 94 | 95 | priv_dir() -> 96 | Dir = filename:join([t:config(priv_dir), "data", "dmtree"]), 97 | filelib:ensure_dir(Dir ++ "/"), 98 | Dir. 99 | 100 | data_file(Name) -> 101 | filename:join(priv_dir(), Name). 102 | 103 | data_file() -> 104 | filename:join(priv_dir(), "dmtree"). -------------------------------------------------------------------------------- /etest/fixtures/dm_adjacentblocks.idx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/moonpolysoft/dynomite/a5618dcbe17b16cefdc9c567f27a1f4445aee005/etest/fixtures/dm_adjacentblocks.idx -------------------------------------------------------------------------------- /etest/lib_misc_test.erl: -------------------------------------------------------------------------------- 1 | -include_lib("eunit/include/eunit.hrl"). 2 | 3 | pmap_test() -> 4 | L = [0, 1, 2], 5 | ?assertEqual([0,1], pmap(fun(N) -> 6 | timer:sleep(N), 7 | N 8 | end, L, 2)). 9 | 10 | pmap_1_test() -> 11 | L = [0], 12 | ?assertEqual([0], pmap(fun(N) -> 13 | N 14 | end, L, 1)). 15 | 16 | reverse_bits_test() -> 17 | 3869426816 = reverse_bits(19088743), 18 | 1458223569 = reverse_bits(2342344554). 19 | 20 | nthdelete_test() -> 21 | A = [1,2,3,4,5], 22 | ?assertEqual([1,2,3,4,5], nthdelete(0, A)), 23 | ?assertEqual([1,2,3,4,5], nthdelete(6, A)), 24 | ?assertEqual([2,3,4,5], nthdelete(1, A)), 25 | ?assertEqual([1,2,4,5], nthdelete(3, A)). 
%% zero_split/1 splits a binary at the first NUL byte, keeping the NUL on
%% the right-hand side; a binary with no NUL is returned unchanged.
zero_split_test() ->
    ?assertEqual({<<"">>, <<0,"abcdefg">>}, zero_split(<<0, "abcdefg">>)),
    ?assertEqual({<<"abd">>, <<0, "efg">>}, zero_split(<<"abd", 0, "efg">>)),
    ?assertEqual({<<"abcdefg">>, <<0>>}, zero_split(<<"abcdefg",0>>)),
    ?assertEqual(<<"abcdefg">>, zero_split(<<"abcdefg">>)).


%% Disabled hashing micro-benchmark (pure-Erlang fnv vs murmur vs native
%% fnv); kept for reference, enable manually when profiling throughput.
% hash_throughput_test_() ->
%   {timeout, 120, [{?LINE, fun() ->
%     Keys = lists:map(fun(N) ->
%         lists:duplicate(1000, random:uniform(255))
%       end, lists:seq(1,1000)),
%     FNVStart = now_float(),
%     lists:foreach(fun(Key) -> fnv(Key) end, Keys),
%     FNVEnd = now_float(),
%     ?debugFmt("fnv took ~ps~n", [FNVEnd - FNVStart]),
%     MStart = now_float(),
%     lists:foreach(fun(Key) -> hash(Key) end, Keys),
%     MEnd = now_float(),
%     ?debugFmt("murmur took ~ps~n", [MEnd - MStart]),
%     FNVNStart = now_float(),
%     lists:foreach(fun(Key) -> fnv:hash(Key) end, Keys),
%     FNVNEnd = now_float(),
%     ?debugFmt("fnv native took ~ps~n", [FNVNEnd - FNVNStart])
%   end}]}.
%
%% The pure-Erlang fnv/1 and the native fnv:hash/1 must agree on strings,
%% binaries, and mixed iolists.
fnv_native_compat_test() ->
    lists:foreach(fun(Input) ->
        ?assertEqual(fnv(Input), fnv:hash(Input))
    end, ["blah", <<"blah">>, [<<"blah">>, "bleg"]]).

shuffle_test() ->
    %we can really only test that they aren't equals, which won't even always work, weak
    Input = [a, b, c, d, e, f, g],
    Shuffled = shuffle(Input),
    ?debugFmt("shuffled: ~p", [Shuffled]),
    ?assertEqual(7, length(Shuffled)),
    ?assert(Input =/= Shuffled).

%% Builds a five-deep directory chain, each level containing a file, then
%% checks rm_rf/1 removes the whole tree so priv_dir() ends up empty.
rm_rf_test() ->
    MakeLevel = fun(Name, Parent) ->
        Dir = filename:join(Parent, Name),
        File = filename:join(Dir, "file"),
        filelib:ensure_dir(File),
        file:write_file(File, "blahblah"),
        Dir
    end,
    lists:foldl(MakeLevel, priv_dir(), ["a", "b", "c", "d", "e"]),
    rm_rf(filename:join(priv_dir(), "a")),
    ?assertEqual({ok, []}, file:list_dir(priv_dir())).
82 | 83 | priv_dir() -> 84 | Dir = filename:join([t:config(priv_dir), "lib_misc"]), 85 | filelib:ensure_dir(filename:join([Dir, "lib_misc"])), 86 | Dir. -------------------------------------------------------------------------------- /etest/mediator_test.erl: -------------------------------------------------------------------------------- 1 | -include_lib("eunit/include/eunit.hrl"). 2 | 3 | 4 | init_integrated(Good, Bad, Config) -> 5 | process_flag(trap_exit, true), 6 | {ok, _} = configuration:start_link(Config), 7 | GoodServers = start_storage_servers(dict_storage, good_store, Good, []), 8 | BadServers = start_storage_servers(fail_storage, bad_store, Bad, []), 9 | {ok, MockMem} = mock_genserver:start_link({local, membership}), 10 | mock_genserver:expects_call(MockMem, {servers_for_key, unbound}, fun(_, _) -> GoodServers ++ BadServers end). 11 | 12 | stop_integrated(Good, Bad) -> 13 | ok = stop_storage_servers(bad_store, Bad), 14 | ok = stop_storage_servers(good_store, Good), 15 | configuration:stop(), 16 | receive {'EXIT', Pid, Val} -> ok end, 17 | mock_genserver:stop(membership). 18 | 19 | start_storage_servers(_Module, _Name, 0, Servers) -> Servers; 20 | start_storage_servers(Module, Name, N, Servers) -> 21 | {ok, Pid} = storage_server:start_link(Module, ok, list_to_atom(lists:concat([Name, N])), 0, 0, undefined), 22 | start_storage_servers(Module, Name, N-1, [Pid|Servers]). 23 | 24 | stop_storage_servers(_Name, 0) -> ok; 25 | stop_storage_servers(Name, N) -> 26 | storage_server:close(list_to_atom(lists:concat([Name, N]))), 27 | receive {'EXIT', Pid, Val} -> ok end, 28 | stop_storage_servers(Name, N-1). 
29 | 30 | all_servers_working_test() -> 31 | init_integrated(3, 0, #config{n=3,r=2,w=2,q=1}), 32 | % we get back 2 because we bank out before the 3rd can come back 33 | ?assertEqual({ok, 2}, mediator:put(<<"key1">>, [], <<"value1">>)), 34 | {ok, {_, [<<"value1">>]}} = mediator:get(<<"key1">>), 35 | ?assertEqual({ok, {true, 2}}, mediator:has_key(<<"key1">>)), 36 | ?assertEqual({ok, 2}, mediator:delete(<<"key1">>)), 37 | ?assertEqual({ok, {false, 2}}, mediator:has_key(<<"key1">>)), 38 | ?assertEqual({ok, not_found}, mediator:get(<<"key1">>)), 39 | stop_integrated(3, 0). 40 | 41 | one_bad_server_test() -> 42 | init_integrated(2, 1, #config{n=3,r=2,w=2,q=1}), 43 | ?assertEqual({ok, 2}, mediator:put(<<"key1">>, [], <<"value1">>)), 44 | {ok, {_Context, [<<"value1">>]}} = mediator:get(<<"key1">>), 45 | ?assertEqual({ok, {true, 2}}, mediator:has_key(<<"key1">>)), 46 | ?assertEqual({ok, 2}, mediator:delete(<<"key1">>)), 47 | ?assertEqual({ok, {false, 2}}, mediator:has_key(<<"key1">>)), 48 | ?assertEqual({ok, not_found}, mediator:get(<<"key1">>)), 49 | stop_integrated(2, 1). 50 | 51 | two_bad_servers_test() -> 52 | init_integrated(1, 2, #config{n=3,r=2,w=2,q=1}), 53 | {failure, _} = mediator:put(<<"key1">>, [], <<"value1">>), 54 | {failure, _} = mediator:get(<<"key1">>), 55 | {failure, _} = mediator:delete(<<"key1">>), 56 | {failure, _} = mediator:has_key(<<"key1">>), 57 | stop_integrated(1, 2). 58 | 59 | three_bad_servers_test() -> 60 | init_integrated(0, 3, #config{n=3,r=2,w=2,q=1}), 61 | {failure, _} = mediator:put(<<"key1">>, [], <<"value1">>), 62 | {failure, _} = mediator:get(<<"key1">>), 63 | {failure, _} = mediator:delete(<<"key1">>), 64 | {failure, _} = mediator:has_key(<<"key1">>), 65 | stop_integrated(0, 3). -------------------------------------------------------------------------------- /etest/rate_test.erl: -------------------------------------------------------------------------------- 1 | -include_lib("eunit/include/eunit.hrl"). 
2 | 3 | basic_rate_test() -> 4 | {ok, Pid} = rate:start_link(100), 5 | ?assertEqual(0.0, rate:get_rate(Pid, 100)), 6 | rate:add_datapoint(Pid, 100, now()), 7 | ?assertEqual(100.0, rate:get_rate(Pid, 100)), 8 | rate:close(Pid). 9 | 10 | limiting_test() -> 11 | {ok, Pid} = rate:start_link(3000), 12 | lists:foreach(fun(N) -> 13 | rate:add_datapoint(Pid, 1, now()) 14 | end, lists:seq(1, 350)), 15 | ?assertEqual(1, length(rate:get_datapoints(Pid))), 16 | ?assertEqual(350.0, rate:get_rate(Pid, 3000)), 17 | rate:close(Pid). 18 | 19 | time_limiting_test_() -> 20 | {timeout, 120, ?_test(test_time_limiting())}. 21 | 22 | test_time_limiting() -> 23 | {ok, Pid} = rate:start_link(1), 24 | lists:foreach(fun(N) -> 25 | rate:add_datapoint(Pid, 1, now()) 26 | end, lists:seq(1, 50)), 27 | ?assertEqual(1, length(rate:get_datapoints(Pid))), 28 | timer:sleep(2000), 29 | lists:foreach(fun(N) -> 30 | rate:add_datapoint(Pid, 1, now()) 31 | end, lists:seq(1, 50)), 32 | ?assertEqual(1, length(rate:get_datapoints(Pid))), 33 | ?assertEqual(50.0, rate:get_rate(Pid, 1)), 34 | rate:close(Pid). 35 | 36 | queue_test() -> 37 | Q = {[],[{324,1232143984}]}, 38 | NQ = update(1,1232143985, Q), 39 | ?assertEqual({[{324,1232143984}],[{1,1232143985}]}, NQ). -------------------------------------------------------------------------------- /etest/storage_manager_test.erl: -------------------------------------------------------------------------------- 1 | -include_lib("eunit/include/eunit.hrl"). 2 | 3 | all_test_() -> 4 | {foreach, 5 | fun() -> test_setup() end, 6 | fun(V) -> test_teardown(V) end, 7 | [ 8 | {"test_initial_load", ?_test(test_initial_load())}, 9 | {"test_reload_same_layout", ?_test(test_reload_same_layout())}, 10 | {"test_loadout_change", ?_test(test_loadout_change())}, 11 | {"test_loadout_change_with_bootstrap", ?_test(test_loadout_change_with_bootstrap())}, 12 | {"test_unload_servers", ?_test(test_unload_servers())} 13 | ]}. 
%% Extract the partition ids from a [{Node, Partition}] layout.
part_ids(Partitions) ->
    [Part || {_Node, Part} <- Partitions].

%% A fresh load should start one storage server per owned partition.
test_initial_load() ->
    Partitions = partitions:create_partitions(1, node(), [node()]),
    expect_start_servers([storage_1, storage_2147483649]),
    storage_manager:load(node(), Partitions, part_ids(Partitions)),
    verify().

%% Loading the identical layout twice must not start anything new.
test_reload_same_layout() ->
    Partitions = partitions:create_partitions(1, node(), [node()]),
    expect_start_servers([storage_1, storage_2147483649]),
    storage_manager:load(node(), Partitions, part_ids(Partitions)),
    % should not trigger any reload behavior
    storage_manager:load(node(), Partitions, part_ids(Partitions)),
    verify().

%% Growing the layout only starts servers for the newly-owned partitions.
test_loadout_change() ->
    Initial = partitions:create_partitions(0, a, [a]),
    Grown = partitions:create_partitions(1, a, [a]),
    expect_start_servers([storage_1]),
    storage_manager:load(a, Initial, [1]),
    verify(),
    expect_start_servers([storage_2147483649]),
    storage_manager:load(a, Grown, [1, 2147483649]),
    verify().

%% When a partition moves from another node, bootstrap:start is invoked
%% for that node and its completion callback is run.
test_loadout_change_with_bootstrap() ->
    P1 = partitions:create_partitions(1, a, [a, b]),
    P2 = partitions:create_partitions(1, a, [a]),
    expect_start_servers([storage_1, storage_2147483649]),
    mock:expects(bootstrap, start,
                 fun({_, Node, _}) -> Node == b end,
                 fun({_, _, CB}, _) -> CB() end),
    storage_manager:load(a, P1, [1]),
    storage_manager:load(b, P2, [1, 2147483649]),
    verify().

%% Shrinking the owned set stops (terminate + delete) the surplus server.
test_unload_servers() ->
    P1 = partitions:create_partitions(1, a, [a]),
    P2 = partitions:create_partitions(1, a, [a, b]),
    expect_start_servers([storage_1, storage_2147483649]),
    expect_stop_servers([storage_2147483649]),
    storage_manager:load(b, P1, [1, 2147483649]),
    storage_manager:load(a, P2, [1]),
    verify().
56 | 57 | test_setup() -> 58 | configuration:start_link(#config{n=0,r=1,w=1,q=6,directory=priv_dir()}), 59 | {ok, _} = mock:mock(supervisor), 60 | {ok, _} = mock:mock(bootstrap), 61 | {ok, _} = storage_manager:start_link(). 62 | 63 | verify() -> 64 | ok = mock:verify(supervisor), 65 | ok = mock:verify(bootstrap). 66 | 67 | test_teardown(_) -> 68 | storage_manager:stop(), 69 | mock:stop(supervisor), 70 | mock:stop(bootstrap). 71 | 72 | priv_dir() -> 73 | Dir = filename:join([t:config(priv_dir), "data", "storage_manager"]), 74 | filelib:ensure_dir(filename:join(Dir, "storage_manager")), 75 | Dir. 76 | 77 | expect_start_servers([]) -> ok; 78 | 79 | expect_start_servers([Part|Parts]) -> 80 | mock:expects(supervisor, start_child, fun({storage_server_sup, Spec}) -> 81 | element(1, Spec) == Part 82 | end, ok), 83 | expect_start_servers(Parts). 84 | 85 | expect_stop_servers([]) -> ok; 86 | 87 | expect_stop_servers([Part|Parts]) -> 88 | mock:expects(supervisor, terminate_child, fun({storage_server_sup, Name}) -> 89 | Name == Part 90 | end, ok), 91 | mock:expects(supervisor, delete_child, fun({storage_server_sup, Name}) -> 92 | Name == Part 93 | end, ok), 94 | expect_stop_servers(Parts). -------------------------------------------------------------------------------- /etest/stream_test.erl: -------------------------------------------------------------------------------- 1 | -include_lib("eunit/include/eunit.hrl"). 2 | 3 | -define(MEG, 1048576). 4 | 5 | simple_streaming_test() -> 6 | Bits = ?MEG*8, 7 | Bin = <<0:Bits>>, 8 | ?MEG = byte_size(Bin), 9 | Ref = make_ref(), 10 | Parent = self(), 11 | process_flag(trap_exit, true), 12 | Receiver = spawn_link(fun() -> 13 | receive 14 | {Ref, Sender} -> 15 | Results = recv(Sender, Ref, 1000), 16 | Parent ! {Ref, Results} 17 | end 18 | end), 19 | Sender = spawn_link(fun() -> 20 | send(Receiver, Ref, {context, [Bin]}) 21 | end), 22 | Receiver ! 
{Ref, Sender}, 23 | {ok, {context, [Bin]}} = receive 24 | {Ref, Results} -> Results 25 | end, 26 | receive 27 | {'EXIT', Receiver, _} -> ok 28 | end, 29 | receive 30 | {'EXIT', Sender, _} -> ok 31 | end. -------------------------------------------------------------------------------- /etest/sync_manager_test.erl: -------------------------------------------------------------------------------- 1 | -include_lib("eunit/include/eunit.hrl"). 2 | 3 | all_test_() -> 4 | {foreach, 5 | fun() -> test_setup() end, 6 | fun(V) -> test_teardown(V) end, 7 | [ 8 | {"test_initial_load", ?_test(test_initial_load())}, 9 | {"test_reload_same_layout", ?_test(test_reload_same_layout())}, 10 | {"test_loadout_change", ?_test(test_loadout_change())}, 11 | {"test_unload_servers", ?_test(test_unload_servers())} 12 | ]}. 13 | 14 | test_initial_load() -> 15 | P = partitions:create_partitions(1, a, [a]), 16 | expect_start_servers([sync_1, sync_2147483649]), 17 | sync_manager:load(a, P, p_for_n(a, P)), 18 | verify(). 19 | 20 | test_reload_same_layout() -> 21 | P = partitions:create_partitions(1, a, [a]), 22 | expect_start_servers([sync_1, sync_2147483649]), 23 | sync_manager:load(a, P, p_for_n(a, P)), 24 | sync_manager:load(a, P, p_for_n(a, P)), 25 | verify(). 26 | 27 | test_loadout_change() -> 28 | P1 = partitions:create_partitions(0, a, [a]), 29 | P2 = partitions:create_partitions(1, a, [a]), 30 | expect_start_servers([sync_1]), 31 | sync_manager:load(a, P1, p_for_n(a, P1)), 32 | verify(), 33 | expect_start_servers([sync_2147483649]), 34 | sync_manager:load(a, P2, p_for_n(a, P2)), 35 | verify(). 
36 | 37 | test_unload_servers() -> 38 | P1 = partitions:create_partitions(1, a, [a]), 39 | P2 = partitions:create_partitions(1, a, [a, b]), 40 | ?debugFmt("p1 ~p", [P1]), 41 | ?debugFmt("p2 ~p", [P2]), 42 | expect_start_servers([sync_1,sync_2147483649]), 43 | expect_stop_servers([sync_1,sync_2147483649]), 44 | sync_manager:load(b, P1, p_for_n(a, P1)), 45 | sync_manager:load(a, P2, p_for_n(a, P2)), 46 | verify(). 47 | 48 | test_setup() -> 49 | {ok, _} = sync_manager:start_link(), 50 | {ok, _} = mock:mock(supervisor). 51 | 52 | verify() -> 53 | ok = mock:verify(supervisor). 54 | 55 | test_teardown(_) -> 56 | sync_manager:stop(), 57 | mock:stop(supervisor). 58 | 59 | p_for_n(N, P) -> 60 | lists:map(fun({_,P}) -> P end, lists:filter(fun({A,_}) -> A == N end, P)). 61 | 62 | expect_start_servers([]) -> ok; 63 | 64 | expect_start_servers([Part|Parts]) -> 65 | mock:expects(supervisor, start_child, fun({sync_server_sup, Spec}) -> 66 | element(1, Spec) == Part 67 | end, ok), 68 | expect_start_servers(Parts). 69 | 70 | expect_stop_servers([]) -> ok; 71 | 72 | expect_stop_servers([Part|Parts]) -> 73 | mock:expects(supervisor, terminate_child, fun({sync_server_sup, Name}) -> 74 | Name == Part 75 | end, ok), 76 | mock:expects(supervisor, delete_child, fun({sync_server_sup, Name}) -> 77 | Name == Part 78 | end, ok), 79 | expect_stop_servers(Parts). -------------------------------------------------------------------------------- /etest/t.erl: -------------------------------------------------------------------------------- 1 | -module(t). 2 | 3 | -include_lib("eunit/include/eunit.hrl"). 4 | -export([start/0, config/1]). 5 | 6 | start() -> 7 | eunit:test(config(src_dir)). 
8 | 9 | 10 | config(src_dir) -> 11 | Root = filename:dirname(config(test_dir)), 12 | filename:absname(filename:join([Root, "elibs"])); 13 | config(test_dir) -> 14 | filename:dirname(?FILE); 15 | config(priv_dir) -> 16 | case init:get_argument(priv_dir) of 17 | {ok, [[Dir]]} -> 18 | Dir; 19 | Other -> 20 | ?debugFmt("priv_dir argument result: ~p", [Other]), 21 | Root = config(test_dir), 22 | filename:absname( 23 | filename:join([Root, "log", atom_to_list(node())])) 24 | end. 25 | 26 | -------------------------------------------------------------------------------- /etest/vector_clock_test.erl: -------------------------------------------------------------------------------- 1 | -include_lib("eunit/include/eunit.hrl"). 2 | 3 | increment_clock_test() -> 4 | Clock = create(a), 5 | Clock2 = increment(b, Clock), 6 | true = less_than(Clock, Clock2), 7 | Clock3 = increment(a, Clock2), 8 | true = less_than(Clock2, Clock3), 9 | Clock4 = increment(b, Clock3), 10 | true = less_than(Clock3, Clock4), 11 | Clock5 = increment(c, Clock4), 12 | true = less_than(Clock4, Clock5), 13 | Clock6 = increment(b, Clock5), 14 | true = less_than(Clock5, Clock6). 15 | 16 | less_than_concurrent_test() -> 17 | ClockA = [{b, 1}], 18 | ClockB = [{a, 1}], 19 | false = less_than(ClockA, ClockB), 20 | false = less_than(ClockB, ClockA). 21 | 22 | less_than_causal_test() -> 23 | ClockA = [{a,2}, {b,4}, {c,1}], 24 | ClockB = [{c,1}], 25 | true = less_than(ClockB, ClockA), 26 | false = less_than(ClockA, ClockB). 27 | 28 | less_than_causal_2_test() -> 29 | ClockA = [{a,2}, {b,4}, {c,1}], 30 | ClockB = [{a,3}, {b,4}, {c,1}], 31 | true = less_than(ClockA, ClockB), 32 | false = less_than(ClockB, ClockA). 33 | 34 | mixed_up_ordering_test() -> 35 | ClockA = [{b,4}, {a,2}], 36 | ClockB = [{a,1}, {b,3}], 37 | true = less_than(ClockB, ClockA), 38 | false = less_than(ClockA, ClockB). 
39 | 40 | equivalence_test() -> 41 | ClockA = [{b,4}, {a,2}], 42 | ClockB = [{a,2}, {b,4}], 43 | false = less_than(ClockA, ClockA), 44 | false = less_than(ClockA, ClockB), 45 | false = less_than(ClockB, ClockA), 46 | true = equals(ClockA, ClockA), 47 | true = equals(ClockB, ClockA), 48 | true = equals(ClockA, ClockB), 49 | false = equals(ClockA, [{a,3}, {b,3}]). 50 | 51 | concurrent_test() -> 52 | ClockA = [{a,1}], 53 | ClockB = [{b,1}], 54 | true = concurrent(ClockA, ClockB). 55 | 56 | simple_merge_test() -> 57 | ClockA = [{a,1}], 58 | ClockB = [{b,1}], 59 | [{a,1},{b,1}] = merge(ClockA,ClockB). 60 | 61 | overlap_equals_merge_test() -> 62 | ClockA = [{a,3},{b,4}], 63 | ClockB = [{a,3},{c,1}], 64 | [{a,3},{b,4},{c,1}] = merge(ClockA, ClockB). 65 | 66 | overlap_unequal_merge_test() -> 67 | ClockA = [{a,3},{b,4}], 68 | ClockB = [{a,4},{c,5}], 69 | [{a,4},{b,4},{c,5}] = merge(ClockA, ClockB). 70 | 71 | resolve_notfound_test() -> 72 | ClockVals = {[{a,1}, {b, 2}], ["a", "b"]}, 73 | ClockVals = resolve(not_found, ClockVals), 74 | ClockVals = resolve(ClockVals, not_found). 75 | 76 | clock_truncation_test() -> 77 | Clock = [{a,1},{b,2},{c,3},{d,4},{e,5},{f,6},{g,7},{h,8},{i,9},{j,10},{k,11}], 78 | Clock1 = truncate(Clock), 79 | ?assertEqual(10, length(Clock1)), 80 | ?assertEqual(false, lists:any(fun(E) -> E =:= {a, 1} end, Clock1)). 81 | -------------------------------------------------------------------------------- /gen-erl/dynomite_constants.hrl: -------------------------------------------------------------------------------- 1 | %% 2 | %% Autogenerated by Thrift 3 | %% 4 | %% DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING 5 | %% 6 | 7 | 8 | -include("dynomite_types.hrl"). 
9 | 10 | -------------------------------------------------------------------------------- /gen-erl/dynomite_thrift.erl: -------------------------------------------------------------------------------- 1 | %% 2 | %% Autogenerated by Thrift 3 | %% 4 | %% DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING 5 | %% 6 | 7 | -module(dynomite_thrift). 8 | -behaviour(thrift_service). 9 | 10 | 11 | -include("dynomite_thrift.hrl"). 12 | 13 | -export([struct_info/1, function_info/2]). 14 | 15 | struct_info('i am a dummy struct') -> undefined. 16 | %%% interface 17 | % get(This, Key) 18 | function_info('get', params_type) -> 19 | {struct, [{1, string}]} 20 | ; 21 | function_info('get', reply_type) -> 22 | {struct, [{1, string}, 23 | {2, {list, string}}]} 24 | ; 25 | function_info('get', exceptions) -> 26 | {struct, [{1, {struct, {'dynomite_types', 'failureException'}}}]} 27 | ; 28 | % put(This, Key, Context, Data) 29 | function_info('put', params_type) -> 30 | {struct, [{1, string}, 31 | {2, string}, 32 | {3, string}]} 33 | ; 34 | function_info('put', reply_type) -> 35 | i32; 36 | function_info('put', exceptions) -> 37 | {struct, [{1, {struct, {'dynomite_types', 'failureException'}}}]} 38 | ; 39 | % has(This, Key) 40 | function_info('has', params_type) -> 41 | {struct, [{1, string}]} 42 | ; 43 | function_info('has', reply_type) -> 44 | i32; 45 | function_info('has', exceptions) -> 46 | {struct, [{1, {struct, {'dynomite_types', 'failureException'}}}]} 47 | ; 48 | % remove(This, Key) 49 | function_info('remove', params_type) -> 50 | {struct, [{1, string}]} 51 | ; 52 | function_info('remove', reply_type) -> 53 | i32; 54 | function_info('remove', exceptions) -> 55 | {struct, [{1, {struct, {'dynomite_types', 'failureException'}}}]} 56 | ; 57 | function_info(xxx, dummy) -> dummy. 
58 | 59 | -------------------------------------------------------------------------------- /gen-erl/dynomite_thrift.hrl: -------------------------------------------------------------------------------- 1 | -ifndef(_dynomite_included). 2 | -define(_dynomite_included, yeah). 3 | -include("dynomite_types.hrl"). 4 | 5 | -endif. 6 | -------------------------------------------------------------------------------- /gen-erl/dynomite_types.erl: -------------------------------------------------------------------------------- 1 | %% 2 | %% Autogenerated by Thrift 3 | %% 4 | %% DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING 5 | %% 6 | 7 | -module(dynomite_types). 8 | 9 | -include("dynomite_types.hrl"). 10 | 11 | -export([struct_info/1]). 12 | %% struct getResult 13 | 14 | % -record(getResult, {context, results}). 15 | 16 | struct_info('getResult') -> 17 | {struct, [{1, string}, 18 | {2, {list, string}}]} 19 | ; 20 | 21 | %% struct failureException 22 | 23 | % -record(failureException, {message}). 24 | 25 | struct_info('failureException') -> 26 | {struct, [{1, string}]} 27 | ; 28 | 29 | struct_info('i am a dummy struct') -> undefined. 30 | -------------------------------------------------------------------------------- /gen-erl/dynomite_types.hrl: -------------------------------------------------------------------------------- 1 | -ifndef(_dynomite_types_included). 2 | -define(_dynomite_types_included, yeah). 3 | 4 | -record(getResult, {context, results}). 5 | 6 | -record(failureException, {message}). 7 | 8 | -endif. 
9 | -------------------------------------------------------------------------------- /gen-rb/dynomite_constants.rb: -------------------------------------------------------------------------------- 1 | # 2 | # Autogenerated by Thrift 3 | # 4 | # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING 5 | # 6 | 7 | require 'thrift/protocol' 8 | require File.dirname(__FILE__) + '/dynomite_types' 9 | 10 | -------------------------------------------------------------------------------- /gen-rb/dynomite_types.rb: -------------------------------------------------------------------------------- 1 | # 2 | # Autogenerated by Thrift 3 | # 4 | # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING 5 | # 6 | 7 | require 'thrift/protocol' 8 | 9 | class GetResult 10 | include ::Thrift::Struct 11 | ::Thrift::Struct.field_accessor self, :context, :results 12 | FIELDS = { 13 | 1 => {:type => ::Thrift::Types::STRING, :name => 'context'}, 14 | 2 => {:type => ::Thrift::Types::LIST, :name => 'results', :element => {:type => ::Thrift::Types::STRING}} 15 | } 16 | end 17 | 18 | class FailureException < StandardError 19 | include ::Thrift::Struct 20 | def initialize(message=nil) 21 | super() 22 | self.message = message 23 | end 24 | 25 | ::Thrift::Struct.field_accessor self, :message 26 | FIELDS = { 27 | 1 => {:type => ::Thrift::Types::STRING, :name => 'message'} 28 | } 29 | end 30 | 31 | -------------------------------------------------------------------------------- /if/dynomite.thrift: -------------------------------------------------------------------------------- 1 | typedef binary ContextData 2 | typedef string Data 3 | 4 | struct GetResult { 5 | 1:ContextData context 6 | 2:list results 7 | } 8 | 9 | exception FailureException { 10 | 1:string message 11 | } 12 | 13 | 14 | service Dynomite { 15 | GetResult get(1:Data key) throws (1:FailureException fail) 16 | 17 | /** 18 | * Store a piece of data 19 | * 20 | * @return number of servers stored 21 | */ 22 | i32 
put(1:Data key, 2:ContextData context, 3:Data data) throws (1:FailureException fail) 23 | 24 | 25 | /** 26 | * @return number of servers that have this key 27 | */ 28 | i32 has(1:Data key) throws (1:FailureException fail) 29 | 30 | /** 31 | * @return the number of servers deleted from 32 | */ 33 | i32 remove(1:Data key) throws (1:FailureException fail) 34 | } -------------------------------------------------------------------------------- /include/chunk_size.hrl: -------------------------------------------------------------------------------- 1 | -define(CHUNK_SIZE, 5120). -------------------------------------------------------------------------------- /include/common.hrl: -------------------------------------------------------------------------------- 1 | 2 | -define(fmt(Msg, Args), lists:flatten(io_lib:format(Msg, Args))). 3 | -define(infoFmt(Msg, Args), error_logger:info_msg(Msg, Args)). 4 | -define(infoMsg(Msg), error_logger:info_msg(Msg)). -------------------------------------------------------------------------------- /include/config.hrl: -------------------------------------------------------------------------------- 1 | 2 | -ifndef(CONFIG_HRL). 3 | -define(CONFIG_HRL, true). 4 | %we don't want to turn protocol buffers on by default, since the library is not included 5 | %it should be very easy for new users to start up an instance 6 | -record(config, {n=3, r=1, w=1, q=6, directory, web_port, text_port=11222, storage_mod=dets_storage, blocksize=4096, thrift_port=9200, pb_port=undefined, buffered_writes=undefined, cache=undefined, cache_size=1048576}). 7 | 8 | -endif. 9 | -------------------------------------------------------------------------------- /include/couch_db.hrl: -------------------------------------------------------------------------------- 1 | % Licensed under the Apache License, Version 2.0 (the "License"); you may not 2 | % use this file except in compliance with the License. 
You may obtain a copy of 3 | % the License at 4 | % 5 | % http://www.apache.org/licenses/LICENSE-2.0 6 | % 7 | % Unless required by applicable law or agreed to in writing, software 8 | % distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 9 | % WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 10 | % License for the specific language governing permissions and limitations under 11 | % the License. 12 | 13 | -define(LOCAL_DOC_PREFIX, "_local/"). 14 | -define(DESIGN_DOC_PREFIX0, "_design"). 15 | -define(DESIGN_DOC_PREFIX, "_design/"). 16 | 17 | -define(JSON_ENCODE(V), mochijson2:encode(V)). 18 | -define(JSON_DECODE(V), mochijson2:decode(V)). 19 | 20 | -define(b2l(V), binary_to_list(V)). 21 | -define(l2b(V), list_to_binary(V)). 22 | 23 | -define(DEFAULT_ATTACHMENT_CONTENT_TYPE, <<"application/octet-stream">>). 24 | 25 | -define(LOG_DEBUG(Format, Args), 26 | case couch_log:debug_on() of 27 | true -> error_logger:info_report(couch_debug, {Format, Args}); 28 | false -> ok 29 | end). 30 | 31 | -define(LOG_INFO(Format, Args), 32 | case couch_log:info_on() of 33 | true -> error_logger:info_report(couch_info, {Format, Args}); 34 | false -> ok 35 | end). 36 | 37 | -define(LOG_ERROR(Format, Args), 38 | error_logger:info_report(couch_error, {Format, Args})). 39 | 40 | -record(doc_info, 41 | { 42 | id = <<"">>, 43 | rev = <<"">>, 44 | update_seq = 0, 45 | summary_pointer = nil, 46 | conflict_revs = [], 47 | deleted_conflict_revs = [], 48 | deleted = false 49 | }). 50 | 51 | -record(full_doc_info, 52 | {id = <<"">>, 53 | update_seq = 0, 54 | deleted = false, 55 | rev_tree = [] 56 | }). 57 | 58 | -record(httpd, 59 | {mochi_req, 60 | method, 61 | path_parts, 62 | db_url_handlers 63 | }). 64 | 65 | 66 | -record(doc, 67 | { 68 | id = <<"">>, 69 | revs = [], 70 | 71 | % the json body object. 
72 | body = {[]}, 73 | 74 | % each attachment contains: 75 | % {data, Type, <>} 76 | % or: 77 | % {pointer, Type, {FileHandle, StreamPointer, Length}} 78 | attachments = [], 79 | 80 | deleted = false, 81 | 82 | % key/value tuple of meta information, provided when using special options: 83 | % couch_db:open_doc(Db, Id, Options). 84 | meta = [] 85 | }). 86 | 87 | 88 | 89 | 90 | 91 | -record(db_header, 92 | {write_version = 0, 93 | update_seq = 0, 94 | summary_stream_state = nil, 95 | fulldocinfo_by_id_btree_state = nil, 96 | docinfo_by_seq_btree_state = nil, 97 | local_docs_btree_state = nil, 98 | purge_seq = 0, 99 | purged_docs = nil 100 | }). 101 | 102 | -record(db, 103 | {main_pid=nil, 104 | update_pid=nil, 105 | compactor_pid=nil, 106 | fd, 107 | header = #db_header{}, 108 | summary_stream, 109 | fulldocinfo_by_id_btree, 110 | docinfo_by_seq_btree, 111 | local_docs_btree, 112 | update_seq, 113 | name, 114 | filepath, 115 | validate_doc_funs=[], 116 | user_ctx={[]} 117 | }). 118 | 119 | 120 | -record(view_query_args, { 121 | start_key = nil, 122 | end_key = {}, 123 | count = 10000000000, % a huge huge default number. Picked so we don't have 124 | % to do different logic for when there is no count 125 | % limit 126 | update = true, 127 | direction = fwd, 128 | start_docid = nil, 129 | end_docid = {}, 130 | skip = 0, 131 | group_level = 0, 132 | reduce = true, 133 | include_docs = false 134 | }). 135 | 136 | 137 | % small value used in revision trees to indicate the revision isn't stored 138 | -define(REV_MISSING, []). 139 | -------------------------------------------------------------------------------- /include/dmerkle.hrl: -------------------------------------------------------------------------------- 1 | -define(VERSION, 2). 2 | -define(STATIC_HEADER, 93). 3 | 4 | -define(d_from_blocksize(BlockSize), trunc((BlockSize - 17)/16)). 5 | -define(pointers_from_blocksize(BlockSize), (lib_misc:ceiling(math:log(BlockSize)/math:log(2)) - 3)). 
6 | -define(pointer_for_size(Size, BlockSize), (if Size =< 16 -> 1; Size =< BlockSize -> ?pointers_from_blocksize(Size); true -> last end)). 7 | -define(size_for_pointer(N), (2 bsl (N+2))). 8 | -define(headersize_from_blocksize(BlockSize), (?STATIC_HEADER + ?pointers_from_blocksize(BlockSize) * 8)). 9 | -define(aligned(Ptr, HeaderSize, BlockSize), (((Ptr - (HeaderSize)) rem BlockSize) == 0)). 10 | -define(block(Ptr, HeaderSize, BlockSize), ((Ptr - (HeaderSize)) div BlockSize)). 11 | 12 | -record(node, {m=0, keys=[], children=[], offset=eof}). 13 | -record(leaf, {m=0, values=[], offset=eof}). 14 | -record(free, {offset,size=0,pointer=0}). -------------------------------------------------------------------------------- /include/profile.hrl: -------------------------------------------------------------------------------- 1 | -ifdef(PROF). 2 | -define(balance_prof, dynomite_prof:balance_prof()). 3 | -define(prof(Label), dynomite_prof:start_prof(Label)). 4 | -define(forp(Label), dynomite_prof:stop_prof(Label)). 5 | -else. 6 | -define(prof(Label), true). 7 | -define(forp(Label), true). 8 | -define(balance_prof, true). 9 | -endif. 
10 | -------------------------------------------------------------------------------- /pylibs/dynomite/__init__.py: -------------------------------------------------------------------------------- 1 | from dynomite.client import Client 2 | 3 | -------------------------------------------------------------------------------- /pylibs/dynomite/constants.py: -------------------------------------------------------------------------------- 1 | # 2 | # Autogenerated by Thrift 3 | # 4 | # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING 5 | # 6 | 7 | from thrift.Thrift import * 8 | from ttypes import * 9 | 10 | -------------------------------------------------------------------------------- /pylibs/dynomite/thrift_client.py: -------------------------------------------------------------------------------- 1 | from dynomite import Dynomite 2 | from dynomite.ttypes import * 3 | 4 | from thrift import Thrift 5 | from thrift.transport import TSocket 6 | from thrift.transport import TTransport 7 | from thrift.protocol import TBinaryProtocol 8 | 9 | from threading import local, RLock 10 | 11 | class Client(object): 12 | """ 13 | Simplified thrift client abstracts some thrift mechanics. Presents same 14 | api as dynomite.Dynomite.Client, adding automatic setup of the transport 15 | and protocol, and threadsafety. 
16 | 17 | """ 18 | def __init__(self, host, port): 19 | self._host = host 20 | self._port = port 21 | self._lock = RLock() 22 | self._con = local() 23 | 24 | def get(self, key): 25 | self.connect() 26 | return self._con.client.get(key) 27 | 28 | def put(self, key, value, context=''): 29 | self.connect() 30 | return self._con.client.put(key, context, value) 31 | 32 | def has(self, key): 33 | self.connect() 34 | return self._con.client.has(key) 35 | 36 | def remove(self, key): 37 | self.connect() 38 | return self._con.client.remove(key) 39 | 40 | def connect(self): 41 | if hasattr(self._con, 'client'): 42 | return 43 | self._lock.acquire() 44 | try: 45 | transport = TSocket.TSocket(self._host, self._port) 46 | transport = TTransport.TBufferedTransport(transport) 47 | protocol = TBinaryProtocol.TBinaryProtocol(transport) 48 | self._con.client = Dynomite.Client(protocol) 49 | self._con.transport = transport 50 | transport.open() 51 | finally: 52 | self._lock.release() 53 | 54 | def disconnect(self): 55 | if not hasattr(self._con, 'transport'): 56 | return 57 | self._con.transport.close() 58 | -------------------------------------------------------------------------------- /pylibs/setup.cfg: -------------------------------------------------------------------------------- 1 | [nosetests] 2 | with-doctest=1 3 | doctest-extension=.rst 4 | doctest-fixtures=_fixtures 5 | -------------------------------------------------------------------------------- /pylibs/setup.py: -------------------------------------------------------------------------------- 1 | try: 2 | from setuptools import setup 3 | except ImportError: 4 | from distutils import setup 5 | 6 | setup( 7 | name="Dynomite", 8 | version="0.1", 9 | packages=['dynomite'], 10 | tests_require=['nose>=0.11.0.dev', 'boto'] 11 | ) 12 | -------------------------------------------------------------------------------- /pylibs/test/ec2/ec2_load_test.py: 
-------------------------------------------------------------------------------- 1 | """ This functional test may be run under nosetests or by running this 2 | module directly. When running under nosetests, The following 3 | environment variables can be set to control test behavior and set ec2 4 | connection parameters, in lieu of the command line options available 5 | when run directly: 6 | 7 | AWS_USERID -- your amazon user id 8 | AWS_KEY -- your amazon key 9 | AWS_SECRET_KEY -- your amazon secret key 10 | AWS_SSH_KEY -- your amazon ssh public key 11 | 12 | EC2_AMI -- your AMI name. AMI must have erlang R12B-1 or 13 | better, ruby, rake, python 2.5.1 or better, and 14 | thrift installed 15 | EC2_INSTANCE_TYPE -- type of EC2 instances to start 16 | EC2_INSTANCES -- number of EC2 instances to start 17 | EC2_RUN_TIME -- length of time to run the load test 18 | EC2_CLIENTS_PER_HOST -- number of clients per instance 19 | EC2_GET_THRESHOLD -- If 99.9% of gets are not faster than this # of 20 | milliseconds, the test fails 21 | EC2_PUT_THRESHOLD -- If 99.9% of puts are not faster than this # of 22 | milliseconds, the test fails 23 | """ 24 | import os 25 | import sys 26 | import boto 27 | 28 | 29 | def load_test(conf=None): 30 | if conf is None: 31 | conf = configure() 32 | instances = start_instances(conf) 33 | try: 34 | start_load(conf, instances) 35 | wait(conf) 36 | stats = collect_stats(conf, instances) 37 | finally: 38 | stop_instances(conf, instances) 39 | evaluate_stats(conf, stats) 40 | 41 | 42 | def main(): 43 | conf = configure(sys.argv) 44 | load_test(conf) 45 | 46 | 47 | def configure(argv=None): 48 | if argv is None: 49 | argv = [] 50 | # FIXME 51 | env = os.environ 52 | return options 53 | 54 | 55 | if __name__ == '__main__': 56 | main() 57 | -------------------------------------------------------------------------------- /pylibs/test/functional/python_client.rst: -------------------------------------------------------------------------------- 1 | 
---------------------- 2 | Dynomite Python Client 3 | ---------------------- 4 | 5 | FIXME: explain 6 | 7 | Basic usage 8 | =========== 9 | 10 | >>> from dynomite import Client 11 | >>> c = Client('localhost', 11222) 12 | >>> c.get('a') 13 | >>> c.put('a', 'a value') 14 | 1 15 | >>> c.get('a') # doctest: +ELLIPSIS 16 | ('...', ['a value']) 17 | >>> c.has('a') 18 | (True, 1) 19 | >>> c.remove('a') 20 | 1 21 | >>> c.has('a') 22 | (False, 1) 23 | >>> c.get('a') 24 | 25 | Thrift 26 | ====== 27 | 28 | >>> from dynomite.thrift_client import Client 29 | >>> c = Client('localhost', 9200) 30 | >>> c.get('b').results 31 | [] 32 | >>> c.put('b', 'b value') 33 | 1 34 | >>> c.get('b').results 35 | ['b value'] 36 | >>> c.remove('b') 37 | 1 38 | >>> c.get('b').results 39 | [] 40 | -------------------------------------------------------------------------------- /pylibs/test/functional/python_client_fixtures.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import sys 4 | import time 5 | import tempfile 6 | 7 | __test__ = {} 8 | 9 | 10 | ROOT = os.path.dirname( 11 | os.path.dirname( 12 | os.path.dirname( 13 | os.path.dirname(__file__)))) 14 | DYNOMITE = os.path.join(ROOT, 'bin', 'dynomite') 15 | TMP_DIR = None 16 | 17 | 18 | def setup_module(): 19 | cmd = "%s start -o dpct1 -p 11222 -t 9200 --data '%s' " \ 20 | "--storage dict_storage " \ 21 | "-n 1 -r 1 -w 1 --detached" % (DYNOMITE, tmp_dir()) 22 | os.system(cmd) 23 | time.sleep(2) 24 | 25 | 26 | def teardown_module(): 27 | os.system("%s stop -o dpct1" % DYNOMITE) 28 | if os.path.isdir(tmp_dir()): 29 | shutil.rmtree(tmp_dir()) 30 | 31 | 32 | def tmp_dir(): 33 | # Don't want to create dir at import time, only on demand 34 | global TMP_DIR 35 | if TMP_DIR: 36 | return TMP_DIR 37 | TMP_DIR = tempfile.mkdtemp() 38 | return TMP_DIR 39 | -------------------------------------------------------------------------------- /pylibs/tools/dbench_thrift.py: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from dynomite.thrift_client import Client 4 | from dynomite.ttypes import * 5 | 6 | from optparse import OptionParser 7 | from threading import Thread 8 | from Queue import Queue 9 | 10 | from time import time 11 | from random import choice 12 | 13 | ports = [9200] #, 9201, 9202, 9203, 9204] 14 | 15 | 16 | def main(): 17 | rq = Queue() 18 | results = {'requests': 0, 'get': [], 'put': []} 19 | options, junk = opts() 20 | workers = [] 21 | for i in range(0, int(options.clients)): 22 | t = Thread(target=run, args=(int(options.number), rq, 23 | int(options.keysize), 24 | int(options.valuesize))) 25 | workers.append(t) 26 | for w in workers: 27 | w.start() 28 | for w in workers: 29 | w.join() 30 | consolidate(rq.get(), results) 31 | print ".", 32 | 33 | total_time = 0.0 34 | for i in results['get']: 35 | total_time += i 36 | for i in results['put']: 37 | total_time += i 38 | 39 | print 40 | print "%s client(s) %s request(s) %f0.3s" % (options.clients, 41 | options.number, 42 | total_time) 43 | g = results['get'] 44 | g.sort() 45 | p = results['put'] 46 | p.sort() 47 | print "get avg: %f0.3ms mean: %f0.3ms 99.9: %f0.3ms" % ( 48 | (sum(g) / float(len(g))) * 1000, 49 | (g[len(g)/2]) * 1000, 50 | (g[int(len(g) * .999) -1]) * 1000) 51 | print "put avg: %f0.3ms mean: %f0.3ms 99.9: %f0.3ms" % ( 52 | (sum(p) / float(len(p))) * 1000, 53 | (p[len(p)/2]) * 1000, 54 | (p[int(len(p) * .999) -1]) * 1000) 55 | 56 | def run(num, rq, ks, vs): 57 | res = {'requests': 0, 58 | 'get': [], 59 | 'put': []} 60 | 61 | keys = "abcdefghijklmnop" 62 | 63 | client = Client('localhost', choice(ports)) 64 | 65 | for i in range(0, num): 66 | tk = 0.0 67 | key = ''.join([choice(keys) for i in range(0, ks)]) 68 | st = time() 69 | cur = client.get(key) 70 | tk += time() - st 71 | res['get'].append(tk) 72 | newval = rval(vs) 73 | st = time() 74 | client.put(key, newval, cur.context) 75 | tk 
+= time() - st 76 | res['requests'] += 1 77 | res['put'].append(tk) 78 | rq.put(res) 79 | 80 | 81 | def consolidate(res, results): 82 | results['requests'] += res['requests'] 83 | results['get'].extend(res['get']) 84 | results['put'].extend(res['put']) 85 | 86 | 87 | def opts(): 88 | parser = OptionParser() 89 | parser.add_option('-n', '--number', dest='number', default='10', 90 | action='store', help='Number of requests per client') 91 | parser.add_option('-c', '--concurrency', '--clients', default='1', 92 | dest='clients', action='store', 93 | help='Number of concurrent clients') 94 | parser.add_option('-k', '--keysize', default='1', 95 | dest='keysize', action='store', 96 | help='Length of each key') 97 | parser.add_option('-v', '--valuesize', default='1024', 98 | dest='valuesize', action='store', 99 | help='Length of each value') 100 | 101 | return parser.parse_args() 102 | 103 | 104 | def rval(bsize=1024): 105 | b = [] 106 | for i in range(0, bsize): 107 | b.append(choice("abcdefghijklmnopqrstuvwxyz0123456789")) 108 | return ''.join(b) 109 | 110 | 111 | if __name__ == '__main__': 112 | main() 113 | -------------------------------------------------------------------------------- /pylibs/tools/dynomite-remote: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # 3 | # Autogenerated by Thrift 4 | # 5 | # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING 6 | # 7 | 8 | import sys 9 | import pprint 10 | from urlparse import urlparse 11 | from thrift.transport import TTransport 12 | from thrift.transport import TSocket 13 | from thrift.transport import THttpClient 14 | from thrift.protocol import TBinaryProtocol 15 | 16 | import Dynomite 17 | from ttypes import * 18 | 19 | if len(sys.argv) <= 1 or sys.argv[1] == '--help': 20 | print '' 21 | print 'Usage: ' + sys.argv[0] + ' [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]' 22 | print '' 23 | print 'Functions:' 24 | print ' 
GetResult get(Data key)' 25 | print ' i32 put(Data key, ContextData context, Data data)' 26 | print ' i32 has(Data key)' 27 | print ' i32 remove(Data key)' 28 | print '' 29 | sys.exit(0) 30 | 31 | pp = pprint.PrettyPrinter(indent = 2) 32 | host = 'localhost' 33 | port = 9090 34 | uri = '' 35 | framed = False 36 | http = False 37 | argi = 1 38 | 39 | if sys.argv[argi] == '-h': 40 | parts = sys.argv[argi+1].split(':') 41 | host = parts[0] 42 | port = int(parts[1]) 43 | argi += 2 44 | 45 | if sys.argv[argi] == '-u': 46 | url = urlparse(sys.argv[argi+1]) 47 | parts = url[1].split(':') 48 | host = parts[0] 49 | if len(parts) > 1: 50 | port = int(parts[1]) 51 | else: 52 | port = 80 53 | uri = url[2] 54 | http = True 55 | argi += 2 56 | 57 | if sys.argv[argi] == '-f' or sys.argv[argi] == '-framed': 58 | framed = True 59 | argi += 1 60 | 61 | cmd = sys.argv[argi] 62 | args = sys.argv[argi+1:] 63 | 64 | if http: 65 | transport = THttpClient.THttpClient(host, port, uri) 66 | else: 67 | socket = TSocket.TSocket(host, port) 68 | if framed: 69 | transport = TTransport.TFramedTransport(socket) 70 | else: 71 | transport = TTransport.TBufferedTransport(socket) 72 | protocol = TBinaryProtocol.TBinaryProtocol(transport) 73 | client = Dynomite.Client(protocol) 74 | transport.open() 75 | 76 | if cmd == 'get': 77 | if len(args) != 1: 78 | print 'get requires 1 args' 79 | sys.exit(1) 80 | pp.pprint(client.get(eval(args[0]),)) 81 | 82 | elif cmd == 'put': 83 | if len(args) != 3: 84 | print 'put requires 3 args' 85 | sys.exit(1) 86 | pp.pprint(client.put(eval(args[0]),eval(args[1]),eval(args[2]),)) 87 | 88 | elif cmd == 'has': 89 | if len(args) != 1: 90 | print 'has requires 1 args' 91 | sys.exit(1) 92 | pp.pprint(client.has(eval(args[0]),)) 93 | 94 | elif cmd == 'remove': 95 | if len(args) != 1: 96 | print 'remove requires 1 args' 97 | sys.exit(1) 98 | pp.pprint(client.remove(eval(args[0]),)) 99 | 100 | transport.close() 101 | 
-------------------------------------------------------------------------------- /releases/dynomite.rel: -------------------------------------------------------------------------------- 1 | {release, 2 | {"dynomite_rel", "dynomite_?VERSION"}, 3 | {erts, "5.6.3"}, 4 | [ 5 | {kernel, "2.13.1"}, 6 | {crypto, "1.6"}, 7 | {stdlib, "1.16.1"}, 8 | {sasl, "2.1.6"}, 9 | {mochiweb, "0.01"}, 10 | {thrift, "0.1"}, 11 | {dynomite, "?VERSION"} 12 | ] 13 | }. -------------------------------------------------------------------------------- /rlibs/analyze_bench.rb: -------------------------------------------------------------------------------- 1 | require 'rubygems' 2 | require 'ostruct' 3 | require 'optparse' 4 | 5 | class Array 6 | #assumes the array is sorted 7 | def percentile(perc) 8 | len = self.length 9 | index = (len * perc).round 10 | self[index] 11 | end 12 | end 13 | 14 | options = { 15 | :logdir => "bench_log" 16 | } 17 | 18 | OptionParser.new do |opts| 19 | opts.banner = "Usage: analyze_bench [options]" 20 | 21 | opts.on("-l", "--log [LOGDIR]", "Where the instances should log their raw performance data.") do |l| 22 | options[:logdir] = l 23 | end 24 | 25 | opts.on("-a", "--all [ALLDIR]", "Analyze all directories and spit out csv info") do |a| 26 | options[:all] = a 27 | end 28 | 29 | end.parse! 
30 | 31 | 32 | def get_stats(dir) 33 | stats = [] 34 | error_count = 0 35 | 36 | Dir[dir + "/*.log"].each do |logfilename| 37 | File.open(logfilename, 'r') do |f| 38 | f.each_line do |line| 39 | fields = line.split("\t") 40 | if (fields[1] == "error") 41 | error_count += 1 42 | else 43 | stats << fields 44 | end 45 | end 46 | end 47 | end 48 | 49 | #the sum 50 | gets = [] 51 | get_sum = 0 52 | get_num = 0 53 | puts = [] 54 | put_sum = 0 55 | put_num = 0 56 | errors = 0 57 | stats.each do |time, op, latency, key, host| 58 | if op == "get" 59 | gets << latency.to_f 60 | get_sum += latency.to_f 61 | get_num +=1 62 | else 63 | puts << latency.to_f 64 | put_sum += latency.to_f 65 | put_num += 1 66 | end 67 | end 68 | stats.sort! {|a, b| a.first <=> b.first } 69 | 70 | get_avg = get_sum.to_f / get_num 71 | put_avg = put_sum.to_f / put_num 72 | 73 | gets.sort! 74 | puts.sort! 75 | get_median = gets.percentile(0.5) 76 | put_median = puts.percentile(0.5) 77 | get_999 = gets.percentile(0.999) 78 | put_999 = puts.percentile(0.999) 79 | OpenStruct.new( 80 | :dir => dir, 81 | :min_time => stats.first[0].to_f, 82 | :max_time => stats.last[0].to_f, 83 | :get_length => gets.length, 84 | :get_avg => get_avg, 85 | :get_median => get_median, 86 | :get_999 => get_999, 87 | :put_length => puts.length, 88 | :put_avg => put_avg, 89 | :put_median => put_median, 90 | :put_999 => put_999, 91 | :error_count => error_count 92 | ) 93 | end 94 | 95 | if options[:all] 96 | puts "dir,get_length,get_avg,get_median,get_999,put_length,put_avg,put_median,put_999,error_count" 97 | directories = Dir[options[:all] + "/*"].each do |dir| 98 | stats = get_stats(dir) 99 | STDOUT.write "#{stats.dir},#{stats.get_length},#{stats.get_avg},#{stats.get_median},#{stats.get_999}," 100 | puts "#{stats.put_length},#{stats.put_avg},#{stats.put_median},#{stats.put_999},#{stats.error_count}" 101 | end 102 | else 103 | stats = get_stats(options[:logdir]) 104 | puts "errors #{stats.error_count}" 105 | puts "get stats: 
#{stats.get_length} datapoints" 106 | puts "\tavg: #{stats.get_avg}" 107 | puts "\tmed: #{stats.get_median}" 108 | puts "\t99.9:#{stats.get_999}" 109 | puts "put stats: #{stats.put_length} datapoints" 110 | puts "\tavg: #{stats.put_avg}" 111 | puts "\tmed: #{stats.put_median}" 112 | puts "\t99.9:#{stats.put_999}" 113 | puts "#{(stats.put_length + stats.get_length) / (stats.max_time - stats.min_time)} reqs/s" 114 | end -------------------------------------------------------------------------------- /rlibs/cli/bench.rb: -------------------------------------------------------------------------------- 1 | require 'rubygems' 2 | require 'inline' 3 | require 'thrift' 4 | require 'thrift/transport/socket' 5 | require 'thrift/protocol/tbinaryprotocolaccelerated' 6 | $:.unshift "/p/share/dynomite_rpc/gen-rb" 7 | require '/p/share/dynomite_rpc/gen-rb/Dynomite' 8 | 9 | Kernel.inline do |builder| 10 | builder.c_raw <<-EOF 11 | static VALUE random_bytes(int argc, VALUE *argv, VALUE self) { 12 | int size = FIX2INT(argv[0]); 13 | VALUE buff = argv[1]; 14 | int i; 15 | for(i=0; iptr[i] = random_byte; 18 | } 19 | return buff; 20 | } 21 | EOF 22 | end 23 | 24 | def new_bytes(size) 25 | buff = "" 26 | size.times do 27 | buff << rand(256) 28 | end 29 | buff 30 | end 31 | 32 | 33 | options = {} 34 | 35 | OptionParser.new do |opts| 36 | opts.banner = "Usage: dynomite bench [options]" 37 | contents = File.read(File.dirname(__FILE__) + "/shared/common.rb") 38 | eval contents 39 | 40 | opts.separator "" 41 | opts.separator "Specific options:" 42 | opts.on("-m", "--data DIR", "data directory") do |dir| 43 | options[:data] = dir 44 | end 45 | end.parse! 
46 | 47 | 48 | results = {:put => {}, :get => {}} 49 | 50 | %w(tc_storage fs_storage couch_storage dets_storage).each do |engine| 51 | results[:put][engine] = [] 52 | results[:get][engine] = [] 53 | 5.times do |i| 54 | FileUtils.rm_r(options[:data]) if File.exists?(options[:data]) 55 | size = 100*10**i 56 | puts "#{engine} at #{size} bytes" 57 | pid = fork do 58 | STDIN.reopen "/dev/null" 59 | STDOUT.reopen "/tmp/dyn.std.log" 60 | STDERR.reopen STDOUT 61 | exec "#{ROOT}/bin/dynomite start -m #{options[:data]} -o dynomite#{$$} -n 1 -w 1 -r 1 -q 0 -s #{engine} -l /tmp" 62 | end 63 | sleep(7) 64 | socket = Thrift::Socket.new('127.0.0.1', 9200) 65 | socket.open 66 | protocol = Thrift::BinaryProtocolAccelerated.new(Thrift::BufferedTransport.new(socket)) 67 | dyn = Dynomite::Client.new(protocol) 68 | buff = new_bytes(size) 69 | time = -Time.now.to_f 70 | ary = (1..1000).to_a.map do |i| 71 | begin 72 | key = "key#{rand(9000)}" 73 | # puts key 74 | dyn.put key, nil, random_bytes(size, buff) 75 | rescue => boom 76 | puts boom.message 77 | socket.open unless socket.open? 78 | end 79 | end 80 | time += Time.now.to_f 81 | results[:put][engine] << [size, time] 82 | time = -Time.now.to_f 83 | ary = (1..1000).to_a.map do |i| 84 | begin 85 | key = "key#{rand(9000)}" 86 | # puts key 87 | dyn.get key 88 | rescue => boom 89 | puts boom.message 90 | socket.open unless socket.open? 
91 | end 92 | end 93 | time += Time.now.to_f 94 | results[:get][engine] << [size, time] 95 | Process.kill("KILL", pid) 96 | Process.waitpid(pid) 97 | end 98 | end 99 | 100 | [:get, :put].each do |type| 101 | results[type].each do |k, ary| 102 | mapped = ary.map do |bytes, time| 103 | [bytes, time] 104 | end 105 | puts "#{k}_#{type} =\t#{mapped.inspect}" 106 | end 107 | end 108 | # at_exit { 109 | # Process.kill("INT", File.read("/tmp/dynomite.pid").to_i) 110 | # } -------------------------------------------------------------------------------- /rlibs/cli/console.rb: -------------------------------------------------------------------------------- 1 | options = {} 2 | options[:port] = 11222 3 | options[:databases] = '' 4 | options[:config] = '' 5 | 6 | OptionParser.new do |opts| 7 | opts.banner = "Usage: dynomite console [options]" 8 | 9 | contents = File.read(File.dirname(__FILE__) + "/shared/common.rb") 10 | eval contents 11 | 12 | end.parse! 13 | 14 | cookie = Digest::MD5.hexdigest(options[:cluster] + "NomMxnLNUH8suehhFg2fkXQ4HVdL2ewXwM") 15 | 16 | str = "erl -sname remsh_#{$$} -remsh #{options[:name]}@#{`hostname -s`.chomp} -hidden -setcookie #{cookie}" 17 | puts str 18 | exec str -------------------------------------------------------------------------------- /rlibs/cli/leave.rb: -------------------------------------------------------------------------------- 1 | options = {} 2 | 3 | OptionParser.new do |opts| 4 | opts.banner = "Usage: dynomite stop [options]" 5 | 6 | contents = File.read(File.dirname(__FILE__) + "/shared/common.rb") 7 | eval contents 8 | end.parse! 
9 | 10 | cookie = Digest::MD5.hexdigest(options[:cluster] + "NomMxnLNUH8suehhFg2fkXQ4HVdL2ewXwM") 11 | 12 | str = %Q(erl -sname remsh_#{$$} -remsh #{options[:name]}@#{`hostname -s`.chomp} -hidden -setcookie #{cookie}" -noshell -run membership leave) 13 | puts str 14 | exec str 15 | -------------------------------------------------------------------------------- /rlibs/cli/membership.rb: -------------------------------------------------------------------------------- 1 | options = {} 2 | options[:port] = 11222 3 | options[:databases] = '' 4 | options[:config] = '' 5 | 6 | OptionParser.new do |opts| 7 | opts.banner = "Usage: dynomite start [options]" 8 | 9 | contents = File.read(File.dirname(__FILE__) + "/shared/common.rb") 10 | eval contents 11 | 12 | opts.on('-m', "--module MODULE") do |mod| 13 | options[:module] = mod 14 | end 15 | 16 | opts.on('-f', "--function FUNCTION") do |func| 17 | options[:function] = func 18 | end 19 | 20 | opts.on('-a', "--arg ARG") do |arg| 21 | options[:args] ||= [] 22 | options[:args] << arg 23 | end 24 | 25 | end.parse! 
26 | 27 | cookie = Digest::MD5.hexdigest(options[:cluster] + "NomMxnLNUH8suehhFg2fkXQ4HVdL2ewXwM") 28 | 29 | str = "erl \ 30 | +K true \ 31 | +A 128 \ 32 | -hidden \ 33 | -smp enable \ 34 | -pz #{ROOT}/ebin/ \ 35 | -sname command \ 36 | -noshell \ 37 | -setcookie #{cookie} \ 38 | -run commands start \ 39 | -extra #{options[:name]} #{options[:module]} #{options[:function]} #{(options[:args] || []).join(' ')}" 40 | puts str 41 | exec str -------------------------------------------------------------------------------- /rlibs/cli/production.rb: -------------------------------------------------------------------------------- 1 | options = {} 2 | options[:port] = "-dynomite port 11222" 3 | options[:databases] = '' 4 | options[:config] = '-dynomite config "config.json"' 5 | 6 | OptionParser.new do |opts| 7 | opts.banner = "Usage: dynomite start [options]" 8 | 9 | contents = File.read(File.dirname(__FILE__) + "/shared/common.rb") 10 | eval contents 11 | 12 | opts.separator "" 13 | opts.separator "Specific options:" 14 | 15 | opts.on("-c", "--config [CONFIGFILE]", "path to the config file") do |config| 16 | options[:config] = %Q(-dynomite config "#{config}") 17 | end 18 | 19 | opts.on("-l", "--log [LOGFILE]", "error log path") do |log| 20 | options[:log] = %Q[-kernel error_logger '{file,"#{File.join(log, 'dynomite.log')}"}' -sasl sasl_error_logger '{file,"#{File.join(log, 'sasl.log')}"}'] 21 | end 22 | 23 | opts.on('-j', "--join [NODENAME]", 'node to join with') do |node| 24 | options[:jointo] = %Q(-dynomite jointo "'#{node}'") 25 | end 26 | 27 | opts.on('-d', "--detached", "run detached from the shell") do |detached| 28 | options[:detached] = '-detached' 29 | end 30 | end.parse! 
31 | 32 | cookie = Digest::MD5.hexdigest(options[:cluster] + "NomMxnLNUH8suehhFg2fkXQ4HVdL2ewXwM") 33 | 34 | str = "erl \ 35 | -boot start_sasl \ 36 | +K true \ 37 | +A 128 \ 38 | +P 60000 \ 39 | -smp enable \ 40 | -pz #{ROOT}/ebin/ \ 41 | -pz #{ROOT}/deps/mochiweb/ebin \ 42 | -pz #{ROOT}/deps/rfc4627/ebin \ 43 | -pz #{ROOT}/deps/thrift/ebin \ 44 | -sname #{options[:name]} \ 45 | #{options[:log]} \ 46 | #{options[:config]} \ 47 | #{options[:jointo]} \ 48 | -setcookie #{cookie} \ 49 | -run dynomite start \ 50 | #{options[:detached]} \ 51 | #{options[:profile]}" 52 | puts str 53 | exec str 54 | 55 | # -boot #{ROOT}/releases/0.5.0/dynomite_rel \ -------------------------------------------------------------------------------- /rlibs/cli/shared/common.rb: -------------------------------------------------------------------------------- 1 | #defaults 2 | options[:name] = 'dynomite' 3 | options[:cluster] = 'development' 4 | 5 | opts.separator "" 6 | opts.separator "common options:" 7 | 8 | opts.on("-o", "--node [NODE]", "The erlang nodename") do |name| 9 | options[:name] = name 10 | end 11 | 12 | opts.on("-n", "--cluster [CLUSTER]", "The cluster name (cookie token)") do |name| 13 | options[:cluster] = name 14 | end 15 | -------------------------------------------------------------------------------- /rlibs/cli/start.rb: -------------------------------------------------------------------------------- 1 | options = {} 2 | options[:port] = "-dynomite port 11222" 3 | options[:databases] = '' 4 | options[:config] = '-dynomite config "config.json"' 5 | options[:startup] = "-run dynomite start" 6 | 7 | OptionParser.new do |opts| 8 | opts.banner = "Usage: dynomite start [options]" 9 | 10 | contents = File.read(File.dirname(__FILE__) + "/shared/common.rb") 11 | eval contents 12 | 13 | opts.separator "" 14 | opts.separator "Specific options:" 15 | 16 | opts 17 | 18 | opts.on("-c", "--config [CONFIGFILE]", "path to the config file") do |config| 19 | options[:config] = %Q(-dynomite 
config "\\"#{config}\\"") 20 | end 21 | 22 | opts.on("-l", "--log [LOGFILE]", "error log path") do |log| 23 | options[:log] = %Q[-kernel error_logger '{file,"#{File.join(log, 'dynomite.log')}"}' -sasl sasl_error_logger '{file,"#{File.join(log, 'sasl.log')}"}'] 24 | end 25 | 26 | opts.on('-j', "--join [NODENAME]", 'node to join with') do |node| 27 | options[:jointo] = %Q(-dynomite jointo "'#{node}'") 28 | end 29 | 30 | opts.on('-d', "--detached", "run detached from the shell") do |detached| 31 | options[:detached] = '-detached' 32 | end 33 | 34 | opts.on('-p', "--pidfile PIDFILE", "write pidfile to PIDFILE") do |pidfile| 35 | options[:pidfile] = %Q(-dynomite pidfile "'#{pidfile}'") 36 | end 37 | end.parse! 38 | 39 | cookie = Digest::MD5.hexdigest(options[:cluster] + "NomMxnLNUH8suehhFg2fkXQ4HVdL2ewXwM") 40 | 41 | str = "erl \ 42 | -boot start_sasl \ 43 | +K true \ 44 | +A 30 \ 45 | +P 60000 \ 46 | -smp enable \ 47 | -pa #{ROOT}/ebin/ \ 48 | -pa #{ROOT}/deps/mochiweb/ebin \ 49 | -pa #{ROOT}/deps/rfc4627/ebin \ 50 | -pa #{ROOT}/deps/thrift/ebin \ 51 | -sname #{options[:name]} \ 52 | #{options[:log]} \ 53 | #{options[:config]} \ 54 | #{options[:jointo]} \ 55 | -setcookie #{cookie} \ 56 | #{options[:startup]} \ 57 | #{options[:detached]} \ 58 | #{options[:pidfile]} \ 59 | #{options[:profile]}" 60 | puts str 61 | exec str 62 | 63 | # -boot #{ROOT}/releases/0.5.0/dynomite_rel \ 64 | -------------------------------------------------------------------------------- /rlibs/cli/status.rb: -------------------------------------------------------------------------------- 1 | options = {} 2 | 3 | OptionParser.new do |opts| 4 | opts.banner = "Usage: dynomite status [options]" 5 | 6 | contents = File.read(File.dirname(__FILE__) + "/shared/common.rb") 7 | eval contents 8 | end.parse! 
9 | 10 | cookie = Digest::MD5.hexdigest(options[:cluster] + "NomMxnLNUH8suehhFg2fkXQ4HVdL2ewXwM") 11 | 12 | str = %Q(erl -smp -sname console_#{$$} -hidden -setcookie #{cookie} -pa #{ROOT}/ebin/ -run commands start -run erlang halt -noshell -node #{options[:name]}@#{`hostname -s`.chomp} -m membership -f status) 13 | puts str 14 | exec str 15 | -------------------------------------------------------------------------------- /rlibs/cli/stop.rb: -------------------------------------------------------------------------------- 1 | options = {} 2 | 3 | OptionParser.new do |opts| 4 | opts.banner = "Usage: dynomite stop [options]" 5 | 6 | contents = File.read(File.dirname(__FILE__) + "/shared/common.rb") 7 | eval contents 8 | end.parse! 9 | 10 | cookie = Digest::MD5.hexdigest(options[:cluster] + "NomMxnLNUH8suehhFg2fkXQ4HVdL2ewXwM") 11 | 12 | str = %Q(erl -smp -sname console_#{$$} -hidden -setcookie #{cookie} -pa #{ROOT}/ebin/ -run commands start -run erlang halt -noshell -node #{options[:name]}@#{`hostname -s`.chomp} -m init -f stop) 13 | puts str 14 | exec str 15 | -------------------------------------------------------------------------------- /rlibs/distributed_bench.rb: -------------------------------------------------------------------------------- 1 | require 'rubygems' 2 | require 'inline' 3 | require 'optparse' 4 | require 'thrift' 5 | require 'benchmark' 6 | require 'thrift/transport/socket' 7 | require 'thrift/protocol/tbinaryprotocolaccelerated' 8 | require File.dirname(__FILE__) + "/../gen-rb/Dynomite" 9 | 10 | options = { 11 | :concurrency => 20, 12 | :hosts => [], 13 | :size => 100, 14 | :ratio => 0.5, 15 | :keyspace => 100000, 16 | :logdir => File.dirname(__FILE__) + "/../../bench_log" 17 | } 18 | 19 | OptionParser.new do |opts| 20 | opts.banner = "Usage: distributed_bench [options]" 21 | 22 | opts.on('-h', '--host [HOST]', "Add another host to test against. 
Should add the whole cluster.") do |host| 23 | options[:hosts] << host 24 | end 25 | 26 | opts.on('-c', '--concurrency [INT]', "the concurrency level for the test. How many clients to start.") do |c| 27 | options[:concurrency] = c.to_i 28 | end 29 | 30 | opts.on('-r', '--ratio [R]', "the ratio of gets to puts. 0.0 means all puts, 1.0 means all gets.") do |r| 31 | options[:ratio] = r.to_f 32 | end 33 | 34 | opts.on("-l", "--log [LOGDIR]", "Where the instances should log their raw performance data.") do |l| 35 | options[:logdir] = l 36 | end 37 | 38 | opts.on("-s", "--size [SIZE]", "The size of the values to use, in bytes.") do |s| 39 | options[:size] = s.to_i 40 | end 41 | 42 | opts.on("-k", "--keyspace [KEYSPACE]", "The integer size of the keyspace.") do |k| 43 | options[:keyspace] = k.to_i 44 | end 45 | 46 | end.parse! 47 | 48 | def new_bytes(size) 49 | buff = "" 50 | size.times do 51 | buff << rand(256) 52 | end 53 | buff 54 | end 55 | 56 | FileUtils.mkdir_p options[:logdir] 57 | 58 | concurrency = options[:concurrency] 59 | hosts = options[:hosts] 60 | if concurrency < hosts.length 61 | hosts = hosts[0..concurrency] 62 | end 63 | 64 | kids_p_host = concurrency / hosts.length 65 | 66 | keyspace = options[:keyspace] 67 | keychars = (Math.log(keyspace) / Math.log(26)).ceil 68 | key = (0..keychars).to_a.map {|n| "a"}.join 69 | keys = Array.new(keyspace) 70 | keyspace.times do |n| 71 | keys << key.succ!.dup 72 | end 73 | keys.compact! 
74 | 75 | worker = lambda do |host| 76 | log = File.open(options[:logdir] + "/#{$$}_#{host}_bench.log", "w") 77 | Signal.trap("INT") do 78 | log.close 79 | exit(0) 80 | end 81 | socket = Thrift::Socket.new(*host.split(":")) 82 | socket.open 83 | protocol = Thrift::BinaryProtocolAccelerated.new( 84 | Thrift::BufferedTransport.new(socket)) 85 | dyn = Dynomite::Client.new(protocol) 86 | 87 | bytes = new_bytes(options[:size]) 88 | 89 | while true 90 | begin 91 | index = rand(keys.length) 92 | key = keys[index] 93 | t = nil 94 | time = Benchmark.realtime { 95 | if (rand < options[:ratio]) 96 | t = "put" 97 | dyn.put key, nil, bytes.succ! 98 | else 99 | t = "get" 100 | dyn.get key 101 | end 102 | } 103 | log.puts "#{Time.now.to_f}\t#{t}\t#{time}\t#{key}\t#{host}" 104 | rescue => boom 105 | log.puts "#{Time.now.to_f}\terror\t#{time}\t#{key}\t#{host}" 106 | #reset socket 107 | socket = Thrift::Socket.new(*host.split(":")) 108 | socket.open 109 | protocol = Thrift::BinaryProtocolAccelerated.new( 110 | Thrift::BufferedTransport.new(socket)) 111 | dyn = Dynomite::Client.new(protocol) 112 | end 113 | end 114 | end 115 | 116 | pids = [] 117 | 118 | total_kids = 0 119 | hosts.each do |host| 120 | kids_p_host.times do 121 | total_kids += 1 122 | puts "worker for #{host}" 123 | pids << fork { worker.call(host) } 124 | end 125 | end 126 | i=0 127 | while total_kids < concurrency 128 | host = hosts[i] 129 | puts "worker for #{host}" 130 | pids << fork { worker.call(host) } 131 | total_kids += 1 132 | i += 1 133 | end 134 | 135 | Signal.trap("INT") do 136 | pids.each {|pid| Process.kill("INT", pid)} 137 | exit(0) 138 | end 139 | 140 | Process.waitall 141 | -------------------------------------------------------------------------------- /rlibs/dynomite.rb: -------------------------------------------------------------------------------- 1 | # ruby protocol handler for dynomite. 
2 | 3 | require 'socket' 4 | require 'timeout' 5 | 6 | class DynomiteError < StandardError; end 7 | 8 | class Dynomite 9 | DEFAULTS = { 10 | :port => 11222, 11 | :host => 'localhost' 12 | } 13 | 14 | def initialize(options={}) 15 | options = DEFAULTS.merge(options) 16 | @addr = options 17 | connect 18 | end 19 | 20 | def get(key) 21 | timeout_retry(30, 3) { 22 | write("get #{key.length} #{key}\n") 23 | command = read_section 24 | case command 25 | when "fail" 26 | reason = read_line 27 | raise DynomiteError.new(reason) 28 | when "succ" 29 | items = read_section.to_i 30 | ctx_length = read_section.to_i 31 | ctx = read_binary(ctx_length) 32 | data_items = [] 33 | items.times do 34 | data_length = read_section.to_i 35 | data_items << read_binary(data_length) 36 | end 37 | [ctx, data_items] 38 | end 39 | } 40 | end 41 | 42 | def put(key, context, data) 43 | timeout_retry(30, 2) { 44 | ctx_length = context ? context.length : 0 45 | write("put #{key.length} #{key} #{ctx_length} #{context} #{data.length} ") 46 | write(data) 47 | write("\n") 48 | command = read_section 49 | case command 50 | when "fail" 51 | reason = read_line 52 | raise DynomiteError.new(reason) 53 | when "succ" 54 | return read_section.to_i 55 | end 56 | } 57 | end 58 | 59 | def has_key(key) 60 | timeout_retry(10, 3) { 61 | write("has #{key.length} #{key}\n") 62 | command = read_section 63 | case command 64 | when "fail" 65 | reason = read_line 66 | raise DynomiteError.new(reason) 67 | when "yes" 68 | n = read_section.to_i 69 | [true, n] 70 | when "no" 71 | n = read_section.to_i 72 | [false, n] 73 | end 74 | } 75 | end 76 | 77 | def delete(key) 78 | timeout_retry(30, 2) { 79 | write("del #{key.length} #{key}\n") 80 | command = read_section 81 | case command 82 | when "fail" 83 | reason = read_line 84 | raise DynomiteError.new(reason) 85 | when "succ" 86 | read_section.to_i 87 | end 88 | } 89 | end 90 | 91 | def close 92 | socket.close unless socket.closed? 
93 | end 94 | 95 | private 96 | 97 | def timeout_retry(time, retries, &block) 98 | timeout(time, &block) 99 | rescue TimeoutError 100 | retries -= 1 101 | retry unless retries < 0 102 | end 103 | 104 | def socket 105 | connect if (!@socket or @socket.closed?) 106 | @socket 107 | end 108 | 109 | def connect 110 | @socket = TCPSocket.new(@addr[:host], @addr[:port]) 111 | @socket.sync = true 112 | @socket 113 | end 114 | 115 | def read(length) 116 | retries = 3 117 | socket.read(length) 118 | rescue => boom 119 | retries -= 1 120 | if retries > 0 121 | connect 122 | retry 123 | end 124 | end 125 | 126 | def write(data) 127 | retries = 3 128 | socket.write(data) 129 | rescue => boom 130 | retries -= 1 131 | if retries > 0 132 | connect 133 | retry 134 | end 135 | end 136 | 137 | def read_section 138 | buff = "" 139 | while ((char = read(1)) && char != ' ' && char != "\n") 140 | buff << char 141 | end 142 | buff 143 | end 144 | 145 | def read_line 146 | buff = "" 147 | while ((char = read(1)) && char != "\n") 148 | buff << char 149 | end 150 | buff 151 | end 152 | 153 | def read_binary(length) 154 | buff = read(length) 155 | #clear terminating char 156 | read(1) 157 | buff 158 | end 159 | 160 | end -------------------------------------------------------------------------------- /rlibs/stress_test.rb: -------------------------------------------------------------------------------- 1 | require 'rubygems' 2 | require 'inline' 3 | require File.dirname(__FILE__) + "/dynomite" 4 | 5 | Kernel.inline do |builder| 6 | builder.c_raw <<-EOF 7 | static VALUE random_bytes(int argc, VALUE *argv, VALUE self) { 8 | int size = FIX2INT(argv[0]); 9 | VALUE buff = argv[1]; 10 | int i; 11 | for(i=0; i < size; i++) { 12 | char random_byte = (char) rand(); 13 | RSTRING(buff)->ptr[i] = random_byte; 14 | } 15 | return buff; 16 | } 17 | EOF 18 | end 19 | 20 | def new_bytes(size) 21 | buff = "" 22 | size.times do 23 | buff << rand(256) 24 | end 25 | buff 26 | end 27 | 28 | time = -Time.now.to_f 29 | 30 | dyn = Dynomite.new :port => ARGV.shift.to_i 31 | buff = 
new_bytes(300000) 32 | ary = (1..1000).to_a.map do |i| 33 | key = "key#{rand(9000)}" 34 | puts key 35 | dyn.put key, nil, random_bytes(300000, buff) 36 | end 37 | 38 | time += Time.now.to_f 39 | 40 | puts "time taken: #{time}" -------------------------------------------------------------------------------- /rlibs/thrift_bench.rb: -------------------------------------------------------------------------------- 1 | require 'rubygems' 2 | require 'inline' 3 | require 'thrift' 4 | require 'benchmark' 5 | require 'thrift/transport/socket' 6 | require 'thrift/protocol/tbinaryprotocolaccelerated' 7 | require File.dirname(__FILE__) + "/../gen-rb/Dynomite" 8 | 9 | Kernel.inline do |builder| 10 | builder.c_raw <<-EOF 11 | static VALUE random_bytes(int argc, VALUE *argv, VALUE self) { 12 | int size = FIX2INT(argv[0]); 13 | VALUE buff = argv[1]; 14 | int i; 15 | for(i=0; i < size; i++) { 16 | char random_byte = (char) rand(); 17 | RSTRING(buff)->ptr[i] = random_byte; 18 | } 19 | return buff; 20 | } 21 | EOF 22 | end 23 | 24 | def new_bytes(size) 25 | buff = "" 26 | size.times do 27 | buff << rand(256) 28 | end 29 | buff 30 | end 31 | 32 | 33 | port = ARGV.shift.to_i 34 | puts port 35 | socket = Thrift::Socket.new('127.0.0.1', port) 36 | socket.open 37 | protocol = Thrift::BinaryProtocolAccelerated.new( 38 | Thrift::BufferedTransport.new(socket)) 39 | 40 | dyn = Dynomite::Client.new(protocol) 41 | time = -Time.now.to_f 42 | buff = new_bytes(1000) 43 | ary = (1..1000).to_a.map do |i| 44 | key = "key#{rand(9000)}" 45 | puts key 46 | dyn.put key, nil, random_bytes(1000, buff) 47 | end 48 | 49 | time += Time.now.to_f 50 | 51 | puts "time taken: #{time}" -------------------------------------------------------------------------------- /rspecs/dynomite_spec.rb: -------------------------------------------------------------------------------- 1 | require File.dirname(__FILE__) + "/spec_helper" 2 | 3 | describe Dynomite do 4 | before(:each) do 5 | @dyn = Dynomite.new 6 | @write = StringIO.new("", "w") 7 | @read = StringIO.new("", "r") 8 | write = @write 9 | read = @read 10 | 
@dyn.meta_def :write do |data| 11 | write.write(data) 12 | end 13 | @dyn.meta_def :read do |length| 14 | read.read(length) 15 | end 16 | end 17 | 18 | it "should execute clean put operations" do 19 | @read.string = "succ 1\n" 20 | @dyn.put("mahkey", nil, "mahvalue").should == 1 21 | @write.string.should == "put 6 mahkey 0 8 mahvalue\n" 22 | end 23 | 24 | it "should execute context put operations" do 25 | @read.string = "succ 1\n" 26 | @dyn.put("mahkey", "mycontext", "mahvalue").should == 1 27 | @write.string.should == "put 6 mahkey 9 mycontext 8 mahvalue\n" 28 | end 29 | 30 | it "should execute get operations" do 31 | @read.string = "succ 2 9 mycontext 8 mahvalue 7 myvalue\n" 32 | @dyn.get("mahkey").should == ["mycontext", ["mahvalue", "myvalue"]] 33 | @write.string.should == "get 6 mahkey\n" 34 | end 35 | 36 | it "should execute true has key operations" do 37 | @read.string = "yes 3\n" 38 | @dyn.has_key("mahkey").should == [true, 3] 39 | @write.string.should == "has 6 mahkey\n" 40 | end 41 | 42 | it "should execute false has key operations" do 43 | @read.string = "no 3\n" 44 | @dyn.has_key("mahkey").should == [false,3] 45 | @write.string.should == "has 6 mahkey\n" 46 | end 47 | 48 | it "should execute delete operations" do 49 | @read.string = "succ 3\n" 50 | @dyn.delete("mahkey").should == 3 51 | @write.string.should == "del 6 mahkey\n" 52 | end 53 | 54 | it "should throw an error for bad put operations" do 55 | @read.string = "fail avast ye mateys\n" 56 | lambda { @dyn.put("mahkey", "mycontext", "mahvalue") }.should raise_error(DynomiteError, "avast ye mateys") 57 | end 58 | 59 | it "should throw an error for bad get operations" do 60 | @read.string = "fail monkey wrench\n" 61 | lambda { @dyn.get("mahkey") }.should raise_error(DynomiteError, "monkey wrench") 62 | end 63 | 64 | it "should throw an error for bad has_key operations" do 65 | @read.string = "fail argg me harty\n" 66 | lambda { @dyn.get("mahkey") }.should raise_error(DynomiteError, "argg me harty") 67 
| end 68 | 69 | it "should throw an error for bad delete operations" do 70 | @read.string = "fail ohhh god no\n" 71 | lambda { @dyn.get("mahkey") }.should raise_error(DynomiteError, "ohhh god no") 72 | end 73 | end -------------------------------------------------------------------------------- /rspecs/spec_helper.rb: -------------------------------------------------------------------------------- 1 | require 'spec' 2 | require 'mocha' 3 | require 'stubba' 4 | require File.dirname(__FILE__) + "/../rlibs/dynomite" 5 | 6 | Spec::Runner.configure do |config| 7 | # config.before(:all) {} 8 | config.before(:each) { 9 | 10 | } 11 | # config.after(:all) {} 12 | config.after(:each) { 13 | 14 | } 15 | end 16 | 17 | 18 | def load_spec(filename) 19 | YAML.load_file(File.dirname(__FILE__) + "/#{filename}") 20 | end 21 | 22 | class Object 23 | class Bypass 24 | instance_methods.each do |m| 25 | undef_method m unless m =~ /^__/ 26 | end 27 | 28 | def initialize(ref) 29 | @ref = ref 30 | end 31 | 32 | def method_missing(sym, *args) 33 | @ref.__send__(sym, *args) 34 | end 35 | end 36 | 37 | class Assigns 38 | instance_methods.each do |m| 39 | undef_method m unless m =~ /^__/ 40 | end 41 | 42 | def initialize(ref) 43 | @ref = ref 44 | end 45 | 46 | def method_missing(sym, *args) 47 | if sym.to_s =~ /^(.+)=$/ 48 | @ref.instance_variable_set("@#{$1}", args.length == 1 ? 
args.first : args) 49 | else 50 | @ref.instance_variable_get("@#{sym}") 51 | end 52 | end 53 | end 54 | 55 | def bypass 56 | Bypass.new(self) 57 | end 58 | 59 | def assigns 60 | Assigns.new(self) 61 | end 62 | end 63 | 64 | class Object 65 | # The hidden singleton lurks behind everyone 66 | def metaclass; class << self; self; end; end 67 | def meta_eval &blk; metaclass.instance_eval &blk; end 68 | 69 | # Adds methods to a metaclass 70 | def meta_def name, &blk 71 | meta_eval { define_method name, &blk } 72 | end 73 | 74 | # Defines an instance method within a class 75 | def class_def name, &blk 76 | class_eval { define_method name, &blk } 77 | end 78 | end 79 | -------------------------------------------------------------------------------- /test.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.append("gen-py") 3 | from dynomite import Dynomite 4 | from dynomite.ttypes import * 5 | 6 | from thrift import Thrift 7 | from thrift.transport import TSocket 8 | from thrift.transport import TTransport 9 | from thrift.protocol import TBinaryProtocol 10 | 11 | # Make socket 12 | transport = TSocket.TSocket('localhost', 9200) 13 | 14 | # Buffering is critical. 
Raw sockets are very slow 15 | transport = TTransport.TBufferedTransport(transport) 16 | 17 | # Wrap in a protocol 18 | protocol = TBinaryProtocol.TBinaryProtocol(transport) 19 | 20 | client = Dynomite.Client(protocol) 21 | transport.open() 22 | 23 | print client.put("b", None, "b value") 24 | rb = client.get("b") 25 | client.put("b", rb.context, "electric b-value monkey fish!") 26 | 27 | r = client.get("a") 28 | print r 29 | 30 | print client.put("a", r.context, "a new value") 31 | print client.get("a") 32 | 33 | print client.put("a", r.context, "a newer new value") 34 | 35 | print client.get("a") 36 | print client.get("b") 37 | 38 | 39 | transport.close() 40 | -------------------------------------------------------------------------------- /web/images/dynomite_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/moonpolysoft/dynomite/a5618dcbe17b16cefdc9c567f27a1f4445aee005/web/images/dynomite_logo.png -------------------------------------------------------------------------------- /web/javascripts/color.js: -------------------------------------------------------------------------------- 1 | function hsv2rgb(h, s, v) { 2 | var hi = Math.floor(h/60) % 6; 3 | var f = h / 60 - Math.floor(h/60); 4 | var p = Math.round(255 * v * (1-s)); 5 | var q = Math.round(255 * v * (1-f*s)); 6 | var t = Math.round(255 * v * (1-(1-f)*s)); 7 | v = Math.round(v * 255); 8 | switch (hi) { 9 | case 0: return [v,t,p]; 10 | case 1: return [q,v,p]; 11 | case 2: return [p,v,t]; 12 | case 3: return [p,q,v]; 13 | case 4: return [t,p,v]; 14 | case 5: return [v,p,q]; 15 | } 16 | } 17 | 18 | function hsv2rgba(h, s, v) { 19 | var color = hsv2rgb(h, s, v); 20 | return "rgba(" + color[0] + "," + color[1] + "," + color[2] + ",1)"; 21 | } -------------------------------------------------------------------------------- /web/javascripts/domec.js: -------------------------------------------------------------------------------- 1 | 
/** 2 | * jQuery DOMEC (DOM Elements Creator) 0.3 3 | * 4 | * Copyright (c) 2008 Lukasz Rajchel (lukasz@rajchel.pl | http://lukasz.rajchel.pl) 5 | * Dual licensed under the MIT (http://www.opensource.org/licenses/mit-license.php) 6 | * and GPL (http://www.opensource.org/licenses/gpl-license.php) licenses. 7 | * 8 | * Syntax: 9 | * $.create(element[, attributes[, children]]) 10 | * 11 | * Parameters: 12 | * - string element - name of the element to create 13 | * - object|array attributes - element properties to be set 14 | * - string|array children - child elements (could also contain text value) 15 | * 16 | * Changelog: 17 | * 0.3.1 (2008.04.10) 18 | * - code optimization 19 | * 20 | * 0.3 (2008.04.04) 21 | * - plugin function renamed from new to create (works now in IE) 22 | * 23 | * 0.2.1 (2008.04.01) 24 | * - namespace added 25 | * - fixed dates in changelog 26 | * - comments added 27 | * 28 | * 0.2 (2008.03.19) 29 | * - attributes and children parameters added 30 | * - changelog added 31 | * 32 | * 0.1 (2008.03.18) 33 | * - initial release 34 | */ 35 | 36 | (function($) { 37 | 38 | // register jQuery extension 39 | $.extend({ 40 | create: function(element, attributes, children) { 41 | 42 | // create new element 43 | var elem = $(document.createElement(element)); 44 | 45 | // add passed attributes 46 | if (typeof(attributes) == 'object') { 47 | for (key in attributes) { 48 | elem.attr(key, attributes[key]); 49 | } 50 | } 51 | 52 | // add passed child elements 53 | if (typeof(children) == 'string') { 54 | elem.text(children); 55 | } else if (typeof(children) == 'object') { 56 | for (i = 0; i < children.length; i++) { 57 | elem.append(children[i]); 58 | } 59 | } 60 | return elem; 61 | } 62 | }); 63 | 64 | })(jQuery); -------------------------------------------------------------------------------- /web/javascripts/drawarrows.js: -------------------------------------------------------------------------------- 1 | var DrawArrows = { 2 | arrow: [ 3 | [ 2, 0 ], 4 | [ 
-10, -4 ], 5 | [ -10, 4] 6 | ], 7 | 8 | rotateShape: function(shape,ang) { 9 | var rv = []; 10 | for(p in shape) 11 | rv.push(DrawArrows.rotatePoint(ang,shape[p][0],shape[p][1])); 12 | return rv; 13 | }, 14 | 15 | rotatePoint: function(ang,x,y) { 16 | return [ 17 | (x * Math.cos(ang)) - (y * Math.sin(ang)), 18 | (x * Math.sin(ang)) + (y * Math.cos(ang)) 19 | ]; 20 | }, 21 | 22 | translateShape: function(shape,x,y) { 23 | var rv = []; 24 | for(p in shape) 25 | rv.push([ shape[p][0] + x, shape[p][1] + y ]); 26 | return rv; 27 | }, 28 | 29 | enable: function(ctx) { 30 | 31 | ctx.drawFilledPolygon = function(shape) { 32 | this.beginPath(); 33 | this.moveTo(shape[0][0],shape[0][1]); 34 | 35 | for(p in shape) 36 | if (p > 0) this.lineTo(shape[p][0],shape[p][1]); 37 | 38 | this.lineTo(shape[0][0],shape[0][1]); 39 | this.fill(); 40 | }; 41 | 42 | ctx.drawLineArrow = function(x1,y1,x2,y2) { 43 | this.beginPath(); 44 | this.moveTo(x1,y1); 45 | this.lineTo(x2,y2); 46 | this.stroke(); 47 | var ang = Math.atan2(y2-y1,x2-x1); 48 | this.drawFilledPolygon(DrawArrows.translateShape(DrawArrows.rotateShape(DrawArrows.arrow,ang),x2,y2)); 49 | }; 50 | } 51 | 52 | 53 | }; 54 | -------------------------------------------------------------------------------- /web/javascripts/jquery.timer.js: -------------------------------------------------------------------------------- 1 | /* 2 | * 3 | * jQuery Timer plugin v0.1 4 | * Matt Schmidt [http://www.mattptr.net] 5 | * 6 | * Licensed under the BSD License: 7 | * http://mattptr.net/license/license.txt 8 | * 9 | */ 10 | 11 | jQuery.timer = function (interval, callback) 12 | { 13 | /** 14 | * 15 | * timer() provides a cleaner way to handle intervals 16 | * 17 | * @usage 18 | * $.timer(interval, callback); 19 | * 20 | * 21 | * @example 22 | * $.timer(1000, function (timer) { 23 | * alert("hello"); 24 | * timer.stop(); 25 | * }); 26 | * @desc Show an alert box after 1 second and stop 27 | * 28 | * @example 29 | * var second = false; 30 | * 
$.timer(1000, function (timer) { 31 | * if (!second) { 32 | * alert('First time!'); 33 | * second = true; 34 | * timer.reset(3000); 35 | * } 36 | * else { 37 | * alert('Second time'); 38 | * timer.stop(); 39 | * } 40 | * }); 41 | * @desc Show an alert box after 1 second and show another after 3 seconds 42 | * 43 | * 44 | */ 45 | 46 | var interval = interval || 100; 47 | 48 | if (!callback) 49 | return false; 50 | 51 | _timer = function (interval, callback) { 52 | this.stop = function () { 53 | clearInterval(self.id); 54 | }; 55 | 56 | this.internalCallback = function () { 57 | callback(self); 58 | }; 59 | 60 | this.reset = function (val) { 61 | if (self.id) 62 | clearInterval(self.id); 63 | 64 | var val = val || 100; 65 | this.id = setInterval(this.internalCallback, val); 66 | }; 67 | 68 | this.interval = interval; 69 | this.id = setInterval(this.internalCallback, this.interval); 70 | 71 | var self = this; 72 | }; 73 | 74 | return new _timer(interval, callback); 75 | }; -------------------------------------------------------------------------------- /web/load.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 30 | 31 | 32 |
33 |

Dynomite

34 |

Load for

35 |
36 | 42 | 43 | 44 | 45 | 46 | -------------------------------------------------------------------------------- /web/partitions.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 24 | 25 | 26 |
27 |

Dynomite

28 |

Partitioning for

29 |
30 | 36 | 37 | 38 | 39 | 40 | -------------------------------------------------------------------------------- /web/stylesheets/master.css: -------------------------------------------------------------------------------- 1 | body { 2 | font: 11pt "Lucida Grande", Lucida, Verdana, sans-serif; 3 | margin: 0 0 0 0; 4 | padding: 0 0 0 0; 5 | cursor: default; 6 | background: #babdb6; 7 | } 8 | 9 | a { 10 | color: #193E4A; 11 | text-decoration: none; 12 | } 13 | 14 | div.header { 15 | padding: 0 0 0 0; 16 | margin: -22 0 0 0; 17 | background: #2e3436; 18 | } 19 | 20 | h1 { 21 | font: 25pt Helvetica, sans-serif; 22 | margin-bottom: 0px; 23 | color: #babdb6; 24 | 25 | } 26 | 27 | h2 { 28 | font: 11pt Helvetica, sans-serif; 29 | color: #888a85; 30 | padding: 0 0 0.5em 0; 31 | margin: 0 0 0 0; 32 | } 33 | 34 | h3 { 35 | font: 11pt Helvetica, sans-serif; 36 | padding: 0 0 0 0; 37 | margin: 1em 0 0 0; 38 | } 39 | 40 | #menu { 41 | padding: 0 0 0 0; 42 | margin: 0 0 0 0; 43 | } 44 | /*top right bottom left*/ 45 | #menu li { 46 | display: inline; 47 | margin: 0 0 0 -5px; 48 | padding: 0 5px 2px 5px; 49 | border-bottom: solid 1px #2E3036; 50 | border-right: solid 1px #2E3036; 51 | } -------------------------------------------------------------------------------- /web/sync.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 44 | 45 | 46 |
47 |

Dynomite

48 |

Replication for

49 |
50 | 56 | 57 | 58 | 59 | 60 | --------------------------------------------------------------------------------