├── .github └── workflows │ └── erlang.yml ├── .gitignore ├── LICENSE ├── Makefile ├── README.org ├── docs ├── MAPFOLD.md ├── NextGenAAE.md ├── NextGenREPL-GettingStarted.md ├── NextGenREPL.md ├── Node-Diversity.md ├── ReapErase.md ├── Strong-Consistency.md ├── Sync-On-Write.md ├── hll │ ├── hll.bib │ ├── hll.pdf │ ├── hll.tex │ └── images │ │ ├── bucket-run.png │ │ ├── register-store.png │ │ └── sto-avg.png ├── put-latency-mean.png ├── put-latency-tail.png ├── soft-limit-vnode.md ├── throughput.png └── update_line_numbers.erl ├── eqc ├── backend_eqc.erl ├── crdt_statem_eqc.erl ├── ec_eqc.erl ├── eraser_eqc.erl ├── fsm_eqc_util.erl ├── fsm_eqc_vnode.erl ├── get_fsm_eqc.erl ├── kv679_eqc.erl ├── kv679_fb_byz.eqc ├── kv679_fb_byz2.eqc ├── kv679_fb_byz3.eqc ├── kv_crdt_eqc.erl ├── kv_vnode_status_mgr_eqc.erl ├── put_fsm_eqc.erl ├── reaper_eqc.erl ├── replrtq_eqc.erl ├── replrtq_snk_eqc.erl ├── replrtq_snk_monitor.erl ├── riak_object_dvv_statem.erl ├── riak_object_eqc.erl └── sms_eqc.erl ├── include ├── .empty_for_hg ├── riak_kv_dtrace.hrl ├── riak_kv_index.hrl ├── riak_kv_map_phase.hrl ├── riak_kv_mrc_sink.hrl ├── riak_kv_types.hrl ├── riak_kv_vnode.hrl └── riak_object.hrl ├── priv ├── .empty_for_hg ├── mapred_builtins.js ├── multi_backend.schema ├── riak_kv.schema └── tracers │ ├── tracer_accumulating_time.erl │ ├── tracer_backend_latency.erl │ ├── tracer_eleveldb_put_size.erl │ ├── tracer_fsm_init.erl │ ├── tracer_func_args.erl │ ├── tracer_gc_latency.erl │ ├── tracer_large4.erl │ ├── tracer_latency_histogram.erl │ ├── tracer_merge_and_and_handoff.erl │ ├── tracer_read_bin_trace_file.erl │ └── tracer_timeit.erl ├── rebar.config ├── rebar3 ├── src ├── json_pp.erl ├── raw_link_walker.erl ├── riak.erl ├── riak_client.erl ├── riak_core.proto ├── riak_index.erl ├── riak_kv.app.src ├── riak_kv_2i_aae.erl ├── riak_kv_app.erl ├── riak_kv_backend.erl ├── riak_kv_bitcask_backend.erl ├── riak_kv_bucket.erl ├── riak_kv_buckets_fsm.erl ├── riak_kv_buckets_fsm_sup.erl ├── 
riak_kv_cinfo.erl ├── riak_kv_clusteraae_fsm.erl ├── riak_kv_clusteraae_fsm_sup.erl ├── riak_kv_console.erl ├── riak_kv_counter.erl ├── riak_kv_coverage_filter.erl ├── riak_kv_crdt.erl ├── riak_kv_crdt_json.erl ├── riak_kv_delete.erl ├── riak_kv_delete_sup.erl ├── riak_kv_eleveldb_backend.erl ├── riak_kv_ensemble_backend.erl ├── riak_kv_ensemble_console.erl ├── riak_kv_ensembles.erl ├── riak_kv_entropy_info.erl ├── riak_kv_entropy_manager.erl ├── riak_kv_env.erl ├── riak_kv_eraser.erl ├── riak_kv_exchange_fsm.erl ├── riak_kv_exometer_sidejob.erl ├── riak_kv_fold_buffer.erl ├── riak_kv_fsm_timing.erl ├── riak_kv_gcounter.erl ├── riak_kv_get_core.erl ├── riak_kv_get_fsm.erl ├── riak_kv_hll.erl ├── riak_kv_hooks.erl ├── riak_kv_hotbackup_fsm.erl ├── riak_kv_hotbackup_fsm_sup.erl ├── riak_kv_http_cache.erl ├── riak_kv_index_fsm.erl ├── riak_kv_index_fsm_sup.erl ├── riak_kv_index_hashtree.erl ├── riak_kv_keys_fsm.erl ├── riak_kv_keys_fsm_sup.erl ├── riak_kv_legacy_vnode.erl ├── riak_kv_leveled_backend.erl ├── riak_kv_mapred_filters.erl ├── riak_kv_mapred_json.erl ├── riak_kv_mapred_term.erl ├── riak_kv_mapreduce.erl ├── riak_kv_memory_backend.erl ├── riak_kv_mrc_map.erl ├── riak_kv_mrc_pipe.erl ├── riak_kv_mrc_sink.erl ├── riak_kv_mrc_sink_sup.erl ├── riak_kv_multi_backend.erl ├── riak_kv_multi_prefix_backend.erl ├── riak_kv_overflow_queue.erl ├── riak_kv_pb_aaefold.erl ├── riak_kv_pb_bucket.erl ├── riak_kv_pb_bucket_key_apl.erl ├── riak_kv_pb_counter.erl ├── riak_kv_pb_crdt.erl ├── riak_kv_pb_csbucket.erl ├── riak_kv_pb_index.erl ├── riak_kv_pb_mapred.erl ├── riak_kv_pb_object.erl ├── riak_kv_pipe_get.erl ├── riak_kv_pipe_index.erl ├── riak_kv_pipe_listkeys.erl ├── riak_kv_pncounter.erl ├── riak_kv_put_core.erl ├── riak_kv_put_fsm.erl ├── riak_kv_queue_manager.erl ├── riak_kv_reader.erl ├── riak_kv_reaper.erl ├── riak_kv_reformat.erl ├── riak_kv_replrtq_peer.erl ├── riak_kv_replrtq_snk.erl ├── riak_kv_replrtq_src.erl ├── riak_kv_requests.erl ├── riak_kv_stat.erl ├── 
riak_kv_stat_bc.erl ├── riak_kv_stat_worker.erl ├── riak_kv_status.erl ├── riak_kv_sup.erl ├── riak_kv_test_util.erl ├── riak_kv_tictacaae_repairs.erl ├── riak_kv_ttaaefs_manager.erl ├── riak_kv_update_hook.erl ├── riak_kv_util.erl ├── riak_kv_vnode.erl ├── riak_kv_vnode_status_mgr.erl ├── riak_kv_w1c_sup.erl ├── riak_kv_w1c_worker.erl ├── riak_kv_w_reduce.erl ├── riak_kv_web.erl ├── riak_kv_wm_aaefold.erl ├── riak_kv_wm_bucket_type.erl ├── riak_kv_wm_buckets.erl ├── riak_kv_wm_counter.erl ├── riak_kv_wm_crdt.erl ├── riak_kv_wm_index.erl ├── riak_kv_wm_keylist.erl ├── riak_kv_wm_link_walker.erl ├── riak_kv_wm_mapred.erl ├── riak_kv_wm_object.erl ├── riak_kv_wm_ping.erl ├── riak_kv_wm_props.erl ├── riak_kv_wm_queue.erl ├── riak_kv_wm_raw.hrl ├── riak_kv_wm_stats.erl ├── riak_kv_wm_utils.erl ├── riak_kv_worker.erl ├── riak_kv_yessir_backend.erl ├── riak_object.erl ├── riak_object_json.erl ├── sms.erl └── stacktrace.hrl └── test ├── backend_test_util.erl ├── bad_bitcask_multi.schema ├── put_fsm_precommit.js ├── rest_url_encoding_test.erl ├── riak_kv_entropy_manager_test.erl └── riak_kv_schema_tests.erl /.github/workflows/erlang.yml: -------------------------------------------------------------------------------- 1 | name: Erlang CI 2 | 3 | on: 4 | push: 5 | branches: [ develop-3.0 ] 6 | pull_request: 7 | branches: [ develop-3.0 ] 8 | 9 | 10 | jobs: 11 | 12 | build: 13 | 14 | runs-on: ubuntu-latest 15 | 16 | container: 17 | image: erlang:22.3.3 18 | 19 | steps: 20 | - uses: lukka/get-cmake@latest 21 | - uses: actions/checkout@v2 22 | - name: Compile 23 | run: ./rebar3 compile 24 | - name: Run xref and dialyzer 25 | run: ./rebar3 do xref, dialyzer 26 | - name: Run eunit 27 | run: ./rebar3 as gha do eunit 28 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .rebar/* 2 | .eunit/* 3 | deps/* 4 | priv/* 5 | *.o 6 | include/*_pb.hrl 7 | *.beam 8 
| doc 9 | test.*-temp-data 10 | ebin 11 | /.eqc-info 12 | /current_counterexample.eqc 13 | .local_dialyzer_plt 14 | dialyzer_unhandled_warnings 15 | dialyzer_warnings 16 | tags 17 | erln8.config 18 | .idea 19 | riak_kv.iml 20 | _build/ 21 | .rebar3/ 22 | rebar.lock 23 | .DS_Store 24 | src/riak_core_pb.erl 25 | *@* 26 | undefined 27 | log/crash.log 28 | .eqc/*counterexample.eqc 29 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: compile rel cover test dialyzer eqc 2 | REBAR=./rebar3 3 | 4 | compile: 5 | $(REBAR) compile 6 | 7 | clean: 8 | $(REBAR) clean 9 | 10 | cover: test 11 | $(REBAR) cover 12 | 13 | test: compile 14 | $(REBAR) as test do eunit 15 | 16 | eqc: 17 | $(REBAR) eqc 18 | 19 | 20 | dialyzer: 21 | $(REBAR) dialyzer 22 | 23 | xref: 24 | $(REBAR) xref 25 | 26 | check: test dialyzer xref 27 | -------------------------------------------------------------------------------- /README.org: -------------------------------------------------------------------------------- 1 | * riak_kv 2 | ** Overview 3 | 4 | [[https://github.com/basho/riak_kv/actions][Build Status]] - [[https://github.com/basho/riak_kv/actions/workflows/erlang.yml/badge.svg?branch=develop-3.0]] 5 | 6 | Riak KV is an open source Erlang application that is distributed using the [[https://github.com/basho/riak_core][riak_core]] Erlang 7 | library. Riak KV provides a key/value datastore and features MapReduce, lightweight data relations, and several different client APIs. 8 | 9 | ** Quick Start 10 | You must have [[http://erlang.org/download.html][Erlang/OTP 20 or 22]] or later and a GNU-style build 11 | system to compile and run =riak_kv=. The easiest way to utilize riak_kv is by installing the full 12 | Riak application available on [[https://github.com/basho/riak][Github]]. 13 | 14 | ** Contributing 15 | We encourage contributions to =riak_kv= from the community. 
16 | 17 | 1) Fork the =riak_kv= repository on [[https://github.com/basho/riak_kv][Github]]. 18 | 2) Clone your fork or add the remote if you already have a clone of 19 | the repository. 20 | #+BEGIN_SRC shell 21 | git clone git@github.com:yourusername/riak_kv.git 22 | # or 23 | git remote add mine git@github.com:yourusername/riak_kv.git 24 | #+END_SRC 25 | 3) Create a topic branch for your change. 26 | #+BEGIN_SRC shell 27 | git checkout -b some-topic-branch 28 | #+END_SRC 29 | 4) Make your change and commit. Use a clear and descriptive commit 30 | message, spanning multiple lines if detailed explanation is 31 | needed. 32 | 5) Push to your fork of the repository and then send a pull-request 33 | through Github. 34 | #+BEGIN_SRC shell 35 | git push mine some-topic-branch 36 | #+END_SRC 37 | 6) A Basho engineer or community maintainer will review your patch 38 | and merge it into the main repository or send you feedback. 39 | 40 | ** Testing 41 | 42 | #+BEGIN_SRC shell 43 | # standard tests 44 | ./rebar3 do xref, dialyzer, eunit 45 | # property-based tests 46 | ./rebar3 as test eqc --testing_budget 600 47 | #+END_SRC 48 | 49 | For a more complete set of tests, update riak_kv in the full Riak application and run any appropriate [[https://github.com/basho/riak_test/tree/develop-3.0/groups][Riak riak_test groups]] 50 | -------------------------------------------------------------------------------- /docs/ReapErase.md: -------------------------------------------------------------------------------- 1 | # Reap and Erase 2 | 3 | ## Background 4 | 5 | The Reaper and Eraser are now processes supported in Riak as at release 2.9.1. They are intended to be a starting point of improving a number of situations: 6 | 7 | * Riak supports [multiple delete modes](https://docs.riak.com/riak/kv/2.2.3/using/reference/object-deletion/index.html#configuring-object-deletion), and the recommended mode for safety is `keep`. 
However, a delete_mode of `keep` creates an unresolved problem of permanently uncollected garbage - the tombstones are never erased. Note, even with an `interval` delete mode, tombstones may fail to be cleared and then continue to exist forever. 8 | 9 | * Riak supports time-to-live for objects configured within the backend, also known as [Global Object Expiration](https://riak.com/products/riak-kv/global-object-expiration/index.html?p=12768.html). However, there are two flaws with this form of automated expiration: 10 | 11 | * It considers only when the object is added to the backend, not when the object is added to the database (i.e. the object's last modified time). An object may extend and survive expiry through handoffs. 12 | 13 | * Object expiry is not coordinated with anti-entropy changes, and so as objects expire they may be resurrected by anti-entropy, which in turn will reset their expiry data to further in the future. It can also lead to huge cascades in false repair action when AAE trees are rebuilt - as one vnode's AAE trees suddenly rebuild without all the expired objects. This has led to some customers attempting to coordinate AAE tree rebuilds to make the occur concurrently, which can have a significant performance impact during the rebuild period. 14 | 15 | To begin to address these problems, the `riak_kv_reaper` and `riak_kv_eraser` have been introduced in Riak KV 2.9.1. 16 | 17 | ## Riak KV Reaper 18 | 19 | The `riak_kv_reaper` is a process that receives requests to reap tombstones, queues those requests, and continuously reaps tombstones from that queue when processing time is available. 20 | 21 | The reaper queue can be fed via a tictac aae fold from the riak client - `aae_reap_tombs`: 22 | 23 | ``` 24 | -spec aae_reap_tombs(pid(), 25 | riakc_obj:bucket(), key_range(), 26 | segment_filter(), 27 | modified_range(), 28 | change_method()) -> 29 | {ok, non_neg_integer()} | {error, any()}. 
30 | ``` 31 | 32 | Reaping tombstones can be done only against a single specific bucket at a time, and can be further restricted to a key range within the bucket. A segment filter can be added to only reap tombstones within a given part of the AAE tree; the segment filter may be useful when trying to break up reaping activity to do only a proportion of required reaps at a time. A modified range should be passed so that the reap can be restricted only to tombstones which have existed beyond a certain point - for example to only reap tombstones more than one month old. 33 | 34 | The reap fold will then discover keys to be reaped, and queue them for reaping (or count them if `change_method` is set to count). To run reaps in parallel across nodes in the cluster use `local` as the `change_method`. To have a single queue of reaps for a single process dedicated to this fold then `{job, JobID}` can be passed as the `change_method`. 35 | 36 | The actual reap will remove both the tombstone from the backend as well as removing the reference from the Active Anti-Entropy system. Before attempting a reap a check is made to ensure all primary vnodes in the preflist are online - and if not the reap will be deferred by switching it to the back of the queue. If a reap were to proceed without a primary being available, then it is likely to be eventually resurrected through anti-entropy. 37 | 38 | The reaping itself will only act if: 39 | 40 | * the object to be reaped is confirmed as a tombstone, and; 41 | 42 | * the object to be reaped has the same vector clock as when the reap requirement was discovered (the comparison is based on a hash of the sorted vector clock). 43 | 44 | Note that when using `riak_kv_ttaaefs_manager` for full-sync, or any riak_repl full-sync mechanism, that is reap jobs are not co-ordinated between clusters tombstones will be resurrected by full-sync jobs. 
45 | 46 | ## Riak KV Eraser 47 | 48 | The `riak_kv_eraser` is a process that receives requests to delete objects, queues those requests, and continuously delete objects from that queue when processing time is available. The eraser is simply an unscheduled garbage collection process at the moment, but is planned to be extended in 2.9.2 to be part of a more complete TTL management solution. 49 | 50 | The eraser queue can be fed via a tictac aae fold from the riak client - `aae_erase_keys`: 51 | 52 | ``` 53 | -spec aae_erase_keys(pid(), 54 | riakc_obj:bucket(), key_range(), 55 | segment_filter(), 56 | modified_range(), 57 | change_method()) -> 58 | {ok, non_neg_integer()} | {error, any()}. 59 | ``` 60 | 61 | The function inputs are the same as with `aae_reap_tombs`. For this fold, the results will be queued for the `riak_kv_eraser`. If all primary vnodes are not up, then as with the reap the delete will not be attempted, but will be re-queued. 62 | 63 | The delete will only complete if the object to be deleted has a vector clock equal to that discovered at the time the delete was queued. 64 | 65 | 66 | ## Outstanding TODO 67 | 68 | * Allow for scheduled reap and erase jobs to generate reap and erase activity. 69 | 70 | * This makes more sense for erase jobs to have them auto-scheduled, as reap jobs won't naturally co-ordinate between clusters - so tombstones may resurrect through full-sync. 71 | 72 | 73 | * Change replication so that it will filter the sending of tombstones beyond a certain modified time, so as not to resurrect old tombstones via full-sync. 74 | 75 | * Have a TTL bucket property so that GETs can be filtered beyond that modified time. Need to consider local GETs (e.g. at a vnode before a PUT). 
76 | -------------------------------------------------------------------------------- /docs/Sync-On-Write.md: -------------------------------------------------------------------------------- 1 | # Sync On Write 2 | 3 | ## Background 4 | 5 | The _Sync on Write_ feature, also known as _Selective Sync_, is the second of two features added to Riak intended to provide greater flexibility and control for write operations. These improvements have been developed to better support the durability and availability guarantees required by the NHS Spine project. 6 | 7 | The first of these improvements, made in the Riak 2.9 release, was to add the [node diversity](Node-Diversity.md#node-diversity) feature. Node diversity allows Riak users to be explicit about how many physical devices should have received a write, before a write is acknowledged. The second improvement is required to complete assurance about durability, adding further controls over when writes have actually been persisted to disk. 8 | 9 | For Riak prior to release 3.0.8, being specific about persistence required synchronisation to be enabled on the backend (i.e. leveled, bitcask or eleveldb). The Riak put process supports the `dw` value, which nominally denotes how many nodes on which the write has been made durable - however the meaning of durability in this case is "with the backend", it offers no information as to whether physical persistence has actually been forced. The `dw` parameter will mean with the backend *and* flushed to disk, if and only if synchronisation of each and every write is enabled at the backend. 10 | 11 | Forcing each and every write to be flushed to disk at the backend, in order to be sure that at least `dw` writes have been persisted, is expensive. With two clusters, and an n-val of 3 it requires 6 flushes for every write. This volume of disk sync actions can result in significant direct I/O charges in cloud environments. 
In other hosted environments, the costs can be mitigated by employing specialist hardware such as flash-backed write caches, but these devices can have a notable negative impact on node reliability. 12 | 13 | Further, with backend sync enabled, all writes are flushes not just writes being currently received from the application. So writes during transfers, read repairs and replication events all trigger disk syncs. These overheads combined, had led to a recommendation that sync should not be enabled on Riak backends; but guarantees of a flush are still useful to applications with exacting durability requirements. 14 | 15 | For the NHS Spine project, when we considered the actual durability need, the specific requirements were that: 16 | 17 | - For some buckets, assuming node diversity, it was necessary to know that a PUT had been flushed at least once before acknowledgement (to protect against concurrent and immediate power failure of multiple nodes). 18 | 19 | - For some buckets it was acceptable to simply rely on node diversity and the regular flushing of the file system. 20 | 21 | - It was explicitly not desirable to flush to disk background writes (such as those resulting from handoffs). 22 | 23 | To meet these requirements backend sync is currently enabled, but considering the actual requirement, more than 90% of the sync events in the database are unnecessary. Greater efficiency is clearly possible. 24 | 25 | ## The Solution 26 | 27 | In [Riak 3.0.8 the bucket property `sync_on_write` has been added](https://github.com/basho/riak_kv/pull/1794). Like other PUT-related bucket properties it can be over-written via the API on an individual PUT. There are three possible values: 28 | 29 | - `all`; flush on all n-val nodes for each PUT. 30 | 31 | - `one`; flush only on the co-ordinating vnode, on other nodes the default backend policy will be followed. 32 | 33 | - `backend`; follow in all cases the backend policy (the current behaviour, and default property). 
34 | 35 | For PUTs not initiated by a client via the PUT API (e.g. replication events, read repairs, handoffs etc), then the backend policy will be honoured as before. 36 | 37 | This allows for the backend policy to be left to *not* sync on write, but still allow for more selective flushes using bucket properties or the Riak client API. 38 | 39 | ## Notes on Implementation 40 | 41 | The three most common persisted backends - bitcask, eleveldb and leveled - have been updated to support this feature. 42 | 43 | Using a `sync_on_write` bucket property of `one` is complimentary to the [vnode mailbox check feature](https://github.com/basho/riak_kv/pull/1670) added in Riak 2.9. The feature allows the size of the vnode queues to influence the choice of coordinator for a PUT. Having the coordinator only apply the flush now leads to reduce disk I/O load on nodes that are running slow (and have longer vnode mailbox queues), diverting this overhead and balancing work in the cluster to nodes that currently have more available capacity. 44 | 45 | ## Notes on Flushing 46 | 47 | In this case, the meaning of "flushing to disk" means the consequence of calling [file sync](http://erlang.org/doc/man/file.html#sync-1) on the file to which the object has just been written. The actual impact of this, and the precise reliability guarantee this offers may vary greatly depending on the operating system, the file system and the choice of hardware. More data may be flushed than the last write, and whether that data is [reliably persisted may not be certain](https://danluu.com/file-consistency/). 48 | -------------------------------------------------------------------------------- /docs/hll/hll.bib: -------------------------------------------------------------------------------- 1 | @ONLINE{Flatjolet:2007:Online, 2 | author = "Flajolet, P. and Fusy, E. and Gandouet, O. 
and Meunier, F.", 3 | title = {HyperLogLog: the analysis of a near-optimal 4 | cardinality estimation algorithm}, 5 | year = {2007}, 6 | url = {http://algo.inria.fr/flajolet/Publications/FlFuGaMe07.pdf} 7 | } 8 | 9 | @ONLINE{Durand:2003:Online, 10 | author = "Durand, M. and Flajolet, P.", 11 | title = {Loglog Counting of Large Cardinalities}, 12 | year = {2003}, 13 | url = {http://algo.inria.fr/flajolet/Publications/DuFl03-LNCS.pdf} 14 | } 15 | 16 | @ONLINE{Count-distinct:Online, 17 | author = "Wikipedia", 18 | title = {Count-distinct problem}, 19 | url = {https://en.wikipedia.org/wiki/Count-distinct_problem} 20 | } 21 | 22 | @ONLINE{Harmonic-mean:Online, 23 | author = "Wikipedia", 24 | title = {Harmonic mean}, 25 | url = {https://en.wikipedia.org/wiki/Harmonic_mean} 26 | } 27 | 28 | @ONLINE{Hyper:Online, 29 | author = "GameAnalytics", 30 | title = {Hyper}, 31 | url = {https://github.com/GameAnalytics/hyper} 32 | } 33 | 34 | @ONLINE{Heule:2013:Online, 35 | author = "Heule, S. and Nunkesser, M. 
and Hall, A.", 36 | title = {HyperLogLog in Practice: Algorithmic Engineering of a State of The Art 37 | Cardinality Estimation Algorithm}, 38 | month = mar, 39 | year = {2013}, 40 | url = {http://static.googleusercontent.com/media/research.google.com/en//pubs/archive/40671.pdf} 41 | } 42 | 43 | @ONLINE{Kiip:Online, 44 | author = "Kiip", 45 | title = {Sketching \& Scaling: Everyday HyperLogLog}, 46 | url = {http://blog.kiip.me/engineering/sketching-scaling-everyday-hyperloglog} 47 | } 48 | 49 | @ONLINE{Neustar:Online, 50 | author = "Neustar", 51 | title = {Sketch of the Day: HyperLogLog — Cornerstone of a Big Data Infrastructure}, 52 | url = {https://research.neustar.biz/2012/10/25/sketch-of-the-day-hyperloglog-cornerstone-of-a-big-data-infrastructure/} 53 | } 54 | -------------------------------------------------------------------------------- /docs/hll/hll.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/basho/riak_kv/18817d228ecd818f8fc997fc6a6a288d00372209/docs/hll/hll.pdf -------------------------------------------------------------------------------- /docs/hll/images/bucket-run.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/basho/riak_kv/18817d228ecd818f8fc997fc6a6a288d00372209/docs/hll/images/bucket-run.png -------------------------------------------------------------------------------- /docs/hll/images/register-store.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/basho/riak_kv/18817d228ecd818f8fc997fc6a6a288d00372209/docs/hll/images/register-store.png -------------------------------------------------------------------------------- /docs/hll/images/sto-avg.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/basho/riak_kv/18817d228ecd818f8fc997fc6a6a288d00372209/docs/hll/images/sto-avg.png -------------------------------------------------------------------------------- /docs/put-latency-mean.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/basho/riak_kv/18817d228ecd818f8fc997fc6a6a288d00372209/docs/put-latency-mean.png -------------------------------------------------------------------------------- /docs/put-latency-tail.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/basho/riak_kv/18817d228ecd818f8fc997fc6a6a288d00372209/docs/put-latency-tail.png -------------------------------------------------------------------------------- /docs/throughput.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/basho/riak_kv/18817d228ecd818f8fc997fc6a6a288d00372209/docs/throughput.png -------------------------------------------------------------------------------- /docs/update_line_numbers.erl: -------------------------------------------------------------------------------- 1 | %% Escript that will update the line numbers for links to functions 2 | %% in the markdown documentation. 3 | %% 4 | %% It expects function references like the following in the text: 5 | %% - [module:function/arity][] 6 | %% - [name for reference][module:function/arity] 7 | %% - [module:function/arity](path/to/source.erl#L34) 8 | %% 9 | %% And writes a markdown reference section at the end of the file. 10 | %% These references are not visible when markdown is rendered. 11 | %% 12 | %% [mod:fun1/arity]: ../src/mod_file.erl#L34 13 | %% [mod:fun2/arity]: ../src/mod_file.erl#L56 14 | %% ... 
15 | %% 16 | %% To update the docs, run this after compiling the library: 17 | %% 18 | %% $ cd riak_ensemble/doc 19 | %% $ escript update_line_numbers.erl ../ebin *.md 20 | 21 | -module(update_line_numbers). 22 | -mode(compile). 23 | -export([main/1]). 24 | 25 | main([Ebin | MdFiles]) -> 26 | [update_file(Ebin, MdFile) || MdFile <- MdFiles]. 27 | 28 | update_file(Ebin, MdFile) -> 29 | % Parse function references in file 30 | {ok, Text} = file:read_file(MdFile), 31 | WantedFuns = lists:usort(parse_funs(Text)), 32 | FunSet = sets:from_list(WantedFuns), 33 | WantedMods = lists:usort([Mod || {Mod, _, _} <- WantedFuns]), 34 | % Get line numbers from beam files into a dict 35 | FoundLines = lists:usort(lists:flatten([get_line_nums(beam_filename(Ebin, Mod), Mod, FunSet) || Mod <- WantedMods])), 36 | FoundSet = sets:from_list([MFA || {MFA, _Line} <- FoundLines]), 37 | MissingSet = sets:subtract(FunSet, FoundSet), 38 | sets:size(MissingSet) > 0 andalso report_missing(MissingSet), 39 | LineList = [fun_line_text(E) || E <- FoundLines], 40 | LineMap = dict:from_list(LineList), 41 | % Insert line number info into target files 42 | update_file(MdFile, Text, LineMap, LineList), 43 | io:format("Updated function line numbers in ~p\n", [MdFile]). 44 | 45 | report_missing(MissingSet) -> 46 | MissingList = lists:sort(sets:to_list(MissingSet)), 47 | io:format(standard_error, "[WARNING] Could not find the following functions:\n", []), 48 | [io:format(standard_error, "\t~p:~p/~p\n", [M, F, A]) || 49 | {M, F, A} <- MissingList]. 50 | 51 | parse_funs(Text) -> 52 | {ok, Re} = re:compile("\\[(\\w+):(\\w+)/(\\d+)\\]"), 53 | case re:run(Text, Re, [{capture, [1,2,3], binary}, global]) of 54 | nomatch -> 55 | []; 56 | {match, Matches} -> 57 | [{binary_to_atom(Mod, utf8), 58 | binary_to_atom(Fun, utf8), 59 | binary_to_integer(Arity)} 60 | || [Mod, Fun, Arity] <- lists:usort(Matches)] 61 | end. 62 | 63 | beam_filename(Ebin, Mod) -> 64 | filename:join(Ebin, atom_to_list(Mod) ++ ".beam"). 
65 | 66 | update_file(Filename, Text, LineMap, LineList) -> 67 | NewText = update_lines(Text, LineMap, LineList), 68 | file:write_file(Filename, NewText). 69 | 70 | % Extracts all function line numbers from abstract code chunk in beam file. 71 | get_line_nums(BeamFile, Mod, FunSet) -> 72 | {ok, {_, [{abstract_code, {_, Items}}]}} = beam_lib:chunks(BeamFile, [abstract_code]), 73 | [{{Mod, Fun, Arity}, Line} || {function, Line, Fun, Arity, _} <- Items, 74 | sets:is_element({Mod, Fun, Arity}, FunSet)]. 75 | 76 | fun_line_text({{Mod, Fun, Arity}, Line}) -> 77 | {mfa_bin(Mod, Fun, Arity), line_url(Mod, Line)}. 78 | 79 | mfa_bin(M, F, A) -> 80 | BM = atom_to_binary(M, utf8), 81 | BF = atom_to_binary(F, utf8), 82 | BA = integer_to_binary(A), 83 | <<"[", BM/binary, ":", BF/binary,"/", BA/binary, "]">>. 84 | 85 | line_url(M, L) -> 86 | list_to_binary("../src/" ++ atom_to_list(M) ++ ".erl#L" ++ integer_to_list(L)). 87 | 88 | update_lines(Text, LineMap, LineList) -> 89 | Tokens = re:split(Text, "(\\[\\w+:\\w+/\\d+\\])\\s*(:.*$\n|\\([^)]*\\))", [multiline]), 90 | Lines1 = replace_line_nums(Tokens, LineMap, []), 91 | RefLines = lists:flatten([ [Fun, <<": ">>, Line, <<"\n">>] || {Fun, Line} <- LineList]), 92 | Lines1 ++ RefLines. 93 | 94 | replace_line_nums([], _, Acc) -> 95 | lists:reverse(Acc); 96 | replace_line_nums([_, <<":", _/binary>> | Rest], LineMap, Acc) -> 97 | replace_line_nums(Rest, LineMap, Acc); 98 | replace_line_nums([MaybeFun, Bin = <<"(", _/binary>> | Rest], LineMap, Acc) -> 99 | case dict:find(MaybeFun, LineMap) of 100 | {ok, _Line} -> 101 | NewText = list_to_binary([MaybeFun, "[]"]), 102 | replace_line_nums(Rest, LineMap, [NewText|Acc]); 103 | _ -> 104 | replace_line_nums(Rest, LineMap, [Bin, MaybeFun | Acc]) 105 | end; 106 | replace_line_nums([Bin|Rest], LineMap, Acc) -> 107 | replace_line_nums(Rest, LineMap, [Bin|Acc]). 
108 | 109 | -------------------------------------------------------------------------------- /eqc/eraser_eqc.erl: -------------------------------------------------------------------------------- 1 | -module(eraser_eqc). 2 | 3 | -include_lib("eqc/include/eqc.hrl"). 4 | -include_lib("eqc/include/eqc_component.hrl"). 5 | 6 | -compile([export_all, nowarn_export_all]). 7 | 8 | %% -- State ------------------------------------------------------------------ 9 | initial_state() -> 10 | #{ deletes => [] }. 11 | 12 | %% -- Operations ------------------------------------------------------------- 13 | 14 | %% --- Operation: start --- 15 | start_pre(S) -> not maps:is_key(pid, S). 16 | 17 | start_args(_S) -> 18 | [gen_delete_mode()]. 19 | 20 | start(DelMode) -> 21 | FilePath = riak_kv_test_util:get_test_dir("eraser_eqc"), 22 | {ok, Pid} = riak_kv_eraser:start_link(FilePath), 23 | case DelMode of 24 | keep -> 25 | riak_kv_eraser:override_redo(false); 26 | immediate -> 27 | riak_kv_eraser:override_redo(true) 28 | end, 29 | ok = gen_server:call(Pid, {override_action, fun erase/2}), 30 | Pid. 31 | 32 | start_next(S, Pid, [DelMode]) -> 33 | S#{ delete_mode => DelMode, pid => Pid }. 34 | 35 | %% --- Operation: delete --- 36 | delete_pre(S) -> maps:is_key(pid, S). 37 | 38 | delete_args(#{ pid := Pid }) -> 39 | [Pid, gen_delete_ref()]. 40 | 41 | delete(Pid, {Ref, Retries}) -> 42 | ets:insert(?MODULE, {Ref, Retries}), 43 | riak_kv_eraser:request_delete(Pid, Ref). 44 | 45 | delete_next(S = #{ deletes := Ds }, _Value, [_Pid, {Ref, Retries}]) -> 46 | S#{ deletes := [{Ref, Retries} | Ds] }. 47 | 48 | delete_features(_, [_, {_, N}], _) -> 49 | [{retries, N}]. 50 | 51 | %% -- Generators ------------------------------------------------------------- 52 | gen_delete_mode() -> 53 | elements([keep, immediate]). 54 | 55 | gen_delete_ref() -> 56 | ?LET(Ref, gen_reference(), 57 | weighted_default({4, {Ref, 1}}, {1, {Ref, choose(2, 5)}})). 58 | 59 | gen_reference() -> 60 | os:timestamp(). 
61 | 62 | %% -- Properties ------------------------------------------------------------- 63 | 64 | prop_eraser() -> 65 | application:set_env(riak_kv, eraser_redo_timeout, 20), 66 | ?FORALL(Cmds, commands(?MODULE), 67 | begin 68 | ets:new(?MODULE, [named_table, public]), 69 | {H, S, Res} = run_commands(Cmds), 70 | StopRes = stop_job(maps:get(pid, S, undefined)), 71 | ETSRes = ets:tab2list(?MODULE), 72 | ets:delete(?MODULE), 73 | aggregate(call_features(H), 74 | pretty_commands(?MODULE, Cmds, {H, S, Res}, 75 | conjunction([ 76 | {result, equals(Res, ok)}, 77 | {stop, equals(StopRes, ok)}, 78 | {ets, equals(ETSRes, [])}]))) 79 | end). 80 | 81 | stop_job(undefined) -> ok; 82 | stop_job(Pid) -> 83 | MonRef = monitor(process, Pid), 84 | riak_kv_eraser:stop_job(Pid), 85 | receive 86 | {'DOWN', MonRef, _, _, _} -> ok 87 | after 1000 -> 88 | timeout_stop_job 89 | end. 90 | 91 | 92 | erase(Ref, _) -> 93 | case ets:lookup(?MODULE, Ref) of 94 | [{_, 1}] -> 95 | ets:delete(?MODULE, Ref), true; 96 | [{_, N}] -> 97 | ets:insert(?MODULE, {Ref, N - 1}), false 98 | end. 
--------------------------------------------------------------------------------
/eqc/kv679_fb_byz.eqc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/basho/riak_kv/18817d228ecd818f8fc997fc6a6a288d00372209/eqc/kv679_fb_byz.eqc
--------------------------------------------------------------------------------
/eqc/kv679_fb_byz2.eqc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/basho/riak_kv/18817d228ecd818f8fc997fc6a6a288d00372209/eqc/kv679_fb_byz2.eqc
--------------------------------------------------------------------------------
/eqc/kv679_fb_byz3.eqc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/basho/riak_kv/18817d228ecd818f8fc997fc6a6a288d00372209/eqc/kv679_fb_byz3.eqc
--------------------------------------------------------------------------------
/eqc/kv_vnode_status_mgr_eqc.erl:
--------------------------------------------------------------------------------
%%% @author Russell Brown
%%% @copyright (C) 2014, Russell Brown
%%% @doc
%%%
%%% @end
%%% Created : 14 Nov 2014 by Russell Brown

-module(kv_vnode_status_mgr_eqc).

-ifdef(EQC).
-include_lib("eqc/include/eqc.hrl").
-include_lib("eqc/include/eqc_statem.hrl").
-include_lib("eunit/include/eunit.hrl").

-compile([export_all, nowarn_export_all]).

-record(state,{}).

%% Counter leases are capped at 32 bits.
-define(MAX_INT, ((1 bsl 32) -1)).

%% @doc Returns the state in which each test case starts. (Unless a different
%% initial state is supplied explicitly to, e.g. commands/2.)
-spec initial_state() -> eqc_statem:symbolic_state().
initial_state() ->
    #state{}.

%% ------ Grouped operator: lease_counter
%% Generate a strictly positive lease, mixing large and small integers.
lease_counter_args(_S) ->
    [frequency([{5, ?SUCHTHAT(Lease, ?LET(I, largeint(), abs(I)), Lease > 0)},
                {5, ?SUCHTHAT(Lease, ?LET(I, int(), abs(I)), Lease > 0)}])].

%% Ask the status manager for a counter lease and update the ETS model
%% to match. Returns {ModelCounter, ActualCounter} for the postcondition.
lease_counter(Lease) ->
    [{status, LastId, MoCnt, Pid}] = ets:lookup(vnode_status, status),
    NewMoLease = MoCnt + Lease,
    %% A fresh vnode id is taken when the counter is already exhausted or
    %% the requested lease would overflow ?MAX_INT. The original two case
    %% branches ({true,_} and {false,true}) computed the identical result,
    %% so they are merged into a single condition here.
    {NewMoId, NewCntrModel} =
        case MoCnt == ?MAX_INT orelse NewMoLease > ?MAX_INT of
            true ->
                {LastId + 1, min(Lease, ?MAX_INT)};
            false ->
                {LastId, NewMoLease}
        end,
    ok = riak_kv_vnode_status_mgr:lease_counter(Pid, Lease),
    %% A timeout leaves the atom 'timeout' unmatched against the tuple
    %% pattern below, deliberately crashing (and so failing) the test.
    {VnodeId, NewCntr} =
        receive
            {counter_lease, {_, Id, NewLease}} ->
                {Id, NewLease}
        after
            60000 -> %% one minute!
                io:format("timeout!!!! ~p ~n", [erlang:is_process_alive(Pid)]),
                timeout
        end,
    true = ets:insert(vnode_status, {status, NewMoId, NewCntrModel, Pid}),
    true = ets:insert(vnodeids, {VnodeId}),
    {NewCntrModel, NewCntr}.

%% @doc lease_counter_post - Postcondition for lease_counter: the model
%% counter and the counter persisted by the manager must agree.
-spec lease_counter_post(S :: eqc_statem:dynamic_state(),
                         Args :: [term()], R :: term()) -> true | term().
lease_counter_post(_S, _Args, {Cnt, Cnt}) ->
    true;
lease_counter_post(_S, _Args, {MoCnt, Cnt}) ->
    {postcondition_failed, "Ets and Disk don't match", MoCnt, Cnt}.

%% @doc weight/2 - Distribution of calls
-spec weight(S :: eqc_statem:symbolic_state(), Command :: atom()) -> integer().
weight(_S, lease_counter) -> 1;
weight(_S, _Cmd) -> 1.

%% @doc Default generated property: after any sequence of leases the model
%% counter matches the persisted counter, and the number of distinct vnode
%% ids seen equals the model's id generation count.
-spec prop_monotonic() -> eqc:property().
prop_monotonic() ->
    ?FORALL(Cmds, non_empty(commands(?MODULE)),
    begin
        ets:new(vnode_status, [named_table, set]),
        ets:new(vnodeids, [named_table, set]),
        TestPath = riak_kv_test_util:get_test_dir("status_mgr_eqc"),
        {ok, Pid} = riak_kv_vnode_status_mgr:test_link(self(), 1, true, TestPath),
        {ok, {ID, _Counter, _Lease}} = riak_kv_vnode_status_mgr:get_vnodeid_and_counter(Pid, 1),
        true = ets:insert(vnode_status, {status, 1, 1, Pid}),
        true = ets:insert(vnodeids, {ID}),
        {H, S, Res} = run_commands(?MODULE, Cmds),
        [{status, Id, MoCntr, Pid}] = ets:lookup(vnode_status, status),
        VnodeIds = ets:info(vnodeids, size),
        {ok, Status} = riak_kv_vnode_status_mgr:status(Pid),
        Cnt = proplists:get_value(counter, Status, 0),

        ets:delete(vnode_status),
        ets:delete(vnodeids),
        riak_kv_vnode_status_mgr:clear_vnodeid(Pid),
        ok = riak_kv_vnode_status_mgr:stop(Pid),

        measure(vnodeid_changes, Id,
                aggregate(command_names(Cmds),
                          pretty_commands(?MODULE, Cmds, {H, S, Res},
                                          conjunction([{result, equals(Res, ok)},
                                                       {values, equals(MoCntr, Cnt)},
                                                       {ids, equals(Id, VnodeIds)}
                                                      ])
                                         )
                         )
               )
    end).

-endif.
--------------------------------------------------------------------------------
/eqc/reaper_eqc.erl:
--------------------------------------------------------------------------------
-module(reaper_eqc).

-include_lib("eqc/include/eqc.hrl").
-include_lib("eqc/include/eqc_component.hrl").

-compile([export_all, nowarn_export_all]).

%% -- State ------------------------------------------------------------------
initial_state() ->
    #{ reaps => [] }.

%% -- Operations -------------------------------------------------------------

%% --- Operation: start ---
start_pre(S) -> not maps:is_key(pid, S).

start_args(_S) ->
    [].
%% Start the reaper under test and install the mocked reap action.
start() ->
    TestDir = riak_kv_test_util:get_test_dir("reaper_eqc"),
    {ok, Pid} = riak_kv_reaper:start_link(TestDir),
    ok = gen_server:call(Pid, {override_action, fun reaper/2}),
    Pid.

start_next(S, Pid, []) ->
    S#{ pid => Pid }.

%% --- Operation: reap ---
reap_pre(S) -> maps:is_key(pid, S).

reap_args(#{ pid := Pid }) ->
    [Pid, gen_reap_ref()].

%% Record the expected number of attempts in the shared ETS table,
%% then hand the reference to the reaper.
reap(Pid, {Ref, Retries}) ->
    ets:insert(?MODULE, {Ref, Retries}),
    riak_kv_reaper:request_reap(Pid, Ref).

reap_next(S = #{ reaps := Reaps }, _Value, [_Pid, {Ref, Retries}]) ->
    S#{ reaps := [{Ref, Retries} | Reaps] }.

reap_features(_, [_, {_, N}], _) ->
    [{retries, N}].

%% -- Generators ---------------------------------------------------------------

%% Mostly single-attempt reaps, occasionally ones needing 2..5 attempts.
gen_reap_ref() ->
    ?LET(Ref, gen_reference(),
         weighted_default({4, {Ref, 1}}, {1, {Ref, choose(2, 5)}})).

gen_reference() ->
    os:timestamp().

%% -- Properties ---------------------------------------------------------------

%% After running the commands and stopping the reaper, every recorded
%% reap must have completed, i.e. the shared ETS table must be empty.
prop_reaper() ->
    application:set_env(riak_kv, reaper_redo_timeout, 20),
    ?FORALL(Cmds, commands(?MODULE),
    begin
        ets:new(?MODULE, [named_table, public]),
        {H, S, Res} = run_commands(Cmds),
        StopRes = stop_job(maps:get(pid, S, undefined)),
        Leftover = ets:tab2list(?MODULE),
        ets:delete(?MODULE),
        aggregate(call_features(H),
                  pretty_commands(?MODULE, Cmds, {H, S, Res},
                                  conjunction([{result, equals(Res, ok)},
                                               {stop, equals(StopRes, ok)},
                                               {ets, equals(Leftover, [])}])))
    end).

%% Stop the reaper and wait for it to terminate (1s grace period).
stop_job(undefined) -> ok;
stop_job(Pid) ->
    MonRef = monitor(process, Pid),
    riak_kv_reaper:stop_job(Pid),
    receive
        {'DOWN', MonRef, _, _, _} -> ok
    after 1000 ->
        timeout_stop_job
    end.

%% Mocked reap action: succeeds once the retry budget reaches 1
%% (removing the entry), otherwise decrements and reports failure so
%% the reaper will redo the reap.
reaper(Ref, _) ->
    case ets:lookup(?MODULE, Ref) of
        [{_, 1}] ->
            ets:delete(?MODULE, Ref),
            true;
        [{_, N}] ->
            ets:insert(?MODULE, {Ref, N - 1}),
            false
    end.
--------------------------------------------------------------------------------
/eqc/replrtq_snk_monitor.erl:
--------------------------------------------------------------------------------
%%% File : replrtq_snk_monitor.erl
%%% Author : Ulf Norell
%%% Description :
%%% Created : 10 Jun 2019 by Ulf Norell
-module(replrtq_snk_monitor).

-compile([export_all, nowarn_export_all]).

-behaviour(gen_server).

%% API
-export([start_link/0, stop/0, fetch/2, push/4, suspend/1, resume/1,
         add_queue/4, remove_queue/1, update_workers/2]).

%% gen_server callbacks
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
         terminate/2, code_change/3]).

-define(SERVER, ?MODULE).

%% queues: #queue{} records; peers: {{Peer, Queue}} -> {Ref, Cfg};
%% traces: queue Ref -> [{Timestamp, Event}] (most recent first).
-record(state, {queues = [], peers = #{}, traces = #{}}).
-record(queue, {ref, name, peers, workers, peerlimit}).

%% -- API functions ----------------------------------------------------------

start_link() ->
    gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).

%% Stop the monitor; returns the final per-queue traces.
stop() ->
    gen_server:call(?SERVER, stop).

fetch(Client, QueueName) ->
    gen_server:call(?SERVER, {fetch, Client, QueueName}).

push(RObj, Bool, List, LocalClient) ->
    gen_server:call(?SERVER, {push, RObj, Bool, List, LocalClient}).

add_queue(Queue, Peers, Workers, Peerlimit) ->
    gen_server:call(?SERVER, {add_queue, Queue, Peers, Workers, Peerlimit}).

remove_queue(Queue) ->
    gen_server:call(?SERVER, {remove, Queue}).

suspend(Queue) ->
    gen_server:call(?SERVER, {suspend, Queue}).

resume(Queue) ->
    gen_server:call(?SERVER, {resume, Queue}).
update_workers(Queue, Workers) ->
    gen_server:call(?SERVER, {update_workers, Queue, Workers}).

%% Used in place of a real client connection: a "client" is just {Host, Port}.
create(Host, Port, _, _) ->
    {Host, Port}.

%% -- Callbacks ----------------------------------------------------------------

init([]) ->
    {ok, #state{}}.

handle_call({add_queue, Queue, Peers, Workers, Peerlimit}, _From, State) ->
    Ref = make_ref(),
    %% Index the http peers by {{Host, Port}, Queue} for fetch lookups.
    PeerMap = maps:from_list([{{{Host, Port}, Queue}, {Ref, Cfg}}
                              || {{Host, Port, http}, Cfg} <- Peers]),
    Q = #queue{ref = Ref, name = Queue, peers = Peers,
               workers = Workers, peerlimit = Peerlimit},
    State1 = State#state{queues = [Q | State#state.queues],
                         peers = maps:merge(State#state.peers, PeerMap)},
    {reply, ok, add_trace(State1, Queue, {workers, Workers})};
handle_call({fetch, Client, QueueName}, From, State = #state{peers = Peers}) ->
    State1 = add_trace(State, QueueName, {fetch, Client}),
    case maps:get({Client, QueueName}, Peers, undefined) of
        undefined ->
            catch replrtq_mock:error({bad_fetch, Client, QueueName}),
            %% NOTE(review): the fetch trace recorded in State1 is dropped on
            %% this error path (State, not State1, is kept) -- looks
            %% deliberate, but worth confirming.
            {reply, error, State};
        {_Ref, {Active, Delay}} ->
            %% Reply later, after the configured per-peer delay.
            erlang:send_after(Delay, self(), {return, From, QueueName, Active}),
            {noreply, State1}
    end;
handle_call({push, _RObj, _Bool, _List, _LocalClient}, _From, State) ->
    {reply, {ok, os:timestamp()}, State};
handle_call(stop, _From, State) ->
    %% Close every trace with a stop event, then return the final traces.
    State1 = lists:foldl(fun(R, S) -> add_trace(S, R, stop) end, State,
                         maps:keys(State#state.traces)),
    Ret = [final_trace(State1, R) || R <- maps:keys(State1#state.traces)],
    {stop, normal, Ret, State1};
handle_call({remove, Queue}, _From, State) ->
    {reply, ok, add_trace(State, Queue, remove)};
handle_call({suspend, Queue}, _From, State) ->
    {reply, ok, add_trace(State, Queue, suspend)};
handle_call({resume, Queue}, _From, State) ->
    {reply, ok, add_trace(State, Queue, resume)};
handle_call({update_workers, Q, Workers}, _From, State) ->
    Queue = lists:keyfind(Q, #queue.name, State#state.queues),
    %% peerlimit follows the worker count on update.
    NewQueue = Queue#queue{workers = Workers, peerlimit = Workers},
    State1 = State#state{queues = [NewQueue | State#state.queues -- [Queue]]},
    {reply, ok, add_trace(State1, Q, {workers, Workers})};
handle_call(_Request, _From, State) ->
    {reply, ok, State}.

handle_cast(_Msg, State) ->
    {noreply, State}.

%% Deliver the delayed fetch reply scheduled in handle_call({fetch,...}).
handle_info({return, From, QueueName, Active}, State) ->
    Reply =
        case Active of
            active -> {ok, <<"riak_obj">>};
            inactive -> {ok, queue_empty};
            error -> {error, no_client}
        end,
    gen_server:reply(From, Reply),
    {noreply, add_trace(State, QueueName, {return, Active})};
handle_info(_Info, State) ->
    {noreply, State}.

terminate(_Reason, _State) ->
    ok.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.

%% -- Internal functions ---------------------------------------------------------

%% Prepend a timestamped event to a queue's trace; unknown queues are ignored.
add_trace(S, undefined, _) -> S;
add_trace(S = #state{traces = Traces}, Ref, Event) when is_reference(Ref) ->
    Trace0 = maps:get(Ref, Traces, []),
    S#state{traces = Traces#{Ref => [{os:timestamp(), Event} | Trace0]}};
add_trace(S, QueueName, Event) ->
    add_trace(S, get_ref(S, QueueName), Event).

get_ref(#state{queues = Queues}, QueueName) ->
    case lists:keyfind(QueueName, #queue.name, Queues) of
        #queue{ref = Ref} when is_reference(Ref) -> Ref;
        false -> undefined
    end.

%% Assemble the chronological trace for one queue.
final_trace(#state{queues = Queues, traces = Traces}, Ref) ->
    Trace = maps:get(Ref, Traces),
    #queue{name = Name, peers = Peers, workers = Workers, peerlimit = PL} =
        lists:keyfind(Ref, #queue.ref, Queues),
    {Name, PL, Peers, Workers, lists:reverse(Trace)}.
--------------------------------------------------------------------------------
/eqc/riak_object_eqc.erl:
--------------------------------------------------------------------------------
%% -------------------------------------------------------------------
%%
%% riak_object_eqc: serialization/deserialization of riak_object for disk/wire
%% and converting between versions
%%
%% Copyright (c) 2012 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%%   http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
-module(riak_object_eqc).
-ifdef(EQC).

-include_lib("eqc/include/eqc.hrl").
-include_lib("eunit/include/eunit.hrl").

-compile([export_all, nowarn_export_all]).

%% deserializing a binary representation of a riak_object and
%% reserializing it for the same version should result in the same
%% binary
prop_roundtrip() ->
    ?FORALL({Bucket, Key, ObjBin, Vsn},
            riak_object_bin(),
            collect(Vsn,
                    ObjBin =:= riak_object:to_binary(
                                 Vsn,
                                 riak_object:from_binary(Bucket, Key, ObjBin)))).

%% Generate {Bucket, Key, SerialisedObject, Version} for a random object.
riak_object_bin() ->
    ?LET({Obj, Vsn},
         {fsm_eqc_util:riak_object(), binary_version()},
         {riak_object:bucket(Obj),
          riak_object:key(Obj),
          riak_object:to_binary(Vsn, Obj),
          Vsn}).

binary_version() ->
    oneof([v0, v1]).

%%====================================================================
%% Shell helpers
%%====================================================================

test() ->
    test(100).

test(N) ->
    quickcheck(numtests(N, prop_roundtrip())).

check() ->
    check(prop_roundtrip(), current_counterexample()).

-endif. %% EQC
--------------------------------------------------------------------------------
/include/.empty_for_hg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/basho/riak_kv/18817d228ecd818f8fc997fc6a6a288d00372209/include/.empty_for_hg
--------------------------------------------------------------------------------
/include/riak_kv_dtrace.hrl:
--------------------------------------------------------------------------------
-include_lib("riak_core/include/riak_core_dtrace.hrl").

%% Main wrapper macro for DTrace/SystemTap probe annotations
%% NOTE: We assume there will be per-module dtrace_int() and dtrace() funcs!

-define(DTRACE(Category, Ints, Strings),
        dtrace_int(Category, Ints, Strings)).

%% Conditional variant: only fires the probe when Cond is true.
-define(DTRACE(Cond, Category, Ints, Strings),
        case Cond of
            true ->
                dtrace_int(Category, Ints, Strings);
            _ -> ok
        end).

%% Probe categories
-define(C_GET_FSM_INIT, 500).
-define(C_GET_FSM_PREPARE, 501).
-define(C_GET_FSM_VALIDATE, 502).
-define(C_GET_FSM_EXECUTE, 503).
-define(C_GET_FSM_PREFLIST, 504).
-define(C_GET_FSM_WAITING_R, 505).
-define(C_GET_FSM_WAITING_R_TIMEOUT, 506).
-define(C_GET_FSM_CLIENT_REPLY, 507).
-define(C_GET_FSM_FINALIZE, 508).
-define(C_GET_FSM_MAYBE_DELETE, 509).
-define(C_GET_FSM_RR, 510).
-define(C_GET_FSM_WAITING_RR, 511).
-define(C_GET_FSM_WAITING_RR_TIMEOUT, 512).

-define(C_PUT_FSM_INIT, 520).
-define(C_PUT_FSM_PREPARE, 521).
-define(C_PUT_FSM_VALIDATE, 522).
-define(C_PUT_FSM_PRECOMMIT, 523).
-define(C_PUT_FSM_EXECUTE_LOCAL, 524).
-define(C_PUT_FSM_WAITING_LOCAL_VNODE, 525).
-define(C_PUT_FSM_EXECUTE_REMOTE, 526).
-define(C_PUT_FSM_WAITING_REMOTE_VNODE, 527).
-define(C_PUT_FSM_PROCESS_REPLY, 528).
-define(C_PUT_FSM_POSTCOMMIT, 529).
-define(C_PUT_FSM_FINISH, 530).
-define(C_PUT_FSM_DECODE_PRECOMMIT, 531). % errors only
-define(C_PUT_FSM_DECODE_POSTCOMMIT, 532). % errors only

-define(C_DELETE_INIT1, 535).
-define(C_DELETE_INIT2, 536).
-define(C_DELETE_REAPER_GET_DONE, 537).

-define(C_BUCKETS_INIT, 540).
-define(C_BUCKETS_PROCESS_RESULTS, 541).
-define(C_BUCKETS_FINISH, 542).

-define(C_KEYS_INIT, 545).
-define(C_KEYS_PROCESS_RESULTS, 546).
-define(C_KEYS_FINISH, 547).
--------------------------------------------------------------------------------
/include/riak_kv_index.hrl:
--------------------------------------------------------------------------------
%% -------------------------------------------------------------------
%%
%% riak_index: central module for indexing.
%%
%% Copyright (c) 2007-2013 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%%   http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------

%% Index query records
-record(riak_kv_index_v2, {
          start_key= <<>> :: binary(),
          filter_field :: binary() | undefined,
          start_term :: binary() | undefined, %% Note, in a $key query, start_key==start_term
          end_term :: binary() | undefined, %% Note, in an eq query, start==end
          return_terms=true :: boolean(), %% Note, should be false for an equals query
          start_inclusive=true :: boolean(),
          end_inclusive=true :: boolean(),
          return_body=false ::boolean() %% Note, only for riak cs bucket folds
         }).

%% v3 adds integer terms, regex term filtering and result limits.
-record(riak_kv_index_v3, {
          start_key= <<>> :: binary(),
          filter_field :: binary() | undefined,
          start_term :: integer() | binary() | undefined, %% Note, in a $key query, start_key==start_term
          end_term :: integer() | binary() | undefined, %% Note, in an eq query, start==end
          return_terms=true :: boolean(), %% Note, should be false for an equals query
          start_inclusive=true :: boolean(),
          end_inclusive=true :: boolean(),
          return_body=false ::boolean(), %% Note, only for riak cs bucket folds
          term_regex :: binary() | undefined,
          max_results :: integer() | undefined
         }).

-define(KV_INDEX_Q, #riak_kv_index_v3).
--------------------------------------------------------------------------------
/include/riak_kv_map_phase.hrl:
--------------------------------------------------------------------------------
-record(riak_kv_map_input, {bkey,
                            bprops,
                            kd,
                            preflist}).
--------------------------------------------------------------------------------
/include/riak_kv_mrc_sink.hrl:
--------------------------------------------------------------------------------
%% used to communicate from riak_kv_mrc_sink to riak_kv_wm_mapred and
%% riak_kv_pb_mapred
-record(kv_mrc_sink,
        {
          ref :: reference(), % the pipe ref
          results :: [{PhaseId::integer(), Result::term()}],
          logs :: [{PhaseId::integer(), Message::term()}],
          done :: boolean()
        }).

%% used by riak_kv_mrc_sink:mapred_stream_sink
-record(mrc_ctx,
        {
          ref :: reference(), % the pipe ref (so we don't have to dig)
          pipe :: riak_pipe:pipe(),
          sink :: {pid(), reference()}, % sink and monitor
          sender :: {pid(), reference()} | undefined, % async sender and monitor
          timer :: {reference(), reference()} | undefined, % timeout timer and pipe ref
          keeps :: integer()
        }).
--------------------------------------------------------------------------------
/include/riak_kv_types.hrl:
--------------------------------------------------------------------------------
-record(crdt, {mod, ctype, value}).
-record(crdt_op, {mod, op, ctx}).

-define(CRDT, #crdt).
-define(CRDT_OP, #crdt_op).

%% Top Level Key->Type Types
-define(V1_COUNTER_TYPE, riak_kv_pncounter).
-define(V1_COUNTER_TYPE(Val), #crdt{mod=?V1_COUNTER_TYPE, ctype="application/riak_counter", value=Val}).
-define(COUNTER_TYPE, riak_dt_pncounter).
-define(COUNTER_TYPE(Val), #crdt{mod=?COUNTER_TYPE, ctype="application/riak_counter", value=Val}).

-define(SET_TYPE, riak_dt_orswot).
-define(SET_TYPE(Val), #crdt{mod=?SET_TYPE, ctype="application/riak_set", value=Val}).

-define(HLL_TYPE, riak_kv_hll).
-define(HLL_TYPE(Val), #crdt{mod=?HLL_TYPE,
                             ctype="application/riak_hll",
                             value=Val}).
-define(GSET_TYPE, riak_dt_gset).
-define(GSET_TYPE(Val), #crdt{mod=?GSET_TYPE, ctype="application/riak_gset", value=Val}).

-define(MAP_TYPE, riak_dt_map).
-define(MAP_TYPE(Val), #crdt{mod=?MAP_TYPE, ctype="application/riak_map", value=Val}).

%% Internal Only Key->Map->Field->Type types
-define(FLAG_TYPE, riak_dt_od_flag).
-define(REG_TYPE, riak_dt_lwwreg).
-define(EMCNTR_TYPE, riak_dt_emcntr).

%% Top-level types grouped by the version that introduced them.
-define(V1_TOP_LEVEL_TYPES, [pncounter]).
-define(V2_TOP_LEVEL_TYPES, [?COUNTER_TYPE, ?SET_TYPE, ?MAP_TYPE]).
-define(V3_TOP_LEVEL_TYPES, [?HLL_TYPE]).
-define(V4_TOP_LEVEL_TYPES, [?GSET_TYPE]).
-define(TOP_LEVEL_TYPES, ?V1_TOP_LEVEL_TYPES ++ ?V2_TOP_LEVEL_TYPES ++
            ?V3_TOP_LEVEL_TYPES ++ ?V4_TOP_LEVEL_TYPES).
-define(ALL_TYPES, ?TOP_LEVEL_TYPES ++ [?FLAG_TYPE, ?REG_TYPE]).
-define(EMBEDDED_TYPES, [{map, ?MAP_TYPE}, {set, ?SET_TYPE},
                         {counter, ?EMCNTR_TYPE}, {flag, ?FLAG_TYPE},
                         {register, ?REG_TYPE}]).


-define(MOD_MAP, [{map, ?MAP_TYPE}, {set, ?SET_TYPE},
                  {counter, ?COUNTER_TYPE}, {hll, ?HLL_TYPE}, {gset, ?GSET_TYPE}]).

-define(DATATYPE_STATS_DEFAULTS, [actor_count]).
-define(HLL_STATS, [bytes]).

%% These proplists represent the current versions of supported
%% datatypes. The naming `EN_DATATYPE_VERSIONS' means `Epoch' and
%% number. `N' is incremented when any new version of any datatype is
%% introduced, thus bumping the data type `Epoch'.
-define(E1_DATATYPE_VERSIONS, [{?COUNTER_TYPE, 2}]).
-define(E2_DATATYPE_VERSIONS, [{?MAP_TYPE, 2},
                               {?SET_TYPE, 2},
                               {?COUNTER_TYPE, 2}]).
-define(E3_DATATYPE_VERSIONS, ?E2_DATATYPE_VERSIONS ++ [{?HLL_TYPE, 1}]).
-define(E4_DATATYPE_VERSIONS, ?E2_DATATYPE_VERSIONS ++ [{?HLL_TYPE, 1}, {?GSET_TYPE, 2}]).

-type crdt() :: ?CRDT{}.
-type crdt_op() :: ?CRDT_OP{}.

%% Redis Default Yo Lolz, but a good default.
-define(HYPER_DEFAULT_PRECISION, 14).
--------------------------------------------------------------------------------
/include/riak_kv_vnode.hrl:
--------------------------------------------------------------------------------
-include_lib("riak_core/include/riak_core_vnode.hrl").


-record(riak_kv_w1c_put_reply_v1, {
          reply :: ok | {error, term()},
          type :: primary | fallback
         }).

%% this is a legacy request *potentially* handled via riak_core_vnode_master
%% we are not refactoring it because we think it likely should be deleted.
%% TODO: investigate whether it *can* be deleted
-record(riak_kv_listkeys_req_v2, {
          bucket :: binary()|'_'|tuple(),
          req_id :: non_neg_integer(),
          caller :: pid()}).

-define(KV_W1C_PUT_REPLY, #riak_kv_w1c_put_reply_v1).

%% @doc vnode_lock(PartitionIndex) is a kv per-vnode lock, used possibly,
%% by AAE tree rebuilds, fullsync, and handoff.
%% See @link riak_core_background_mgr:get_lock/1
-define(KV_VNODE_LOCK(Idx), {vnode_lock, Idx}).

-define(ENABLE_TICTACAAE, true).
-define(PARALLEL_AAEORDER, leveled_ko).
-define(REBUILD_SCHEDULE, {120, 14400}).
--------------------------------------------------------------------------------
/include/riak_object.hrl:
--------------------------------------------------------------------------------
-define(DOT, <<"dot">>).
%% The event at which a value was written, stored in metadata 2 | 3 | 4 | -------------------------------------------------------------------------------- /priv/.empty_for_hg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/basho/riak_kv/18817d228ecd818f8fc997fc6a6a288d00372209/priv/.empty_for_hg -------------------------------------------------------------------------------- /priv/mapred_builtins.js: -------------------------------------------------------------------------------- 1 | /** Helper functions start **/ 2 | 3 | var RiakHelper = function() { 4 | return { 5 | numericSorter: function(first, second) { 6 | return first - second; 7 | } 8 | }; 9 | }(); 10 | 11 | /** Helper functions end **/ 12 | 13 | var Riak = function() { 14 | 15 | return { 16 | getClassName: function(obj) { 17 | if (obj && obj.constructor && obj.constructor.toString) { 18 | var arr = obj.constructor.toString().match(/function\s*(\w+)/); 19 | if (arr && arr.length == 2) { 20 | return arr[1]; 21 | } 22 | } 23 | return undefined; 24 | }, 25 | filterNotFound: function(values) { 26 | return values.filter(function(value, index, data) { 27 | if (typeof value === 'object') { 28 | return value['not_found'] === undefined; 29 | } 30 | else { 31 | return true; 32 | } }); 33 | }, 34 | mapValues: function(value, keyData, arg) { 35 | if (value["not_found"]) { 36 | return [value]; 37 | } 38 | var data = value["values"][0]["data"]; 39 | if (Riak.getClassName(data) !== "Array") { 40 | return [data]; 41 | } 42 | else { 43 | return data; 44 | }}, 45 | mapValuesJson: function(value, keyData, arg) { 46 | if (value["not_found"]) { 47 | return [value]; 48 | } 49 | var newValues = Riak.mapValues(value, keyData, arg); 50 | return newValues.map(function(nv) { return JSON.parse(nv); }); 51 | }, 52 | mapByFields: function(value, keyData, fields) { 53 | if(!value.not_found) { 54 | var object = Riak.mapValuesJson(value)[0]; 55 | for(field in fields) { 56 
| if(object[field] != fields[field]) { 57 | return []; 58 | } 59 | } 60 | return [object]; 61 | } 62 | else { 63 | return []; 64 | } 65 | }, 66 | reduceSum: function(values, arg) { 67 | values = Riak.filterNotFound(values); 68 | if (values.length > 0) { 69 | return [values.reduce(function(prev, curr, index, array) { return prev + curr; } )]; 70 | } 71 | else { 72 | return [0]; 73 | }}, 74 | reduceMin: function(values, arg) { 75 | if(values.length == 0) 76 | return []; 77 | else 78 | return [values.reduce(function(prev,next){ 79 | return (prev < next) ? prev : next; 80 | })]; 81 | }, 82 | reduceMax: function(values, arg) { 83 | if(values.length == 0) 84 | return []; 85 | else 86 | return [values.reduce(function(prev,next){ 87 | return (prev > next) ? prev : next; 88 | })]; 89 | }, 90 | reduceSort: function(value, arg) { 91 | try { 92 | var c = eval(arg); 93 | return value.sort(c); 94 | } 95 | catch (e) { 96 | return value.sort(); 97 | } 98 | }, 99 | reduceNumericSort: function(value, arg) { 100 | value.sort(RiakHelper.numericSorter); 101 | return value; 102 | }, 103 | reduceLimit: function(value, arg) { 104 | return value.slice(0, arg - 1); 105 | }, 106 | reduceSlice: function(value, arg) { 107 | var start = arg[0]; 108 | var end = arg[1]; 109 | if (end > value.length) { 110 | return value; 111 | } 112 | else { 113 | return value.slice(start, end); 114 | } 115 | } 116 | }; 117 | }(); 118 | -------------------------------------------------------------------------------- /priv/multi_backend.schema: -------------------------------------------------------------------------------- 1 | %%-*- mode: erlang -*- 2 | 3 | %% @doc The default name of a backend when one is not specified. 4 | {mapping, "multi_backend.default", "riak_kv.multi_backend_default", [ 5 | hidden 6 | ]}. 7 | 8 | {translation, 9 | "riak_kv.multi_backend_default", 10 | fun(Conf) -> 11 | list_to_binary(cuttlefish:conf_get(["multi_backend", "default"], Conf)) 12 | end}. 
%% @doc Storage_backend specifies the Erlang module defining the storage
%% mechanism that will be used on this node.
{mapping, "multi_backend.$name.storage_backend", "riak_kv.multi_backend", [
  {default, bitcask},
  {datatype, {enum, [bitcask, leveldb, leveled, memory]}},
  hidden
]}.

{translation,
 "riak_kv.multi_backend",
 fun(Conf, Schema) ->
    %% Re-run the cuttlefish generator on the sub-keys of one named
    %% backend and pull out that backend's proplist.
    GenerateSubConfig =
      fun(Name, Prefix, ProplistKey, ModuleName) ->
        BackendConfigName = ["multi_backend", Name],
        BackendConfigPrefix = BackendConfigName ++ [Prefix],
        SubConf = [ begin
                      {lists:nthtail(2, Key), Value}
                    end || {Key, Value} <- cuttlefish_variable:filter_by_prefix(BackendConfigPrefix, Conf)],
        case cuttlefish_generator:map(Schema, SubConf) of
            {error, _Phase, _Errors} ->
                cuttlefish:invalid(
                  lists:flatten(io_lib:format(
                    "Error processing multi_backend configuration for backend ~s", [Name])));
            BackendProplist ->
                %% Descend through ProplistKey to the backend's own settings.
                Proplist = lists:foldl(
                             fun(K, Acc) ->
                                 proplists:get_value(K, Acc, [])
                             end,
                             BackendProplist, ProplistKey),
                {ModuleName, Proplist}
        end
      end,
    %% group by $name into list, also cut the "multi_backend.$name" off every key
    BackendNames = cuttlefish_variable:fuzzy_matches(["multi_backend","$name","storage_backend"], Conf),
    %% for each in list, case statement on backend type
    Backends =
      [ begin
          BackendConfigName = ["multi_backend", Name],
          {BackendModule, BackendConfig} =
            case cuttlefish:conf_get(BackendConfigName ++ ["storage_backend"], Conf) of
                bitcask ->
                    GenerateSubConfig(Name, "bitcask", [bitcask], riak_kv_bitcask_backend);
                leveldb ->
                    GenerateSubConfig(Name, "leveldb", [eleveldb], riak_kv_eleveldb_backend);
                leveled ->
                    GenerateSubConfig(Name, "leveled", [leveled], riak_kv_leveled_backend);
                memory ->
                    GenerateSubConfig(Name, "memory_backend", [riak_kv, memory_backend], riak_kv_memory_backend)
            end,
          {list_to_binary(Name), BackendModule, BackendConfig}
        end || {"$name", Name} <- BackendNames],
    case Backends of
        [] -> throw(unset);
        _ -> Backends
    end
 end
}.
--------------------------------------------------------------------------------
/priv/tracers/tracer_accumulating_time.erl:
--------------------------------------------------------------------------------
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2014 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%%   http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------

-module(tracer_accumulating_time).

-compile(export_all).
-compile(nowarn_export_all).

%% Trace the given pids for calls/returns of the given MFAs, and print
%% accumulated call counts and elapsed time every IntervalMS.
start(Pid_list, MFA_list, IntervalMS) ->
    dbg:tracer(process, {fun trace/2, new_stats()}),
    [dbg:p(Pid, [call, timestamp, arity]) || Pid <- Pid_list],
    [catch dbg:tpl(Mod, Func, Arity, [{'_', [], [{return_trace}]}]) ||
        {Mod, Func, Arity} <- MFA_list],

    {ok, TPid} = dbg:get_tracer(),
    io:format("Tracer pid: ~p, use ~p:stop() to stop\n", [TPid, ?MODULE]),
    timer:send_interval(IntervalMS, TPid, print_report),
    {started, TPid}.

stop() ->
    dbg:stop_clear(),
    catch exit(element(2,dbg:get_tracer()), kill),
    stopped.
41 | 42 | trace({trace_ts, Pid, call, {Mod, Func, Arity}, TS}, {Dict}) -> 43 | MFA = {Mod, Func, Arity}, 44 | DKey = {Pid, MFA}, 45 | {dict:store(DKey, TS, Dict)}; 46 | trace({trace_ts, Pid, return_from, {Mod, Func, Arity}, _Res, TS}, {Dict}) -> 47 | MFA = {Mod, Func, Arity}, 48 | DKey = {Pid, MFA}, 49 | Start = case dict:find(DKey, Dict) of 50 | {ok, StTime} -> StTime; 51 | error -> os:timestamp() 52 | end, 53 | Elapsed = timer:now_diff(TS, Start), 54 | SumKey = {sum, MFA}, 55 | {OldCount, OldTime} = case dict:find(SumKey, Dict) of 56 | error -> 57 | {0, 0}; 58 | {ok, Else} -> 59 | Else 60 | end, 61 | Dict2 = dict:erase(DKey, Dict), 62 | {dict:store(SumKey, {OldCount+1, OldTime+Elapsed}, Dict2)}; 63 | trace(print_report, {Dict}) -> 64 | print_stats(Dict), 65 | {dict:from_list([X || {K, _V} = X <- dict:to_list(Dict), 66 | element(1, K) /= sum])}; 67 | trace(Unknown, {Dict}) -> 68 | erlang:display(wha), 69 | io:format("Unknown! ~P\n", [Unknown, 20]), 70 | {Dict}. 71 | 72 | new_stats() -> 73 | {dict:new()}. 74 | 75 | print_stats(Dict) -> 76 | Reports = lists:sort([{MFA, X} || {{sum, MFA}, X} <- dict:to_list(Dict)]), 77 | [io:format("~p MFA ~p count ~p elapsed_msec ~p\n", 78 | [time(), MFA, Count, Sum div 1000]) || 79 | {MFA, {Count, Sum}} <- Reports]. 80 | -------------------------------------------------------------------------------- /priv/tracers/tracer_eleveldb_put_size.erl: -------------------------------------------------------------------------------- 1 | %% ------------------------------------------------------------------- 2 | %% 3 | %% Copyright (c) 2014 Basho Technologies, Inc. All Rights Reserved. 4 | %% 5 | %% This file is provided to you under the Apache License, 6 | %% Version 2.0 (the "License"); you may not use this file 7 | %% except in compliance with the License. 
You may obtain 8 | %% a copy of the License at 9 | %% 10 | %% http://www.apache.org/licenses/LICENSE-2.0 11 | %% 12 | %% Unless required by applicable law or agreed to in writing, 13 | %% software distributed under the License is distributed on an 14 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 | %% KIND, either express or implied. See the License for the 16 | %% specific language governing permissions and limitations 17 | %% under the License. 18 | %% 19 | %% ------------------------------------------------------------------- 20 | 21 | -module(tracer_eleveldb_put_size). 22 | 23 | -compile(nowarn_export_all). 24 | -compile(export_all). 25 | 26 | start() -> 27 | start(10*1000). 28 | 29 | start(Interval) -> 30 | Stats = {StatName, _} = new_stats(), 31 | reset_metric(StatName), 32 | 33 | dbg:tracer(process, {fun trace/2, Stats}), 34 | dbg:p(all, [call]), 35 | dbg:tpl(eleveldb, write, 3, [{'_', [], []}]), 36 | 37 | {ok, TPid} = dbg:get_tracer(), 38 | io:format("Tracer pid: ~p, use ~p:stop() to stop\n", [TPid, ?MODULE]), 39 | timer:send_interval(Interval, TPid, print_report), 40 | {started, TPid}. 41 | 42 | stop() -> 43 | dbg:stop_clear(), 44 | catch exit(element(2,dbg:get_tracer()), kill), 45 | stopped. 46 | 47 | trace({trace, _Pid, call, {eleveldb, write, [_, PutList, _]}}, 48 | {StatName, SumBytes}) -> 49 | Bs = [begin 50 | Bs = size(K) + size(V), 51 | folsom_metrics_histogram:update(StatName, Bs), 52 | Bs 53 | end || {put, K, V} <- PutList], 54 | {StatName, SumBytes + lists:sum(Bs)}; 55 | trace(print_report, Stats = {StatName, _}) -> 56 | print_stats(Stats), 57 | reset_metric(StatName), 58 | new_stats(); 59 | trace(_Unknown, Stats) -> 60 | erlang:display(wha), 61 | %% io:format("Unknown! ~P\n", [Unknown, 20]), 62 | Stats. 63 | 64 | new_stats() -> 65 | {foo, 0}. 
66 | 67 | print_stats({StatName, SumBytes}) -> 68 | if SumBytes == 0 -> 69 | io:format("~p ~p: 0 bytes\n", [date(), time()]); 70 | true -> 71 | Ps = folsom_metrics:get_histogram_statistics(StatName), 72 | io:format("~p ~p: ~p bytes\n ~p\n", [date(), time(), SumBytes, Ps]) 73 | end. 74 | 75 | reset_metric(Stats) -> 76 | catch folsom_metrics:delete_metric(Stats), 77 | folsom_metrics:new_histogram(Stats, uniform, 9981239823). 78 | -------------------------------------------------------------------------------- /priv/tracers/tracer_fsm_init.erl: -------------------------------------------------------------------------------- 1 | %% ------------------------------------------------------------------- 2 | %% 3 | %% Copyright (c) 2014 Basho Technologies, Inc. All Rights Reserved. 4 | %% 5 | %% This file is provided to you under the Apache License, 6 | %% Version 2.0 (the "License"); you may not use this file 7 | %% except in compliance with the License. You may obtain 8 | %% a copy of the License at 9 | %% 10 | %% http://www.apache.org/licenses/LICENSE-2.0 11 | %% 12 | %% Unless required by applicable law or agreed to in writing, 13 | %% software distributed under the License is distributed on an 14 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 | %% KIND, either express or implied. See the License for the 16 | %% specific language governing permissions and limitations 17 | %% under the License. 18 | %% 19 | %% ------------------------------------------------------------------- 20 | 21 | -module(tracer_fsm_init). 22 | 23 | -compile(nowarn_export_all). 24 | -compile(export_all). 25 | 26 | start() -> 27 | start(1*1000). 
28 | 29 | start(Interval) -> 30 | %%% Count the get, put, buckets, keys, exchange, and index FSM init() calls 31 | dbg:tracer(process, {fun trace/2, new_stats()}), 32 | dbg:p(all, call), 33 | [dbg:tpl(Mod, init, 1, [{'_', [], []}]) || 34 | Mod <- [riak_kv_buckets_fsm, riak_kv_exchange_fsm, riak_kv_get_fsm, riak_kv_index_fsm, riak_kv_keys_fsm, riak_kv_put_fsm]], 35 | dbg:tpl(riak_kv_put_fsm, start_link, 3, [{'_', [], []}]), 36 | 37 | %% Don't need return_trace events for this use case, but here's 38 | %% how to do it if needed. 39 | %%dbg:tpl(bitcask, merge_single_entry, 6, [{'_', [], [{return_trace}]}]). 40 | 41 | {ok, TPid} = dbg:get_tracer(), 42 | io:format("Tracer pid: ~p, use ~p:stop() to stop\n", [TPid, ?MODULE]), 43 | timer:send_interval(Interval, TPid, print_report), 44 | {started, TPid}. 45 | 46 | stop() -> 47 | dbg:stop_clear(), 48 | catch exit(element(2,dbg:get_tracer()), kill), 49 | stopped. 50 | 51 | trace({trace, _Pid, call, {riak_kv_put_fsm, start_link, _}}, 52 | {Pstart_link, R, P, B, E, I, K}) -> 53 | {Pstart_link+1, R, P, B, E, I, K}; 54 | trace({trace, _Pid, call, {riak_kv_get_fsm, init, _}}, 55 | {Pstart_link, R, P, B, E, I, K}) -> 56 | {Pstart_link, R+1, P, B, E, I, K}; 57 | trace({trace, _Pid, call, {riak_kv_put_fsm, init, _}}, 58 | {Pstart_link, R, P, B, E, I, K}) -> 59 | {Pstart_link, R, P+1, B, E, I, K}; 60 | trace({trace, _Pid, call, {riak_kv_buckets_fsm, init, _}}, 61 | {Pstart_link, R, P, B, E, I, K}) -> 62 | {Pstart_link, R, P, B+1, E, I, K}; 63 | trace({trace, _Pid, call, {riak_kv_exchange_fsm, init, _}}, 64 | {Pstart_link, R, P, B, E, I, K}) -> 65 | {Pstart_link, R, P, B, E+1, I, K}; 66 | trace({trace, _Pid, call, {riak_kv_index_fsm, init, _}}, 67 | {Pstart_link, R, P, B, E, I, K}) -> 68 | {Pstart_link, R, P, B, E, I+1, K}; 69 | trace({trace, _Pid, call, {riak_kv_keys_fsm, init, _}}, 70 | {Pstart_link, R, P, B, E, I, K}) -> 71 | {Pstart_link, R, P, B, E, I, K+1}; 72 | trace(print_report, Stats) -> 73 | print_stats(Stats), 74 | 
new_stats(); 75 | trace(Unknown, Stats) -> 76 | erlang:display(wha), 77 | io:format("Unknown! ~P\n", [Unknown, 20]), 78 | Stats. 79 | 80 | new_stats() -> 81 | {0, 82 | 0, 0, 0, 0, 0, 0}. 83 | 84 | print_stats({Pstart_link, Get, Put, Buckets, Exchange, Index, Keys}) -> 85 | Stats = [{put_start, Pstart_link}, 86 | {get, Get}, 87 | {put, Put}, 88 | {buckets, Buckets}, 89 | {exchange, Exchange}, 90 | {index, Index}, 91 | {keys, Keys}], 92 | io:format("~p ~p: ~p\n", [date(), time(), Stats]). 93 | -------------------------------------------------------------------------------- /priv/tracers/tracer_func_args.erl: -------------------------------------------------------------------------------- 1 | %% ------------------------------------------------------------------- 2 | %% 3 | %% Copyright (c) 2014 Basho Technologies, Inc. All Rights Reserved. 4 | %% 5 | %% This file is provided to you under the Apache License, 6 | %% Version 2.0 (the "License"); you may not use this file 7 | %% except in compliance with the License. You may obtain 8 | %% a copy of the License at 9 | %% 10 | %% http://www.apache.org/licenses/LICENSE-2.0 11 | %% 12 | %% Unless required by applicable law or agreed to in writing, 13 | %% software distributed under the License is distributed on an 14 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 | %% KIND, either express or implied. See the License for the 16 | %% specific language governing permissions and limitations 17 | %% under the License. 18 | %% 19 | %% ------------------------------------------------------------------- 20 | 21 | %% For example: what ETS tables are being called the most by ets:lookup/2? 22 | %% The 1st arg of ets:lookup/2 is the table name. 23 | %% Watch for 10 seconds. 24 | %% 25 | %% > func_args_tracer:start(ets, lookup, 2, 10, fun(Args) -> hd(Args) end). 
26 | %% 27 | %% Tracer pid: <0.16102.15>, use func_args_tracer:stop() to stop 28 | %% Otherwise, tracing stops in 10 seconds 29 | %% Current date & time: {2013,9,19} {18,5,48} 30 | %% {started,<0.16102.15>} 31 | %% Total calls: 373476 32 | %% Call stats: 33 | %% [{folsom_histograms,114065}, 34 | %% {ac_tab,69689}, 35 | %% {ets_riak_core_ring_manager,67147}, 36 | %% {folsom_spirals,57076}, 37 | %% {riak_capability_ets,48862}, 38 | %% {riak_core_node_watcher,8149}, 39 | %% {riak_api_pb_registrations,8144}, 40 | %% {folsom,243}, 41 | %% {folsom_meters,43}, 42 | %% {folsom_durations,20}, 43 | %% {timer_tab,18}, 44 | %% {folsom_gauges,8}, 45 | %% {riak_core_stat_cache,5}, 46 | %% {sys_dist,3}, 47 | %% {inet_db,1}, 48 | %% {21495958,1}, 49 | %% {3145765,1}, 50 | %% {3407910,1}] 51 | %% 52 | 53 | -module(tracer_func_args). 54 | 55 | -compile(nowarn_export_all). 56 | -compile(export_all). 57 | 58 | start(Mod, Func, Arity, RunSeconds) -> 59 | start(Mod, Func, Arity, RunSeconds, fun(Args) -> Args end). 60 | 61 | start(Mod, Func, Arity, RunSeconds, ArgMangler) -> 62 | catch ets:delete(foo), 63 | ets:new(foo, [named_table, public, set]), 64 | dbg:tracer(process, {fun trace/2, new_stats({foo, ArgMangler})}), 65 | dbg:p(all, call), 66 | dbg:tpl(Mod, Func, Arity, [{'_', [], []}]), 67 | 68 | {ok, TPid} = dbg:get_tracer(), 69 | io:format("Tracer pid: ~p, use ~p:stop() to stop\n", [TPid, ?MODULE]), 70 | io:format("Otherwise, tracing stops in ~p seconds\n", [RunSeconds]), 71 | io:format("Current date & time: ~p ~p\n", [date(), time()]), 72 | spawn(fun() -> timer:sleep(RunSeconds * 1000), stop() end), 73 | {started, TPid}. 
74 | 75 | stop() -> 76 | Sort = fun({_,A}, {_, B}) -> A > B end, 77 | Res = ets:tab2list(foo), 78 | TotalCalls = lists:sum([Count || {_Arg, Count} <- Res]), 79 | io:format("Total calls: ~p\n", [TotalCalls]), 80 | io:format("Call stats:\n~p\n", [catch lists:sort(Sort, Res)]), 81 | dbg:stop_clear(), 82 | catch exit(element(2,dbg:get_tracer()), kill), 83 | timer:sleep(100), 84 | stopped. 85 | 86 | trace({trace, _Pid, call, {_, _, Args}}, {Tab, ArgMangler} = Acc) -> 87 | Args2 = ArgMangler(Args), 88 | try 89 | ets:update_counter(Tab, Args2, {2, 1}) 90 | catch _:_ -> 91 | ets:insert(Tab, {Args2, 1}) 92 | end, 93 | Acc; 94 | trace(Unknown, DictStats) -> 95 | io:format("Unknown! ~P\n", [Unknown, 20]), 96 | DictStats. 97 | 98 | new_stats({Tab, _ArgMangler} = Acc) -> 99 | ets:delete_all_objects(Tab), 100 | Acc. 101 | 102 | print_stats(_DictStats) -> 103 | ok. 104 | 105 | -------------------------------------------------------------------------------- /priv/tracers/tracer_gc_latency.erl: -------------------------------------------------------------------------------- 1 | %% ------------------------------------------------------------------- 2 | %% 3 | %% Copyright (c) 2014 Basho Technologies, Inc. All Rights Reserved. 4 | %% 5 | %% This file is provided to you under the Apache License, 6 | %% Version 2.0 (the "License"); you may not use this file 7 | %% except in compliance with the License. You may obtain 8 | %% a copy of the License at 9 | %% 10 | %% http://www.apache.org/licenses/LICENSE-2.0 11 | %% 12 | %% Unless required by applicable law or agreed to in writing, 13 | %% software distributed under the License is distributed on an 14 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 | %% KIND, either express or implied. See the License for the 16 | %% specific language governing permissions and limitations 17 | %% under the License. 18 | %% 19 | %% ------------------------------------------------------------------- 20 | 21 | -module(tracer_gc_latency). 
22 | 23 | -compile(nowarn_export_all). 24 | -compile(export_all). 25 | 26 | start(LatencyMS) -> 27 | catch folsom_metrics:delete_metric(foo), 28 | folsom_metrics:new_histogram(foo, uniform, 50*1000*1000), 29 | dbg:tracer(process, {fun trace/2, new_stats(LatencyMS)}), 30 | {ok, _} = dbg:p(all, [timestamp, garbage_collection, running]), 31 | 32 | {ok, TPid} = dbg:get_tracer(), 33 | io:format("Tracer pid: ~p, use ~p:stop() to stop\n", [TPid, ?MODULE]), 34 | io:format("Current date & time: ~p ~p local time\n", [date(), time()]), 35 | {started, TPid}. 36 | 37 | stop() -> 38 | dbg:stop_clear(), 39 | catch exit(element(2,dbg:get_tracer()), kill), 40 | timer:sleep(100), 41 | catch folsom_metrics:delete_metric(foo), 42 | stopped. 43 | 44 | trace({trace_ts, Pid, gc_start, _Stats, TS}, {Dict, LMS}) -> 45 | {dict:store(Pid, TS, Dict), LMS}; 46 | trace({trace_ts, Pid, gc_end, _Stats, TS}, {Dict, LMS}=Acc) -> 47 | DKey = Pid, 48 | case dict:find(DKey, Dict) of 49 | {ok, GcStart} -> 50 | Elapsed = erlang:max(-1, (timer:now_diff(TS, GcStart) div 1000)), 51 | if Elapsed > LMS -> 52 | io:format("~p: GC of ~p elapsed time ~p > threshold ~p\n", 53 | [time(), Pid, Elapsed, LMS]), 54 | io:format(" ~w,~w\n", [process_info(Pid, message_queue_len), _Stats]); 55 | true -> 56 | ok 57 | end, 58 | {dict:erase(DKey, Dict), LMS}; 59 | error -> 60 | Acc 61 | end; 62 | trace({trace_ts, Pid, InOrOut, _MFA, TS}, {Dict, _LMS}=Acc) -> 63 | DKey = Pid, 64 | case dict:find(DKey, Dict) of 65 | {ok, GcStart} -> 66 | io:format("Hey, pid ~p scheduled ~p but started GC ~p msec ago\n", 67 | [Pid, InOrOut, timer:now_diff(TS, GcStart)]); 68 | _ -> 69 | ok 70 | end, 71 | Acc; 72 | trace(Unknown, DictStats) -> 73 | erlang:display(wha), 74 | io:format("Unknown! ~P\n\t~P", [Unknown, 20, DictStats,7]), 75 | DictStats. 76 | 77 | new_stats(LatencyMS) -> 78 | {dict:new(), LatencyMS}. 79 | 80 | print_stats(_DictStats) -> 81 | ok. 
82 | 83 | -------------------------------------------------------------------------------- /priv/tracers/tracer_large4.erl: -------------------------------------------------------------------------------- 1 | %% ------------------------------------------------------------------- 2 | %% 3 | %% Copyright (c) 2014 Basho Technologies, Inc. All Rights Reserved. 4 | %% 5 | %% This file is provided to you under the Apache License, 6 | %% Version 2.0 (the "License"); you may not use this file 7 | %% except in compliance with the License. You may obtain 8 | %% a copy of the License at 9 | %% 10 | %% http://www.apache.org/licenses/LICENSE-2.0 11 | %% 12 | %% Unless required by applicable law or agreed to in writing, 13 | %% software distributed under the License is distributed on an 14 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 | %% KIND, either express or implied. See the License for the 16 | %% specific language governing permissions and limitations 17 | %% under the License. 18 | %% 19 | %% ------------------------------------------------------------------- 20 | 21 | -module(tracer_large4). 22 | 23 | -compile(nowarn_export_all). 24 | -compile(export_all). 25 | 26 | -record(r_object, {bucket = '_', 27 | key = '_', 28 | contents = '_', 29 | vclock = '_', 30 | updatemetadata = '_', 31 | updatevalue = '_' 32 | }). 
33 | 34 | go(Time, Count, Size) -> 35 | ss(), 36 | %% gets 37 | GetMS = [{['_', 38 | #r_object{bucket='$1', 39 | key='$2'}], 40 | [], 41 | [{message,{{'$1','$2'}}}, {return_trace}]}], 42 | erlang:trace_pattern({riak_kv_get_fsm, calculate_objsize, 2}, GetMS, [local]), 43 | 44 | %% puts 45 | PutMS = [{['$1','$2','_','$3','_'], 46 | [{'>',{size,'$3'},Size}], 47 | [{message,{{'$1','$2',{size,'$3'}}}}]}], 48 | erlang:trace_pattern({riak_kv_eleveldb_backend, put, 5}, PutMS, [local]), 49 | erlang:trace_pattern({riak_kv_bitcask_backend, put, 5}, PutMS, [local]), 50 | erlang:trace_pattern({riak_kv_memory_backend, put, 5}, PutMS, [local]), 51 | 52 | {Tracer, _} = spawn_monitor(?MODULE, tracer, [0, Count, Size, dict:new()]), 53 | erlang:trace(all, true, [call, arity, {tracer, Tracer}]), 54 | receive 55 | {'DOWN', _, process, Tracer, _} -> 56 | ok 57 | after Time -> 58 | exit(Tracer, kill), 59 | receive 60 | {'DOWN', _, process, Tracer, _} -> 61 | ok 62 | end 63 | end, 64 | ss(), 65 | io:format("object trace stopped~n"). 
66 | 67 | tracer(Limit, Limit, _, _) -> 68 | ok; 69 | tracer(Count, Limit, Threshold, Objs) -> 70 | receive 71 | {trace,Pid,call,{riak_kv_get_fsm,calculate_objsize,2},{Bucket,Key}} -> 72 | Objs2 = dict:store(Pid, {Bucket,Key}, Objs), 73 | tracer(Count+1, Limit, Threshold, Objs2); 74 | {trace,Pid,return_from,{riak_kv_get_fsm,calculate_objsize,2},Size} -> 75 | case Size >= Threshold of 76 | true -> 77 | case dict:find(Pid, Objs) of 78 | {ok, {Bucket, Key}} -> 79 | io:format("~p: get: ~p~n", [ts(), {Bucket, Key, Size}]); 80 | _ -> 81 | ok 82 | end; 83 | false -> 84 | ok 85 | end, 86 | Objs2 = dict:erase(Pid, Objs), 87 | tracer(Count+1, Limit, Threshold, Objs2); 88 | {trace,_Pid,call,{riak_kv_eleveldb_backend,put,5},{Bucket,Key,Size}} -> 89 | io:format("~p: put(l): ~p~n", [ts(), {Bucket, Key, Size}]), 90 | tracer(Count+1, Limit, Threshold, Objs); 91 | {trace,_Pid,call,{riak_kv_bitcask_backend,put,5},{Bucket,Key,Size}} -> 92 | io:format("~p: put(b): ~p~n", [ts(), {Bucket, Key, Size}]), 93 | tracer(Count+1, Limit, Threshold, Objs); 94 | {trace,_Pid,call,{riak_kv_memory_backend,put,5},{Bucket,Key,Size}} -> 95 | io:format("~p: put(m): ~p~n", [ts(), {Bucket, Key, Size}]), 96 | tracer(Count+1, Limit, Threshold, Objs); 97 | Msg -> 98 | io:format("tracer: ~p~n", [Msg]), 99 | tracer(Count+1, Limit, Threshold, Objs) 100 | end. 101 | 102 | ts() -> 103 | calendar:now_to_datetime(os:timestamp()). 104 | 105 | ss() -> 106 | erlang:trace_pattern({'_','_','_'}, false, [local]), 107 | erlang:trace(all, false, [call, arity]). 108 | -------------------------------------------------------------------------------- /priv/tracers/tracer_latency_histogram.erl: -------------------------------------------------------------------------------- 1 | %% ------------------------------------------------------------------- 2 | %% 3 | %% Copyright (c) 2014 Basho Technologies, Inc. All Rights Reserved. 
4 | %% 5 | %% This file is provided to you under the Apache License, 6 | %% Version 2.0 (the "License"); you may not use this file 7 | %% except in compliance with the License. You may obtain 8 | %% a copy of the License at 9 | %% 10 | %% http://www.apache.org/licenses/LICENSE-2.0 11 | %% 12 | %% Unless required by applicable law or agreed to in writing, 13 | %% software distributed under the License is distributed on an 14 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 | %% KIND, either express or implied. See the License for the 16 | %% specific language governing permissions and limitations 17 | %% under the License. 18 | %% 19 | %% ------------------------------------------------------------------- 20 | 21 | %% For example: create a histogram of call latencies for bitcask:put/3. 22 | %% Watch for 10 seconds. 23 | %% 24 | %% > latency_histogram_tracer:start(bitcask, put, 3, 10). 25 | %% 26 | %% Tracer pid: <0.2108.18>, use latency_histogram_tracer:stop() to stop 27 | %% Otherwise, tracing stops in 10 seconds 28 | %% Current date & time: {2013,9,19} {18,14,13} 29 | %% {started,<0.2108.18>} 30 | %% Histogram stats: 31 | %% [{min,0}, 32 | %% {max,48}, 33 | %% {arithmetic_mean,2.765411819271055}, 34 | %% {geometric_mean,2.527103493663478}, 35 | %% {harmonic_mean,2.2674039086593973}, 36 | %% {median,3}, 37 | %% {variance,3.5629207473971585}, 38 | %% {standard_deviation,1.8875700642352746}, 39 | %% {skewness,2.0360354571500774}, 40 | %% {kurtosis,18.529695846728423}, 41 | %% {percentile,[{50,3},{75,4},{90,5},{95,6},{99,8},{999,14}]}, 42 | %% {histogram,[{1,13436}, 43 | %% {2,12304}, 44 | %% {3,10789}, 45 | %% {4,7397}, 46 | %% {5,4191}, 47 | %% {6,1929}, 48 | %% {7,873}, 49 | %% {8,420}, 50 | %% {9,163}, 51 | %% {10,79}, 52 | %% {11,42}, 53 | %% {12,47}, 54 | %% {13,11}, 55 | %% {14,16}, 56 | %% {15,7}, 57 | %% {16,5}, 58 | %% {17,3}, 59 | %% {18,4}, 60 | %% {19,2}, 61 | %% {20,4}, 62 | %% {21,1}, 63 | %% {22,11}, 64 | %% {23,2}, 65 | %% {24,1}, 66 | 
%% {25,2}, 67 | %% {26,1}, 68 | %% {27,0}, 69 | %% {28,1}, 70 | %% {29,2}, 71 | %% {30,0}, 72 | %% {31,0}, 73 | %% {40,2}, 74 | %% {50,1}]}, 75 | %% {n,51746}] 76 | 77 | -module(tracer_latency_histogram). 78 | 79 | -compile(nowarn_export_all). 80 | -compile(export_all). 81 | 82 | start(Mod, Func, Arity, RunSeconds) -> 83 | catch folsom_metrics:delete_metric(foo), 84 | folsom_metrics:new_histogram(foo, uniform, 50*1000*1000), 85 | dbg:tracer(process, {fun trace/2, new_stats(0)}), 86 | dbg:p(all, [call, timestamp, arity]), 87 | dbg:tpl(Mod, Func, Arity, [{'_', [], [{return_trace}]}]), 88 | 89 | {ok, TPid} = dbg:get_tracer(), 90 | io:format("Tracer pid: ~p, use ~p:stop() to stop\n", [TPid, ?MODULE]), 91 | io:format("Otherwise, tracing stops in ~p seconds\n", [RunSeconds]), 92 | io:format("Current date & time: ~p ~p\n", [date(), time()]), 93 | spawn(fun() -> timer:sleep(RunSeconds * 1000), stop() end), 94 | {started, TPid}. 95 | 96 | stop() -> 97 | io:format("Histogram stats:\n~p\n", [catch folsom_metrics:get_histogram_statistics(foo)]), 98 | dbg:stop_clear(), 99 | catch exit(element(2,dbg:get_tracer()), kill), 100 | timer:sleep(100), 101 | catch folsom_metrics:delete_metric(foo), 102 | stopped. 103 | 104 | trace({trace_ts, Pid, call, {_, _, _}, TS}, {Dict, LMS}) -> 105 | {dict:store(Pid, TS, Dict), LMS}; 106 | trace({trace_ts, Pid, return_from, {_, _, _}, _Res, TS}, {Dict, LatencyMS}) -> 107 | DKey = Pid, 108 | Start = case dict:find(DKey, Dict) of 109 | {ok, StTime} -> StTime; 110 | error -> os:timestamp() 111 | end, 112 | Elapsed = timer:now_diff(TS, Start) div 1000, 113 | folsom_metrics_histogram:update(foo, Elapsed), 114 | {dict:erase(DKey, Dict), LatencyMS}; 115 | trace(print_report, DictStats) -> 116 | DictStats; 117 | trace(Unknown, DictStats) -> 118 | erlang:display(wha), 119 | io:format("Unknown! ~P\n", [Unknown, 20]), 120 | DictStats. 121 | 122 | new_stats(LatencyMS) -> 123 | {dict:new(), LatencyMS}. 124 | 125 | print_stats(_DictStats) -> 126 | ok. 
127 | 128 | -------------------------------------------------------------------------------- /priv/tracers/tracer_merge_and_and_handoff.erl: -------------------------------------------------------------------------------- 1 | %% ------------------------------------------------------------------- 2 | %% 3 | %% Copyright (c) 2014 Basho Technologies, Inc. All Rights Reserved. 4 | %% 5 | %% This file is provided to you under the Apache License, 6 | %% Version 2.0 (the "License"); you may not use this file 7 | %% except in compliance with the License. You may obtain 8 | %% a copy of the License at 9 | %% 10 | %% http://www.apache.org/licenses/LICENSE-2.0 11 | %% 12 | %% Unless required by applicable law or agreed to in writing, 13 | %% software distributed under the License is distributed on an 14 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 | %% KIND, either express or implied. See the License for the 16 | %% specific language governing permissions and limitations 17 | %% under the License. 18 | %% 19 | %% ------------------------------------------------------------------- 20 | 21 | -module(tracer_merge_and_and_handoff). 22 | 23 | -compile(nowarn_export_all). 24 | -compile(export_all). 25 | 26 | start() -> 27 | start(1*1000). 28 | 29 | start(Interval) -> 30 | dbg:tracer(process, {fun trace/2, {orddict:new(), orddict:new()}}), 31 | dbg:p(all, call), 32 | dbg:tpl(bitcask, merge_single_entry, 6, [{'_', [], []}]), 33 | dbg:tpl(riak_kv_vnode, encode_handoff_item, 2, [{'_', [], []}]), 34 | dbg:tpl(riak_core_handoff_receiver, process_message, 3, [{'_', [], []}]), 35 | 36 | %% Don't need return_trace events for this use case, but here's 37 | %% how to do it if needed. 38 | %%dbg:tpl(bitcask, merge_single_entry, 6, [{'_', [], [{return_trace}]}]). 39 | 40 | {ok, TPid} = dbg:get_tracer(), 41 | io:format("Tracer pid: ~p, use ~p:stop() to stop\n", [TPid, ?MODULE]), 42 | timer:send_interval(Interval, TPid, print_report), 43 | {started, TPid}. 
44 | 45 | stop() -> 46 | dbg:stop_clear(), 47 | catch exit(element(2,dbg:get_tracer()), kill), 48 | stopped. 49 | 50 | trace({trace, _Pid, call, {bitcask, merge_single_entry, 51 | [K, V, _TS, _FId, {File,_,_,_}, _State]}}, 52 | {MDict, HDict}) -> 53 | Dir = re:replace(File, "/[^/]*\$", "", [{return, binary}]), 54 | Bytes = size(K) + size(V), 55 | MDict2 = increment_cbdict(MDict, Dir, Bytes), 56 | {MDict2, HDict}; 57 | trace({trace, _Pid, call, {riak_kv_vnode, encode_handoff_item, 58 | [{B, K}, V]}}, 59 | {MDict, HDict}) -> 60 | Bytes = size(B) + size(K) + size(V), 61 | Key = "all-sending-handoff", 62 | HDict2 = increment_cbdict(HDict, Key, Bytes), 63 | {MDict, HDict2}; 64 | trace({trace, _Pid, call, {riak_core_handoff_receiver, process_message, 65 | [_Type, Msg, State]}}, 66 | {MDict, HDict}) -> 67 | Bytes = size(Msg), 68 | Partition = element(5, State), % ugly hack 69 | Key = Partition, 70 | HDict2 = increment_cbdict(HDict, Key, Bytes), 71 | {MDict, HDict2}; 72 | trace(print_report, {MDict, HDict}) -> 73 | print_stats(MDict, merge), 74 | print_stats(HDict, handoff), 75 | {orddict:new(), orddict:new()}. 76 | 77 | %% "cb" = count + bytes 78 | increment_cbdict(Dict, Key, Bytes) -> 79 | orddict:update(Key, fun({Count, Bs}) -> {Count + 1, Bs + Bytes} end, 80 | {1, Bytes}, Dict). 81 | 82 | print_stats(Dict, Type) -> 83 | F = fun(Key, {Count, Bytes}, {SumC, SumB}) when Count > 0 -> 84 | io:format("~p ~p: ~p items ~p bytes ~p avg-size ~p\n", 85 | [date(), time(), Count, Bytes, Bytes div Count, Key]), 86 | {SumC + Count, SumB + Bytes}; 87 | (_, _, Acc) -> 88 | Acc 89 | end, 90 | {Count, Bytes} = orddict:fold(F, {0, 0}, Dict), 91 | Avg = if Count > 0 -> Bytes div Count; 92 | true -> 0 93 | end, 94 | io:format("~p ~p: ~p total: ~p items ~p bytes ~p avg-size\n", 95 | [date(), time(), Type, Count, Bytes, Avg]). 
96 | -------------------------------------------------------------------------------- /priv/tracers/tracer_read_bin_trace_file.erl: -------------------------------------------------------------------------------- 1 | %% ------------------------------------------------------------------- 2 | %% 3 | %% Copyright (c) 2014 Basho Technologies, Inc. All Rights Reserved. 4 | %% 5 | %% This file is provided to you under the Apache License, 6 | %% Version 2.0 (the "License"); you may not use this file 7 | %% except in compliance with the License. You may obtain 8 | %% a copy of the License at 9 | %% 10 | %% http://www.apache.org/licenses/LICENSE-2.0 11 | %% 12 | %% Unless required by applicable law or agreed to in writing, 13 | %% software distributed under the License is distributed on an 14 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 | %% KIND, either express or implied. See the License for the 16 | %% specific language governing permissions and limitations 17 | %% under the License. 18 | %% 19 | %% ------------------------------------------------------------------- 20 | 21 | -module(tracer_read_bin_trace_file). 22 | 23 | -compile(nowarn_export_all). 24 | -compile(export_all). 25 | 26 | -include("../../src/stacktrace.hrl"). 27 | 28 | read(Path) -> 29 | read(Path, 1). 30 | 31 | read(Path, LatencyMS) -> 32 | {ok, FH} = file:open(Path, [read, binary, raw]), 33 | read(file:read(FH, 5), FH, LatencyMS, []). 
34 | 35 | read(eof, _FH, _, _) -> 36 | ok; 37 | read({ok, <>}, FH, LatencyMS, Hist) -> 38 | {ok, Bin} = file:read(FH, Size), 39 | case binary_to_term(Bin) of 40 | {trace_ts, _, call, {M,F,A}, Time} -> 41 | %%io:format("call MFA = ~p:~p/~p, ", [M, F, length(A)]), 42 | read(file:read(FH, 5), FH, LatencyMS, [{{M,F,length(A)}, Time, A}|Hist]); 43 | {trace_ts, _, return_from, MFA, Res, EndTime} -> 44 | %%io:format("MFA ~p Hist ~p\n", [MFA, Hist]), 45 | try 46 | {value, {_, StartTime, A}, NewHist} = lists:keytake(MFA, 1, Hist), 47 | MSec = timer:now_diff(EndTime, StartTime)/1000, 48 | if MSec > LatencyMS -> 49 | io:format("~p ~p msec\nArgs: (~p/~p) ~P\nRes: ~P\n\n", 50 | [MFA, MSec, 51 | erts_debug:flat_size(A), erts_debug:size(A), 52 | A, 20, Res, 20]); 53 | true -> 54 | ok 55 | end, 56 | read(file:read(FH, 5), FH, LatencyMS, NewHist) 57 | catch 58 | error:{badmatch,false} -> 59 | read(file:read(FH, 5), FH, LatencyMS, Hist); 60 | ?_exception_(X, Y, StackToken) -> 61 | io:format("ERR ~p ~p @ ~p\n", [X, Y, ?_get_stacktrace_(StackToken)]), 62 | read(file:read(FH, 5), FH, LatencyMS, Hist) 63 | end 64 | end. 65 | 66 | %% read(eof, _FH) -> 67 | %% ok; 68 | %% read({ok, <>}, FH) -> 69 | %% {ok, Bin} = file:read(FH, Size), 70 | %% io:format("~P\n", [binary_to_term(Bin), 15]), 71 | %% read(file:read(FH, 5), FH). 72 | 73 | -------------------------------------------------------------------------------- /priv/tracers/tracer_timeit.erl: -------------------------------------------------------------------------------- 1 | %% ------------------------------------------------------------------- 2 | %% 3 | %% Copyright (c) 2014 Basho Technologies, Inc. All Rights Reserved. 4 | %% 5 | %% This file is provided to you under the Apache License, 6 | %% Version 2.0 (the "License"); you may not use this file 7 | %% except in compliance with the License. 
You may obtain 8 | %% a copy of the License at 9 | %% 10 | %% http://www.apache.org/licenses/LICENSE-2.0 11 | %% 12 | %% Unless required by applicable law or agreed to in writing, 13 | %% software distributed under the License is distributed on an 14 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 | %% KIND, either express or implied. See the License for the 16 | %% specific language governing permissions and limitations 17 | %% under the License. 18 | %% 19 | %% ------------------------------------------------------------------- 20 | 21 | -module(tracer_timeit). 22 | 23 | -compile(nowarn_export_all). 24 | -compile(export_all). 25 | 26 | %% @doc Dynamically add timing to MFA. There are various types of 27 | %% timing. 28 | %% 29 | %% all - time latency of all calls to MFA 30 | %% 31 | %% {sample, N, Max} - sample every N calls and stop sampling after Max 32 | %% 33 | %% {threshold, Millis, Max} - count # of calls where latency is > Millis 34 | %% and count # of calls total, thus percentage of calls over threshold 35 | timeit(Mod, Fun, Arity, Type) -> 36 | Type2 = case Type of 37 | {sample, N, Max} -> {sample, {N, Max}, {0, 0, 0}}; 38 | {threshold, Millis, Max} -> {threshold, {Millis, Max}, {0, 0}}; 39 | {all, Max} -> {all, {0, Max}} 40 | end, 41 | dbg:tracer(process, {fun trace/2, {orddict:new(), Type2}}), 42 | dbg:p(all, call), 43 | dbg:tpl(Mod, Fun, Arity, [{'_', [], [{return_trace}]}]). 44 | 45 | stop() -> dbg:stop_clear(). 
46 | 47 | trace({trace, Pid, call, {Mod, Fun, _}}, {D, {all, {Count, Max}}}) -> 48 | D2 = orddict:store({Pid, Mod, Fun}, os:timestamp(), D), 49 | {D2, {all, {Count, Max}}}; 50 | trace({trace, Pid, call, {Mod, Fun, _}}, 51 | {D, {sample, {N, Max}, {M, K, Total}}}) -> 52 | M2 = M+1, 53 | Total2 = Total+1, 54 | if N == M2 -> 55 | D2 = orddict:store({Pid, Mod, Fun}, os:timestamp(), D), 56 | {D2, {sample, {N, Max}, {0, K, Total2}}}; 57 | true -> 58 | {D, {sample, {N, Max}, {M2, K, Total2}}} 59 | end; 60 | trace({trace, Pid, call, {Mod, Fun, _}}, 61 | {D, {threshold, {Millis, Max}, {Over, Total}}}) -> 62 | D2 = orddict:store({Pid, Mod, Fun}, os:timestamp(), D), 63 | {D2, {threshold, {Millis, Max}, {Over, Total+1}}}; 64 | 65 | trace({trace, Pid, return_from, {Mod, Fun, _}, _Result}, 66 | Acc={D, {all, {Count, Max}}}) -> 67 | Key = {Pid, Mod, Fun}, 68 | case orddict:find(Key, D) of 69 | {ok, StartTime} -> 70 | Count2 = Count+1, 71 | ElapsedUs = timer:now_diff(os:timestamp(), StartTime), 72 | ElapsedMs = ElapsedUs/1000, 73 | io:format(user, "~p:~p:~p: ~p ms\n", [Pid, Mod, Fun, ElapsedMs]), 74 | if Count2 == Max -> stop(); 75 | true -> 76 | D2 = orddict:erase(Key, D), 77 | {D2, {all, {Count2, Max}}} 78 | end; 79 | error -> Acc 80 | end; 81 | trace({trace, Pid, return_from, {Mod, Fun, _}, _Result}, 82 | Acc={D, {sample, {N, Max}, {M, K, Total}}}) -> 83 | Key = {Pid, Mod, Fun}, 84 | case orddict:find(Key, D) of 85 | {ok, StartTime} -> 86 | K2 = K+1, 87 | ElapsedUs = timer:now_diff(os:timestamp(), StartTime), 88 | ElapsedMs = ElapsedUs/1000, 89 | io:format(user, "[sample ~p/~p] ~p:~p:~p: ~p ms\n", 90 | [K2, Total, Pid, Mod, Fun, ElapsedMs]), 91 | if K2 == Max -> stop(); 92 | true -> 93 | D2 = orddict:erase(Key, D), 94 | {D2, {sample, {N, Max}, {M, K2, Total}}} 95 | end; 96 | error -> Acc 97 | end; 98 | trace({trace, Pid, return_from, {Mod, Fun, _}, _Result}, 99 | Acc={D, {threshold, {Millis, Max}, {Over, Total}}}) -> 100 | Key = {Pid, Mod, Fun}, 101 | case orddict:find(Key, D) 
of 102 | {ok, StartTime} -> 103 | ElapsedUs = timer:now_diff(os:timestamp(), StartTime), 104 | ElapsedMs = ElapsedUs / 1000, 105 | if ElapsedMs > Millis -> 106 | Over2 = Over+1, 107 | io:format(user, "[over threshold ~p, ~p/~p] ~p:~p:~p: ~p ms\n", 108 | [Millis, Over2, Total, Pid, Mod, Fun, ElapsedMs]); 109 | true -> 110 | Over2 = Over 111 | end, 112 | if Over2 == Max -> stop(); 113 | true -> 114 | D2 = orddict:erase(Key, D), 115 | {D2, {threshold, {Millis, Max}, {Over2, Total}}} 116 | end; 117 | error -> Acc 118 | end. 119 | -------------------------------------------------------------------------------- /rebar.config: -------------------------------------------------------------------------------- 1 | {src_dirs, ["./priv/tracers", "./src"]}. 2 | {cover_enabled, false}. 3 | {edoc_opts, [{preprocess, true}]}. 4 | {erl_opts, [warnings_as_errors, 5 | {parse_transform, lager_transform}, 6 | {lager_extra_sinks, [object]}, 7 | {src_dirs, ["src", "priv/tracers"]}, 8 | {platform_define, "^[0-9]+", namespaced_types}, 9 | {platform_define, "^[0-9]+", set_env_options}, 10 | {platform_define, "^R15", "old_hash"}, 11 | {i, "./_build/default/plugins/gpb/include"}, 12 | {d, 'TEST_FS2_BACKEND_IN_RIAK_KV'}]}. 13 | 14 | {eunit_opts, [verbose]}. 15 | 16 | {xref_checks,[undefined_function_calls,undefined_functions]}. 17 | 18 | %% XXX yz_kv is here because Ryan has not yet made a generic hook interface for object modification 19 | %% XXX yz_stat is here for similar reasons -- we do not yet support dynamic stat hooks 20 | %% XXX object is here because it's a new Lager sink 21 | {xref_queries, [{"(XC - UC) || (XU - X - B - \"(cluster_info|dtrace|yz_kv|yz_stat|object)\" : Mod)", []}]}. 22 | 23 | {erl_first_files, [ 24 | "src/riak_kv_backend.erl" 25 | ]}. 26 | 27 | {plugins, [{rebar3_gpb_plugin, {git, "https://github.com/basho/rebar3_gpb_plugin", {tag, "2.15.1+riak.3.0.4"}}}, 28 | {eqc_rebar, {git, "https://github.com/Quviq/eqc-rebar", {branch, "master"}}}]}. 
29 | 30 | {gpb_opts, [{module_name_suffix, "_pb"}, 31 | {i, "src"}]}. 32 | 33 | {dialyzer, [{plt_apps, all_deps}]}. 34 | 35 | {provider_hooks, [ 36 | {pre, [{compile, {protobuf, compile}}]} 37 | ]}. 38 | 39 | {profiles, [ 40 | {test, [{deps, [meck]}]}, 41 | {gha, [{erl_opts, [{d, 'GITHUBEXCLUDE'}]}]} 42 | ]}. 43 | 44 | {deps, [ 45 | {riak_core, {git, "https://github.com/basho/riak_core.git", {tag, "riak_kv-3.0.16"}}}, 46 | {sidejob, {git, "https://github.com/basho/sidejob.git", {tag, "2.1.0"}}}, 47 | {bitcask, {git, "https://github.com/basho/bitcask.git", {tag, "2.1.0"}}}, 48 | {redbug, {git, "https://github.com/massemanet/redbug", {tag, "1.2.2"}}}, 49 | {recon, {git, "https://github.com/ferd/recon", {tag, "2.5.2"}}}, 50 | {sext, {git, "https://github.com/uwiger/sext.git", {tag, "1.4.1"}}}, 51 | {riak_pipe, {git, "https://github.com/basho/riak_pipe.git", {tag, "riak_kv-3.0.16"}}}, 52 | {riak_dt, {git, "https://github.com/basho/riak_dt.git", {tag, "riak_kv-3.0.0"}}}, 53 | {riak_api, {git, "https://github.com/basho/riak_api.git", {tag, "riak_kv-3.0.16"}}}, 54 | {hyper, {git, "https://github.com/basho/hyper", {tag, "1.1.0"}}}, 55 | {kv_index_tictactree, {git, "https://github.com/martinsumner/kv_index_tictactree.git", {tag, "1.0.7"}}}, 56 | {riakhttpc, {git, "https://github.com/basho/riak-erlang-http-client", {tag, "3.0.13"}}} 57 | ]}. 
58 | -------------------------------------------------------------------------------- /rebar3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/basho/riak_kv/18817d228ecd818f8fc997fc6a6a288d00372209/rebar3 -------------------------------------------------------------------------------- /src/json_pp.erl: -------------------------------------------------------------------------------- 1 | %% ------------------------------------------------------------------- 2 | %% 3 | %% json_pp: pretty print serialized JSON strings 4 | %% 5 | %% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved. 6 | %% 7 | %% This file is provided to you under the Apache License, 8 | %% Version 2.0 (the "License"); you may not use this file 9 | %% except in compliance with the License. You may obtain 10 | %% a copy of the License at 11 | %% 12 | %% http://www.apache.org/licenses/LICENSE-2.0 13 | %% 14 | %% Unless required by applicable law or agreed to in writing, 15 | %% software distributed under the License is distributed on an 16 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | %% KIND, either express or implied. See the License for the 18 | %% specific language governing permissions and limitations 19 | %% under the License. 20 | %% 21 | %% ------------------------------------------------------------------- 22 | -module(json_pp). 23 | 24 | -define(SPACE, 32). 25 | -define(is_quote(C), (C == $\") orelse (C == $\')). 26 | -define(is_indent(C), (C == 91) orelse (C == 123)). % [, { 27 | -define(is_undent(C), (C == 93) orelse (C == 125)). % ], } 28 | -export([print/1, 29 | test/0]). 30 | 31 | print(Str) when is_list(Str) -> json_pp(Str, 0, undefined, []). 
32 | 33 | json_pp([$\\, C| Rest], I, C, Acc) -> % in quote 34 | json_pp(Rest, I, C, [C, $\\| Acc]); 35 | json_pp([C| Rest], I, undefined, Acc) when ?is_quote(C) -> 36 | json_pp(Rest, I, C, [C| Acc]); 37 | json_pp([C| Rest], I, C, Acc) -> % in quote 38 | json_pp(Rest, I, undefined, [C| Acc]); 39 | json_pp([C| Rest], I, undefined, Acc) when ?is_indent(C) -> 40 | json_pp(Rest, I+1, undefined, [pp_indent(I+1), $\n, C| Acc]); 41 | json_pp([C| Rest], I, undefined, Acc) when ?is_undent(C) -> 42 | json_pp(Rest, I-1, undefined, [C, pp_indent(I-1), $\n| Acc]); 43 | json_pp([$,| Rest], I, undefined, Acc) -> 44 | json_pp(Rest, I, undefined, [pp_indent(I), $\n, $,| Acc]); 45 | json_pp([$:| Rest], I, undefined, Acc) -> 46 | json_pp(Rest, I, undefined, [?SPACE, $:| Acc]); 47 | json_pp([C|Rest], I, Q, Acc) -> 48 | json_pp(Rest, I, Q, [C| Acc]); 49 | json_pp([], _I, _Q, Acc) -> % done 50 | lists:reverse(Acc). 51 | 52 | pp_indent(I) -> lists:duplicate(I*4, ?SPACE). 53 | 54 | %% testing 55 | 56 | test_data() -> 57 | {struct, [{foo, true}, 58 | {bar, false}, 59 | {baz, {array, [1, 2, 3, 4]}}, 60 | {'fiz:f', null}, 61 | {"fozzer\"", 5}]}. 62 | 63 | listify(IoList) -> binary_to_list(list_to_binary(IoList)). 64 | 65 | test() -> 66 | J1 = listify(mochijson:encode(test_data())), 67 | io:format("~s~n", [listify(print(J1))]). 68 | 69 | -ifdef(TEST). 70 | -include_lib("eunit/include/eunit.hrl"). 71 | 72 | basic_test() -> 73 | J1 = listify(mochijson:encode(test_data())), 74 | L1 = 75 | "{\n" 76 | " \"foo\": true,\n" 77 | " \"bar\": false,\n" 78 | " \"baz\": [\n" 79 | " 1,\n" 80 | " 2,\n" 81 | " 3,\n" 82 | " 4\n" 83 | " ],\n" 84 | " \"fiz:f\": null,\n" 85 | " \"fozzer\\\"\": 5\n" 86 | "}", 87 | ?assertEqual(L1, listify(print(J1))), 88 | ok. 89 | 90 | -endif. 
91 | -------------------------------------------------------------------------------- /src/raw_link_walker.erl: -------------------------------------------------------------------------------- 1 | %% ------------------------------------------------------------------- 2 | %% 3 | %% raw_link_walker: Backwards compatibility module for link traversal 4 | %% 5 | %% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved. 6 | %% 7 | %% This file is provided to you under the Apache License, 8 | %% Version 2.0 (the "License"); you may not use this file 9 | %% except in compliance with the License. You may obtain 10 | %% a copy of the License at 11 | %% 12 | %% http://www.apache.org/licenses/LICENSE-2.0 13 | %% 14 | %% Unless required by applicable law or agreed to in writing, 15 | %% software distributed under the License is distributed on an 16 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | %% KIND, either express or implied. See the License for the 18 | %% specific language governing permissions and limitations 19 | %% under the License. 20 | %% 21 | %% ------------------------------------------------------------------- 22 | -module(raw_link_walker). 23 | 24 | -export([mapreduce_linkfun/3]). 25 | 26 | %% 27 | %% NOTICE 28 | %% 29 | %% This module is STRICTLY a pass-thru for backward compatibility. DO NOT 30 | %% add/modify this code! 31 | %% 32 | %% We use the explicit syntax for invoking riak_kv_wm_link_walker to ensure 33 | %% that code loading works appropriately. 34 | %% 35 | 36 | mapreduce_linkfun(Object, Term, Bucket) -> 37 | riak_kv_wm_link_walker:mapreduce_linkfun(Object, Term, Bucket). 
38 | -------------------------------------------------------------------------------- /src/riak_core.proto: -------------------------------------------------------------------------------- 1 | message RiakObject_PB { 2 | required bytes bucket = 1; 3 | required bytes key = 2; 4 | required bytes val = 3; 5 | } 6 | 7 | -------------------------------------------------------------------------------- /src/riak_kv.app.src: -------------------------------------------------------------------------------- 1 | %% ex: ts=4 sw=4 et 2 | {application, riak_kv, 3 | [ 4 | {description, "Riak Key/Value Store"}, 5 | {vsn, git}, 6 | {applications, [ 7 | kernel, 8 | stdlib, 9 | sasl, 10 | crypto, 11 | riak_api, 12 | riak_core, 13 | sidejob, 14 | mochiweb, 15 | webmachine, 16 | os_mon, 17 | riak_pipe, 18 | riak_dt, 19 | riak_pb, 20 | sext, 21 | leveled, 22 | kv_index_tictactree, 23 | riak_ensemble, 24 | exometer_core, 25 | hyper, 26 | redbug, 27 | recon, 28 | riakc, 29 | riakhttpc 30 | ]}, 31 | {registered, []}, 32 | {mod, {riak_kv_app, []}}, 33 | {env, [ 34 | %% Endpoint for system stats HTTP provider 35 | {stats_urlpath, "stats"}, 36 | 37 | %% Secondary code paths 38 | {add_paths, []}, 39 | 40 | %% This option toggles compatibility of keylisting with 1.0 41 | %% and earlier versions. Once a rolling upgrade to a version 42 | %% > 1.0 is completed for a cluster, this should be set to 43 | %% true for better control of memory usage during key listing 44 | %% operations 45 | {listkeys_backpressure, false}, 46 | 47 | %% use the legacy routines for tracking kv stats 48 | {legacy_stats, true}, 49 | 50 | %% Disable active anti-entropy by default 51 | {anti_entropy, {off, []}}, 52 | 53 | %% Enable DVV by default 54 | {dvv_enabled, true}, 55 | 56 | %% Allow Erlang MapReduce functions to be specified as 57 | %% strings. 58 | %% 59 | %% !!!WARNING!!! 60 | %% This will allow arbitrary Erlang code to be submitted 61 | %% through the REST and Protocol Buffers interfaces. 
This 62 | %% should only be used for development purposes. 63 | {allow_strfun, false}, 64 | 65 | %% Log a warning if object bigger than 5MB 66 | {warn_object_size, 5242880}, 67 | % Writing an object bigger than 50MB will send a failure to the client 68 | {max_object_size, 52428800}, 69 | %% Log a warning if # of siblings bigger than this 70 | {warn_siblings, 25}, 71 | % Writing an object with more than this number of siblings will 72 | % generate a warning in the logs 73 | {max_siblings, 100}, 74 | 75 | %% @doc Object hash version should be 0 by default. Without the 76 | %% environment variable being set at startup, it could by default 77 | %% revert to being considered as legacy even when the whole cluster 78 | %% has support for version 0 - Github Issue 1656 79 | {object_hash_version, 0}, 80 | 81 | %% @doc http_url_encoding determines how Riak treats URL encoded 82 | %% buckets, keys, and links over the REST API. When set to 'on' Riak 83 | %% always decodes encoded values sent as URLs and Headers. Otherwise, 84 | %% Riak defaults to compatibility mode where links are decoded, but 85 | %% buckets and keys are not. The compatibility mode will be removed in 86 | %% a future release. 87 | {http_url_encoding, on}, 88 | 89 | %% @doc mapred_2i_pipe indicates whether secondary-index 90 | %% MapReduce inputs are queued in parallel via their own 91 | %% pipe ('true'), or serially via a helper process 92 | %% ('false' or undefined). Set to 'false' or leave 93 | %% undefined during a rolling upgrade from 1.0. 94 | {mapred_2i_pipe, true} 95 | ]} 96 | ]}. 97 | -------------------------------------------------------------------------------- /src/riak_kv_backend.erl: -------------------------------------------------------------------------------- 1 | %% ------------------------------------------------------------------- 2 | %% 3 | %% riak_kv_backend: Riak backend behaviour 4 | %% 5 | %% Copyright (c) 2007-2015 Basho Technologies, Inc. All Rights Reserved. 
6 | %% 7 | %% This file is provided to you under the Apache License, 8 | %% Version 2.0 (the "License"); you may not use this file 9 | %% except in compliance with the License. You may obtain 10 | %% a copy of the License at 11 | %% 12 | %% http://www.apache.org/licenses/LICENSE-2.0 13 | %% 14 | %% Unless required by applicable law or agreed to in writing, 15 | %% software distributed under the License is distributed on an 16 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | %% KIND, either express or implied. See the License for the 18 | %% specific language governing permissions and limitations 19 | %% under the License. 20 | %% 21 | %% ------------------------------------------------------------------- 22 | 23 | -module(riak_kv_backend). 24 | 25 | -export([callback_after/3]). 26 | 27 | -type fold_buckets_fun() :: fun((binary(), any()) -> any() | no_return()). 28 | -type fold_keys_fun() :: fun((binary(), binary(), any()) -> any() | 29 | no_return()). 30 | -type fold_objects_fun() :: fun((binary(), binary(), term(), any()) -> 31 | any() | 32 | no_return()). 33 | 34 | -type index_spec() :: {add | remove, binary(), riak_object:index_value()}. 35 | 36 | -export_type([fold_buckets_fun/0, 37 | fold_keys_fun/0, 38 | fold_objects_fun/0, 39 | fold_opts/0, 40 | index_spec/0]). 41 | 42 | %% These are just here to make the callback specs more succinct and readable 43 | -type state() :: term(). 44 | -type fold_acc() :: term(). 45 | -type fold_opts() :: [term()]. %% TODO maybe more specific? [{atom(), term()}]? 46 | -type fold_result() :: {ok, fold_acc()} | {async, fun()} | {error, term()}. 47 | 48 | -callback api_version() -> {ok, number()}. 49 | 50 | -callback capabilities(state()) -> {ok, [atom()]}. 51 | -callback capabilities(riak_object:bucket(), state()) -> {ok, [atom()]}. 52 | 53 | -callback start(PartitionIndex :: non_neg_integer(), Config :: [{atom(), term()}]) -> 54 | {ok, state()} | {error, term()}. 55 | -callback stop(state()) -> ok. 
56 | 57 | -callback get(riak_object:bucket(), riak_object:key(), state()) -> 58 | {ok, Value :: term(), state()} | 59 | {ok, not_found, state()} | 60 | {error, term(), state()}. 61 | -callback put(riak_object:bucket(), riak_object:key(), [index_spec()], Value :: binary(), 62 | state()) -> 63 | {ok, state()} | 64 | {error, term(), state()}. 65 | -callback delete(riak_object:bucket(), riak_object:key(), [index_spec()], state()) -> 66 | {ok, state()} | 67 | {error, term(), state()}. 68 | 69 | -callback drop(state()) -> {ok, state()} | {error, term(), state()}. 70 | 71 | -callback fold_buckets(fold_buckets_fun(), fold_acc(), fold_opts(), state()) -> fold_result(). 72 | -callback fold_keys(fold_keys_fun(), fold_acc(), fold_opts(), state()) -> fold_result(). 73 | -callback fold_objects(fold_objects_fun(), fold_acc(), fold_opts(), state()) -> fold_result(). 74 | 75 | -callback is_empty(state()) -> boolean() | {error, term()}. 76 | 77 | -callback status(state()) -> [{atom(), term()}]. 78 | 79 | -callback callback(reference(), Msg :: term(), state()) -> {ok, state()}. 80 | 81 | %% Queue a callback for the backend after Time ms. 82 | -spec callback_after(integer(), reference(), term()) -> reference(). 83 | callback_after(Time, Ref, Msg) when is_integer(Time), is_reference(Ref) -> 84 | riak_core_vnode:send_command_after(Time, {backend_callback, Ref, Msg}). 85 | -------------------------------------------------------------------------------- /src/riak_kv_buckets_fsm.erl: -------------------------------------------------------------------------------- 1 | %% ------------------------------------------------------------------- 2 | %% 3 | %% riak_buckets_fsm: listing of buckets 4 | %% 5 | %% Copyright (c) 2007-2011 Basho Technologies, Inc. All Rights Reserved. 6 | %% 7 | %% This file is provided to you under the Apache License, 8 | %% Version 2.0 (the "License"); you may not use this file 9 | %% except in compliance with the License. 
You may obtain 10 | %% a copy of the License at 11 | %% 12 | %% http://www.apache.org/licenses/LICENSE-2.0 13 | %% 14 | %% Unless required by applicable law or agreed to in writing, 15 | %% software distributed under the License is distributed on an 16 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | %% KIND, either express or implied. See the License for the 18 | %% specific language governing permissions and limitations 19 | %% under the License. 20 | %% 21 | %% ------------------------------------------------------------------- 22 | 23 | %% @doc The buckets fsm manages the listing of buckets. 24 | 25 | -module(riak_kv_buckets_fsm). 26 | 27 | -behaviour(riak_core_coverage_fsm). 28 | 29 | -include_lib("riak_kv_vnode.hrl"). 30 | 31 | -export([init/2, 32 | process_results/2, 33 | finish/2]). 34 | 35 | -type from() :: {atom(), req_id(), pid()}. 36 | -type req_id() :: non_neg_integer(). 37 | 38 | -ifdef(namespaced_types). 39 | -type riak_kv_buckets_fsm_set() :: sets:set(). 40 | -else. 41 | -type riak_kv_buckets_fsm_set() :: set(). 42 | -endif. 43 | 44 | -record(state, {buckets=sets:new() :: riak_kv_buckets_fsm_set(), 45 | from :: from(), 46 | stream=false :: boolean(), 47 | type :: binary() 48 | }). 49 | 50 | -include("riak_kv_dtrace.hrl"). 51 | 52 | %% @doc Return a tuple containing the ModFun to call per vnode, 53 | %% the number of primary preflist vnodes the operation 54 | %% should cover, the service to use to check for available nodes, 55 | %% and the registered name to use to access the vnode master process. 
56 | init(From, [_, _]=Args) -> 57 | init(From, Args ++ [false, <<"default">>]); 58 | init(From, [ItemFilter, Timeout, Stream]) -> 59 | init(From, [ItemFilter, Timeout, Stream, <<"default">>]); 60 | init(From={_, _, ClientPid}, [ItemFilter, Timeout, Stream, BucketType]) -> 61 | ClientNode = atom_to_list(node(ClientPid)), 62 | PidStr = pid_to_list(ClientPid), 63 | FilterX = if ItemFilter == none -> 0; 64 | true -> 1 65 | end, 66 | %% "other" is a legacy term from when MapReduce used this FSM (in 67 | %% which case, the string "mapred" would appear 68 | ?DTRACE(?C_BUCKETS_INIT, [2, FilterX], 69 | [<<"other">>, ClientNode, PidStr]), 70 | %% Construct the bucket listing request 71 | Req = riak_kv_requests:new_listbuckets_request(ItemFilter), 72 | {Req, allup, 1, 1, riak_kv, riak_kv_vnode_master, Timeout, 73 | #state{from=From, stream=Stream, type=BucketType}}. 74 | 75 | process_results(done, StateData) -> 76 | {done, StateData}; 77 | process_results({error, Reason}, _State) -> 78 | ?DTRACE(?C_BUCKETS_PROCESS_RESULTS, [-1], []), 79 | {error, Reason}; 80 | process_results(Buckets0, 81 | StateData=#state{buckets=BucketAcc, from=From, stream=true}) -> 82 | Buckets = filter_buckets(Buckets0, StateData#state.type), 83 | ?DTRACE(?C_BUCKETS_PROCESS_RESULTS, [length(Buckets)], []), 84 | BucketsToSend = [ B || B <- Buckets, 85 | not sets:is_element(B, BucketAcc) ], 86 | case BucketsToSend =/= [] of 87 | true -> 88 | reply({buckets_stream, BucketsToSend}, From); 89 | false -> 90 | ok 91 | end, 92 | {ok, StateData#state{buckets=accumulate(Buckets, BucketAcc)}}; 93 | process_results(Buckets0, 94 | StateData=#state{buckets=BucketAcc, stream=false}) -> 95 | Buckets = filter_buckets(Buckets0, StateData#state.type), 96 | ?DTRACE(?C_BUCKETS_PROCESS_RESULTS, [length(Buckets)], []), 97 | {ok, StateData#state{buckets=accumulate(Buckets, BucketAcc)}}. 
98 | 99 | finish({error, _}=Error, 100 | StateData=#state{from=From}) -> 101 | ?DTRACE(?C_BUCKETS_FINISH, [-1], []), 102 | %% Notify the requesting client that an error 103 | %% occurred or the timeout has elapsed. 104 | reply(Error, From), 105 | {stop, normal, StateData}; 106 | finish(clean, StateData=#state{from=From, stream=true}) -> 107 | ?DTRACE(?C_BUCKETS_FINISH, [0], []), 108 | reply(done, From), 109 | {stop, normal, StateData}; 110 | finish(clean, 111 | StateData=#state{buckets=Buckets, 112 | from=From, 113 | stream=false}) -> 114 | reply({buckets, sets:to_list(Buckets)}, From), 115 | ?DTRACE(?C_BUCKETS_FINISH, [0], []), 116 | {stop, normal, StateData}. 117 | 118 | reply(Msg, {raw, ReqId, ClientPid}) -> 119 | ClientPid ! {ReqId, Msg}, 120 | ok. 121 | 122 | accumulate(Entries, Set) -> 123 | sets:union(sets:from_list(Entries),Set). 124 | 125 | filter_buckets(Buckets, Type) -> 126 | filter_buckets(Buckets, Type, []). 127 | 128 | filter_buckets([], _Type, Acc) -> 129 | Acc; 130 | filter_buckets([{Type, Bucket}|Rest], Type, Acc) -> 131 | filter_buckets(Rest, Type, [Bucket|Acc]); 132 | filter_buckets([Bucket|Rest], Type, Acc) when is_binary(Bucket), 133 | Type == undefined orelse 134 | Type == <<"default">> -> 135 | filter_buckets(Rest, Type, [Bucket|Acc]); 136 | filter_buckets([_|Rest], Type, Acc) -> 137 | %% does not match 138 | filter_buckets(Rest, Type, Acc). 139 | 140 | 141 | -ifdef(TEST). 142 | -include_lib("eunit/include/eunit.hrl"). 143 | -compile([export_all, nowarn_export_all]). 144 | %% tests should go here at some point. 145 | -endif. 146 | -------------------------------------------------------------------------------- /src/riak_kv_buckets_fsm_sup.erl: -------------------------------------------------------------------------------- 1 | %% ------------------------------------------------------------------- 2 | %% 3 | %% riak_kv_buckets_fsm_sup: supervise the riak_kv buckets state machines. 4 | %% 5 | %% Copyright (c) 2007-2011 Basho Technologies, Inc. 
All Rights Reserved. 6 | %% 7 | %% This file is provided to you under the Apache License, 8 | %% Version 2.0 (the "License"); you may not use this file 9 | %% except in compliance with the License. You may obtain 10 | %% a copy of the License at 11 | %% 12 | %% http://www.apache.org/licenses/LICENSE-2.0 13 | %% 14 | %% Unless required by applicable law or agreed to in writing, 15 | %% software distributed under the License is distributed on an 16 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | %% KIND, either express or implied. See the License for the 18 | %% specific language governing permissions and limitations 19 | %% under the License. 20 | %% 21 | %% ------------------------------------------------------------------- 22 | 23 | %% @doc supervise the riak_kv buckets state machines 24 | 25 | -module(riak_kv_buckets_fsm_sup). 26 | 27 | -behaviour(supervisor). 28 | 29 | -export([start_buckets_fsm/2]). 30 | -export([start_link/0]). 31 | -export([init/1]). 32 | 33 | start_buckets_fsm(Node, Args) -> 34 | case supervisor:start_child({?MODULE, Node}, Args) of 35 | {ok, Pid} -> 36 | ok = riak_kv_stat:update({list_create, Pid}), 37 | {ok, Pid}; 38 | Error -> 39 | ok = riak_kv_stat:update(list_create_error), 40 | Error 41 | end. 42 | 43 | %% @spec start_link() -> ServerRet 44 | %% @doc API for starting the supervisor. 45 | start_link() -> 46 | supervisor:start_link({local, ?MODULE}, ?MODULE, []). 47 | 48 | %% @spec init([]) -> SupervisorTree 49 | %% @doc supervisor callback. 50 | init([]) -> 51 | BucketsFsmSpec = {undefined, 52 | {riak_core_coverage_fsm, start_link, [riak_kv_buckets_fsm]}, 53 | temporary, 5000, worker, [riak_kv_buckets_fsm]}, 54 | 55 | {ok, {{simple_one_for_one, 10, 10}, [BucketsFsmSpec]}}. 
56 | -------------------------------------------------------------------------------- /src/riak_kv_cinfo.erl: -------------------------------------------------------------------------------- 1 | %% ------------------------------------------------------------------- 2 | %% 3 | %% Riak: A lightweight, decentralized key-value store. 4 | %% 5 | %% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved. 6 | %% 7 | %% This file is provided to you under the Apache License, 8 | %% Version 2.0 (the "License"); you may not use this file 9 | %% except in compliance with the License. You may obtain 10 | %% a copy of the License at 11 | %% 12 | %% http://www.apache.org/licenses/LICENSE-2.0 13 | %% 14 | %% Unless required by applicable law or agreed to in writing, 15 | %% software distributed under the License is distributed on an 16 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 17 | %% KIND, either express or implied. See the License for the 18 | %% specific language governing permissions and limitations 19 | %% under the License. 20 | %% 21 | %% ------------------------------------------------------------------- 22 | 23 | -module(riak_kv_cinfo). 24 | -export([cluster_info_init/0, cluster_info_generator_funs/0]). 25 | 26 | %% @spec () -> term() 27 | %% @doc Required callback function for cluster_info: initialization. 28 | %% 29 | %% This function doesn't have to do anything. 30 | 31 | cluster_info_init() -> 32 | ok. 33 | 34 | %% @spec () -> list({string(), fun()}) 35 | %% @doc Required callback function for cluster_info: return list of 36 | %% {NameForReport, FunOfArity_1} tuples to generate ASCII/UTF-8 37 | %% formatted reports. 38 | 39 | cluster_info_generator_funs() -> 40 | [ 41 | {"Riak KV status", fun status/1}, 42 | {"Riak KV ringready", fun ringready/1}, 43 | {"Riak KV transfers", fun transfers/1}, 44 | {"Riak KV anti-entropy throttle", fun aae_throttle/1} 45 | ]. 
46 | 47 | aae_throttle(CPid) -> 48 | Props = get_aae_throttle(), 49 | Throttle = proplists:get_value(current_throttle, Props), 50 | ThrottleLimits = proplists:get_value(limits, Props), 51 | cluster_info:format(CPid, "Current throttle: ~p msec\n", [Throttle]), 52 | cluster_info:format(CPid, "Limit AAE throttle parameters: ~p\n", 53 | [ThrottleLimits]). 54 | 55 | get_aae_throttle() -> 56 | [{current_throttle, riak_kv_entropy_manager:get_aae_throttle()}, 57 | {limits, riak_kv_entropy_manager:get_aae_throttle_limits()}]. 58 | 59 | status(CPid) -> % CPid is the data collector's pid. 60 | cluster_info:format(CPid, "~p\n", [riak_kv_status:statistics()]). 61 | 62 | ringready(CPid) -> 63 | cluster_info:format(CPid, "~p\n", [riak_kv_status:ringready()]). 64 | 65 | transfers(CPid) -> 66 | cluster_info:format(CPid, "~p\n", [riak_kv_status:transfers()]). 67 | 68 | -------------------------------------------------------------------------------- /src/riak_kv_clusteraae_fsm_sup.erl: -------------------------------------------------------------------------------- 1 | %% ------------------------------------------------------------------- 2 | %% 3 | %% riak_kv_clusteraae_fsm_sup: supervise the riak_kv cluster aae state 4 | %% machines. 5 | %% 6 | %% Copyright (c) 2007-2011 Basho Technologies, Inc. All Rights Reserved. 7 | %% 8 | %% This file is provided to you under the Apache License, 9 | %% Version 2.0 (the "License"); you may not use this file 10 | %% except in compliance with the License. You may obtain 11 | %% a copy of the License at 12 | %% 13 | %% http://www.apache.org/licenses/LICENSE-2.0 14 | %% 15 | %% Unless required by applicable law or agreed to in writing, 16 | %% software distributed under the License is distributed on an 17 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 18 | %% KIND, either express or implied. See the License for the 19 | %% specific language governing permissions and limitations 20 | %% under the License. 
21 | %% 22 | %% ------------------------------------------------------------------- 23 | 24 | %% @doc supervise the riak_kv cluster aae state machine 25 | 26 | -module(riak_kv_clusteraae_fsm_sup). 27 | 28 | -behaviour(supervisor). 29 | 30 | -export([start_clusteraae_fsm/2]). 31 | -export([start_link/0]). 32 | -export([init/1]). 33 | 34 | start_clusteraae_fsm(Node, Args) -> 35 | case supervisor:start_child({?MODULE, Node}, Args) of 36 | {ok, Pid} -> 37 | ok = riak_kv_stat:update({clusteraae_create, Pid}), 38 | {ok, Pid}; 39 | Error -> 40 | ok = riak_kv_stat:update(clusteraae_create_error), 41 | Error 42 | end. 43 | 44 | %% @spec start_link() -> ServerRet 45 | %% @doc API for starting the supervisor. 46 | start_link() -> 47 | supervisor:start_link({local, ?MODULE}, ?MODULE, []). 48 | 49 | %% @spec init([]) -> SupervisorTree 50 | %% @doc supervisor callback. 51 | init([]) -> 52 | ClusterAAEFsmSpec = 53 | {undefined, 54 | {riak_core_coverage_fsm, start_link, [riak_kv_clusteraae_fsm]}, 55 | temporary, 5000, worker, [riak_kv_clusteraae_fsm]}, 56 | 57 | {ok, {{simple_one_for_one, 10, 10}, [ClusterAAEFsmSpec]}}. 58 | -------------------------------------------------------------------------------- /src/riak_kv_counter.erl: -------------------------------------------------------------------------------- 1 | %% ------------------------------------------------------------------- 2 | %% 3 | %% riak_kv_counter: Backwards compatibile access to counters for 4 | %% customer MR 5 | %% 6 | %% Copyright (c) 2007-2013 Basho Technologies, Inc. All Rights Reserved. 7 | %% 8 | %% This file is provided to you under the Apache License, 9 | %% Version 2.0 (the "License"); you may not use this file 10 | %% except in compliance with the License. 
You may obtain 11 | %% a copy of the License at 12 | %% 13 | %% http://www.apache.org/licenses/LICENSE-2.0 14 | %% 15 | %% Unless required by applicable law or agreed to in writing, 16 | %% software distributed under the License is distributed on an 17 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 18 | %% KIND, either express or implied. See the License for the 19 | %% specific language governing permissions and limitations 20 | %% under the License. 21 | %% 22 | %% ------------------------------------------------------------------- 23 | 24 | %% @doc Backwards compatibility with 1.4 counters `value' function as 25 | %% used in the CRDT cookbook. 26 | %% 27 | %% @see riak_kv_crdt 28 | %% @end 29 | 30 | -module(riak_kv_counter). 31 | 32 | -export([value/1]). 33 | 34 | -include("riak_kv_wm_raw.hrl"). 35 | -include_lib("riak_kv_types.hrl"). 36 | 37 | %% @doc Get the value of V1 (1.4) Counter from an Object. Backwards 38 | %% compatibility with 1.4 for MapReduce. 39 | -spec value(riak_object:riak_object()) -> 40 | integer(). 41 | value(RObj) -> 42 | {{_Ctx, Count}, _Stats} = riak_kv_crdt:value(RObj, ?V1_COUNTER_TYPE), 43 | Count. 44 | -------------------------------------------------------------------------------- /src/riak_kv_coverage_filter.erl: -------------------------------------------------------------------------------- 1 | %% ------------------------------------------------------------------- 2 | %% 3 | %% riak_kv_coverage_filter: Construct coverage filter functions. 4 | %% 5 | %% 6 | %% Copyright (c) 2007-2011 Basho Technologies, Inc. All Rights Reserved. 7 | %% 8 | %% This file is provided to you under the Apache License, 9 | %% Version 2.0 (the "License"); you may not use this file 10 | %% except in compliance with the License. 
You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------

%% @doc This module is used to construct a property list of VNode
%% indexes and functions to filter results from a coverage
%% operation. This may include filtering based on the particular
%% VNode or filtering on each item in the result list from any
%% VNode.

-module(riak_kv_coverage_filter).

%% API
-export([build_filter/1, build_filter/3]).

-export_type([filter/0]).

-type bucket() :: binary().
-type filter() :: none | fun((any()) -> boolean()) | [{atom(), atom(), [any()]}].
-type index() :: non_neg_integer().

%% ===================================================================
%% Public API
%% ===================================================================

%% @doc Build an item filter from the given input.
%%
%% The input can be the atom `none' (no filtering based on request
%% items), a function returning a boolean that decides whether an item
%% is kept, or a list of `{Module, Function, Args}' tuples as used by
%% MapReduce filters such as those in {@link riak_kv_mapred_filters}.
%% A tuple list is composed into a single predicate function.
-spec build_filter(filter()) -> filter().
build_filter(FilterInput) ->
    build_item_filter(FilterInput).

%% @doc Build the combined item/VNode filter for a coverage request.
%% Returns `none' when no filtering at all is required.
-spec build_filter(bucket(), filter(), [index()]) -> filter().
build_filter(Bucket, ItemFilterInput, FilterVNode) ->
    ItemFilter = build_item_filter(ItemFilterInput),
    case {ItemFilter, FilterVNode} of
        {none, undefined} ->
            %% No filtering required
            none;
        {_, undefined} ->
            %% Item (key) filtering only
            ItemFilter;
        {none, _} ->
            %% VNode filtering only
            compose_filter(FilterVNode, preflist_fun(Bucket));
        {_, _} ->
            %% Both item and VNode filtering
            compose_filter(FilterVNode, preflist_fun(Bucket), ItemFilter)
    end.

%% ====================================================================
%% Internal functions
%% ====================================================================

%% @private Fetch the ring and return a fun mapping a result item to
%% its responsible preflist index for Bucket.
preflist_fun(Bucket) ->
    {ok, CHBin} = riak_core_ring_manager:get_chash_bin(),
    build_preflist_fun(Bucket, CHBin).

%% @private
compose_filter(KeySpaceIndexes, PrefListFun) ->
    build_vnode_filter(KeySpaceIndexes, PrefListFun).

%% @private
compose_filter(undefined, _PrefListFun, ItemFilter) ->
    ItemFilter;
compose_filter(KeySpaceIndexes, PrefListFun, ItemFilter) ->
    VNodeFilter = build_vnode_filter(KeySpaceIndexes, PrefListFun),
    fun(Item) -> ItemFilter(Item) andalso VNodeFilter(Item) end.

%% @private Keep only items whose responsible index is in the key space.
build_vnode_filter(KeySpaceIndexes, PrefListFun) ->
    fun(Item) ->
            lists:member(PrefListFun(Item), KeySpaceIndexes)
    end.

%% @private
build_item_filter(none) ->
    none;
build_item_filter(FilterInput) when is_function(FilterInput) ->
    FilterInput;
build_item_filter(FilterInput) ->
    %% FilterInput is a list of {Module, Fun, Args} tuples
    compose(FilterInput).

%% @private The result item can come in three shapes; extract the key
%% from each and hash it to its responsible partition index.
build_preflist_fun(Bucket, CHBin) ->
    ToIndex =
        fun(Key) ->
                ChashKey = riak_core_util:chash_key({Bucket, Key}),
                chashbin:responsible_index(ChashKey, CHBin)
        end,
    fun({o, Key, _Value}) ->    %% $ index return_body
            ToIndex(Key);
       ({_Value, Key}) ->
            ToIndex(Key);
       (Key) ->
            ToIndex(Key)
    end.

%% @private Compose a list of {Module, Fun, Args} MapReduce filter
%% specs into one predicate: the produced funs are threaded over the
%% value in order and the final result must be exactly `true'.
compose([]) ->
    none;
compose(Filters) ->
    compose(Filters, []).

compose([], RevFilterFuns) ->
    FilterFuns = lists:reverse(RevFilterFuns),
    fun(Val) ->
            true =:= lists:foldl(fun(Fun, Acc) -> Fun(Acc) end,
                                 Val,
                                 FilterFuns)
    end;
compose([{FilterMod, FilterFun, Args} | RestFilters], FilterFuns) ->
    compose(RestFilters, [FilterMod:FilterFun(Args) | FilterFuns]).
-------------------------------------------------------------------------------- /src/riak_kv_delete_sup.erl: --------------------------------------------------------------------------------
%% -------------------------------------------------------------------
%%
%% riak_kv_delete_sup: supervise the riak_kv delete state machines.
%%
%% Copyright (c) 2007-2011 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------

%% @doc supervise the riak_kv delete state machines

-module(riak_kv_delete_sup).

-behaviour(supervisor).

-export([start_delete/2]).
-export([start_link/0]).
-export([init/1]).

%% @doc Start a delete FSM as a child of this supervisor on Node.
-spec start_delete(node(), list()) ->
    {ok, pid()} | {ok, pid(), term()} | {error, term()}.
start_delete(Node, Args) ->
    supervisor:start_child({?MODULE, Node}, Args).

%% @doc API for starting the supervisor.
-spec start_link() -> {ok, pid()} | ignore | {error, term()}.
start_link() ->
    supervisor:start_link({local, ?MODULE}, ?MODULE, []).

%% @doc supervisor callback: simple_one_for_one over riak_kv_delete
%% workers, started on demand via start_delete/2.
init([]) ->
    ChildSpec =
        {undefined,
         {riak_kv_delete, start_link, []},
         temporary, 5000, worker, [riak_kv_delete]},
    {ok, {{simple_one_for_one, 10, 10}, [ChildSpec]}}.
-------------------------------------------------------------------------------- /src/riak_kv_exometer_sidejob.erl: --------------------------------------------------------------------------------
%% -------------------------------------------------------------------
%%
%% riak_kv_exometer_sidejob: Access sidejob stats via the Exometer API
%%
%% Copyright (c) 2007-2013 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------

%% @doc riak_kv_exometer_sidejob is a wrapper module making sidejob
%% stats available via the Exometer API. It adds no overhead to the
%% stats update, nor interfere with the 'old' way of reading the
%% sidejob stats; it is purely complementary.
%% @end

-module(riak_kv_exometer_sidejob).

-behaviour(exometer_entry).

%% API
-export([new_entry/3, new_entry/4]).

%% Callback API
-export([behaviour/0,
         new/3, delete/3, get_value/4, update/4, reset/3, sample/3,
         get_datapoints/3, setopts/3]).

-define(UNSUP, {error, unsupported}).

%% @doc exometer_entry behaviour tag.
behaviour() ->
    entry.

%% @doc Register Name as an ad-hoc exometer entry backed by the sidejob
%% resource SjName.
new_entry(Name, SjName, Opts) ->
    exometer_new(Name, SjName, Opts).

%% @doc As new_entry/3, additionally installing datapoint aliases
%% derived from AliasPrefix.
new_entry(Name, SjName, AliasPrefix, Opts) ->
    exometer_new(Name, SjName, [{aliases, aliases(AliasPrefix)} | Opts]).

%% @private
exometer_new(Name, SjName, Opts) ->
    exometer:re_register(Name, ad_hoc,
                         [{module, ?MODULE},
                          {type, sidejob},
                          {sj_name, SjName} | Opts]).

%% @private Map each aliased datapoint to an atom built from Prefix.
aliases(Prefix) ->
    Pairs = [{usage,          "_active"},
             {usage_60s,      "_active_60s"},
             {in_rate,        "_in_rate"},
             {out_rate,       "_out_rate"},
             {rejected,       "_rejected"},
             {rejected_60s,   "_rejected_60s"},
             {rejected_total, "_rejected_total"}],
    [{DP, join(Prefix, Suffix)} || {DP, Suffix} <- Pairs].

%% @private
join(Prefix, Suffix) ->
    list_to_atom(Prefix ++ Suffix).

%% @doc exometer callback: stash the sidejob resource name as the ref.
new(_Name, _Type, Options) ->
    {sj_name, SjName} = lists:keyfind(sj_name, 1, Options),
    {ok, SjName}.

delete(_Name, _Type, _Ref) ->
    ok.

%% @doc Read the live sidejob stats, filtered to the requested
%% datapoints; returns 'unavailable' if the resource cannot be read.
get_value(_Name, _Type, SjName, DPs) ->
    try
        filter_datapoints(sidejob_resource_stats:stats(SjName), DPs)
    catch
        error:_ ->
            unavailable
    end.

get_datapoints(_, _, _) ->
    [usage, rejected, in_rate, out_rate, usage_60s, rejected_60s,
     avg_in_rate_60s, max_in_rate_60s, avg_out_rate_60s,
     max_out_rate_60s, usage_total, rejected_total,
     avg_in_rate_total, max_in_rate_total, avg_out_rate_total].

%% Sidejob stats are read-only from exometer's point of view.
update(_Name, _Type, _Ref, _Value) -> ?UNSUP.
reset(_Name, _Type, _Ref) -> ?UNSUP.
sample(_Name, _Type, _Ref) -> ?UNSUP.
setopts(_Entry, _Options, _NewStatus) -> ok.

%% @private 'default' means all datapoints.
filter_datapoints(Stats, default) ->
    Stats;
filter_datapoints(Stats, DPs) ->
    lists:filter(fun({K, _}) -> lists:member(K, DPs) end, Stats).
-------------------------------------------------------------------------------- /src/riak_kv_fold_buffer.erl: --------------------------------------------------------------------------------
%% -------------------------------------------------------------------
%%
%% riak_kv_fold_buffer: Provide operations for creating and using
%% size-limited buffers for use in folds.
%%
%% Copyright (c) 2007-2011 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------

%% @doc Provide operations for creating and using
%% size-limited buffers for use in folds.

-module(riak_kv_fold_buffer).

%% Public API
-export([new/2,
         add/2,
         flush/1,
         size/1]).

-export_type([buffer/0]).

-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.

-record(buffer, {acc=[] :: [any()],
                 buffer_fun :: function(),
                 max_size :: pos_integer(),
                 size=0 :: non_neg_integer()}).
-type buffer() :: #buffer{}.

%% ===================================================================
%% Public API
%% ===================================================================

%% @doc Returns a new buffer with the specified
%% maximum size and buffer function.
-spec new(pos_integer(), fun(([any()]) -> any())) -> buffer().
new(MaxSize, Fun) ->
    %% max_size is stored as MaxSize-1 because add/2 flushes on the
    %% item that brings the buffer up to MaxSize entries.
    #buffer{buffer_fun=Fun,
            max_size=MaxSize-1}.

%% @doc Add an item to the buffer. If the
%% size of the buffer is equal to the
%% maximum size of this buffer then
%% the buffer function is called on
%% the accumulated buffer items and
%% then the buffer is emptied.
-spec add(any(), buffer()) -> buffer().
add(Item, #buffer{acc=Acc,
                  buffer_fun=Fun,
                  max_size=MaxSize,
                  size=MaxSize}=Buffer) ->
    %% Buffer is full (size has reached max_size): deliver and reset.
    Fun([Item | Acc]),
    Buffer#buffer{acc=[],
                  size=0};
add(Item, #buffer{acc=Acc,
                  size=Size}=Buffer) ->
    Buffer#buffer{acc=[Item | Acc],
                  size=Size+1}.

%% @doc Call the buffer function on the
%% remaining items and then reset the buffer.
-spec flush(buffer()) -> buffer().
flush(#buffer{acc=Acc,
              buffer_fun=Fun}=Buffer) ->
    Fun(Acc),
    Buffer#buffer{acc=[],
                  size=0}.

%% @doc Returns the size of the buffer.
-spec size(buffer()) -> non_neg_integer().
size(#buffer{size=Size}) ->
    Size.

%% ===================================================================
%% EUnit tests
%% ===================================================================
-ifdef(TEST).

fold_buffer_test_() ->
    Fun = fun(_) -> true end,
    Buffer = new(5, Fun),
    Buffer1 = add(1, Buffer),
    Buffer2 = add(2, Buffer1),
    Buffer3 = add(3, Buffer2),
    Buffer4 = add(4, Buffer3),
    Buffer5 = add(5, Buffer4),
    {spawn,
     [
      ?_assertEqual(#buffer{acc=[1],
                            buffer_fun=Fun,
                            max_size=4,
                            size=1},
                    Buffer1),
      ?_assertEqual(#buffer{acc=[2,1],
                            buffer_fun=Fun,
                            max_size=4,
                            size=2},
                    Buffer2),
      ?_assertEqual(#buffer{acc=[3,2,1],
                            buffer_fun=Fun,
                            max_size=4,
                            size=3},
                    Buffer3),
      ?_assertEqual(#buffer{acc=[4,3,2,1],
                            buffer_fun=Fun,
                            max_size=4,
                            size=4},
                    Buffer4),
      ?_assertEqual(#buffer{acc=[],
                            buffer_fun=Fun,
                            max_size=4,
                            size=0},
                    Buffer5),
      ?_assertEqual(Buffer5#buffer{acc=[],
                                   size=0},
                    flush(Buffer5)),
      ?_assertEqual(0, ?MODULE:size(Buffer)),
      ?_assertEqual(1, ?MODULE:size(Buffer1)),
      ?_assertEqual(2, ?MODULE:size(Buffer2)),
      ?_assertEqual(3, ?MODULE:size(Buffer3)),
      ?_assertEqual(4, ?MODULE:size(Buffer4)),
      ?_assertEqual(0, ?MODULE:size(Buffer5))
     ]}.

-endif.
-------------------------------------------------------------------------------- /src/riak_kv_fsm_timing.erl: --------------------------------------------------------------------------------
%% -------------------------------------------------------------------
%%
%% riak_kv_fsm_timing: Common code for timing fsm states
%%
%% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------

%% @doc code that would otherwise be duplicated in both fsms
%% functions for gathering and calculating timing information
%% for fsm states.

-module(riak_kv_fsm_timing).

-export([add_timing/2, calc_timing/1]).

-type timing() :: {StageName::atom(), StageStartTime::erlang:timestamp()}.
-type timings() :: [timing()].
-type duration() :: {StageName::atom(), StageDuration::non_neg_integer()}.
-type durations() :: {ResponseUSecs::non_neg_integer(), [duration()]}.

%% @doc add timing information of `{State, os:timestamp()}' to the Timings

-spec add_timing(atom(), timings()) -> timings().
add_timing(State, Timings) when is_list(Timings) ->
    [{State, os:timestamp()}|Timings].

%% ---------------------------------------------------------------------

%% @doc Calc timing information - stored as `{Stage, StageStart}'
%% in reverse order.
%%
%% ResponseUsecs is calculated as time from reply to start of first stage.
%% If `reply' is in `stages' more than once, the earliest value is used.
%% If `reply' is not in `stages' fails with `badarg'
%% Since a stage's duration is the difference between its start time
%% and the next stages start time, we don't calculate the duration of
%% the final stage, it is just there as the end time of the
%% penultimate stage

-spec calc_timing(timings()) ->
                         durations().
calc_timing(Stages0) ->
    case proplists:get_value(reply, Stages0) of
        undefined ->
            erlang:error(badarg);
        ReplyTime ->
            %% Timings are accumulated in reverse order, so the head is
            %% the final stage and only supplies the end time.
            [{_FinalStage, StageEnd}|Stages] = Stages0,
            calc_timing(Stages, StageEnd, ReplyTime, orddict:new())
    end.

%% A stages duration is the difference between its start time
%% and the next stages start time.
-spec calc_timing(timings(), erlang:timestamp(),
                  erlang:timestamp(),
                  orddict:orddict()) ->
                         durations().
calc_timing([], FirstStageStart, ReplyTime, Acc) ->
    %% Time from first stage start until reply sent
    {timer:now_diff(ReplyTime, FirstStageStart), orddict:to_list(Acc)};
calc_timing([{Stage, StageStart} | Rest], StageEnd, ReplyTime, Acc0) ->
    StageDuration = timer:now_diff(StageEnd, StageStart),
    %% When the same stage appears more than once in
    %% a list of timings() aggregate the times into
    %% a total for that stage
    Acc = orddict:update_counter(Stage, StageDuration, Acc0),
    calc_timing(Rest, StageStart, ReplyTime, Acc).
-------------------------------------------------------------------------------- /src/riak_kv_hooks.erl: --------------------------------------------------------------------------------
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2012-2013 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
-module(riak_kv_hooks).

%% API
-export([add_conditional_postcommit/1,
         del_conditional_postcommit/1,
         get_conditional_postcommit/2]).

%% Exported for internal use by `riak_kv_sup'
-export([create_table/0]).

%% Types
-type hook() :: {module(), atom()}.
-type hook_type() :: conditional_postcommit.
-type bucket() :: riak_object:bucket().
-type key() :: riak_object:key().
-type bucket_props() :: riak_kv_bucket:props().

%%%===================================================================

%% @doc
%% Called by {@link riak_kv_sup} to create the public ETS table used to
%% track registered hooks. Having `riak_kv_sup' own the table ensures
%% that the table exists as long as riak_kv is running.
-spec create_table() -> ok.
create_table() ->
    %% Bag semantics: the same hook type may map to many hooks.
    ?MODULE = ets:new(?MODULE, [named_table, public, bag,
                                {write_concurrency, true},
                                {read_concurrency, true}]),
    restore_state(),
    ok.

%% @doc
%% Add a global conditional postcommit hook that is called for each
%% PUT operation. The hook is of the form `{Module, Fun}'. The specified
%% function is called with the relevant bucket, key, and bucket properties
%% at the time of the PUT operation and is expected to return `false' or
%% a normal postcommit hook specification that should be invoked.
-spec add_conditional_postcommit(hook()) -> ok.
add_conditional_postcommit(Hook) ->
    add_hook(conditional_postcommit, Hook).

%% @doc Remove a previously registered conditional postcommit hook
-spec del_conditional_postcommit(hook()) -> ok.
del_conditional_postcommit(Hook) ->
    del_hook(conditional_postcommit, Hook).

%% @doc
%% This function invokes each registered conditional postcommit
%% hook. Each hook will return either `false' or a list of active
%% hooks. This function then returns the combined list of active hooks.
-spec get_conditional_postcommit({bucket(), key()}, bucket_props()) -> [any()].
get_conditional_postcommit({{BucketType, Bucket}, _Key}, BucketProps) ->
    Registered = get_hooks(conditional_postcommit),
    %% The inner single-element generator evaluates each hook exactly
    %% once; hooks that return 'false' are dropped.
    Active = [Result || {Mod, Fun} <- Registered,
                        Result <- [Mod:Fun(BucketType, Bucket, BucketProps)],
                        Result =/= false],
    lists:flatten(Active);
get_conditional_postcommit(_BKey, _BucketProps) ->
    %% For now, we only support typed buckets.
    [].

%%%===================================================================

-spec add_hook(hook_type(), hook()) -> ok.
add_hook(Type, Hook) ->
    ets:insert(?MODULE, {Type, Hook}),
    save_state(),
    ok.

-spec del_hook(hook_type(), hook()) -> ok.
del_hook(Type, Hook) ->
    ets:delete_object(?MODULE, {Type, Hook}),
    save_state(),
    ok.

-spec get_hooks(hook_type()) -> [hook()].
get_hooks(Type) ->
    [Hook || {_, Hook} <- ets:lookup(?MODULE, Type)].

%% Newer OTP releases take set_env options as a proplist; older ones
%% took a bare timeout. Paper over the difference at compile time.
-ifdef(set_env_options).
-define(SETENV(Application, Par, Val, Opts),
        application:set_env(Application, Par, Val, [{timeout, Opts}])).
-else.
-define(SETENV(Application, Par, Val, Opts),
        application:set_env(Application, Par, Val, Opts)).
-endif.

%% Backup the current ETS state to the application environment just in case
%% riak_kv_sup dies and the ETS table is lost.
-spec save_state() -> ok.
save_state() ->
    Hooks = ets:tab2list(?MODULE),
    ok = ?SETENV(riak_kv, riak_kv_hooks, Hooks, infinity),
    ok.

%% Restore registered hooks in the unlikely case that riak_kv_sup died and
%% the ETS table was lost/recreated.
-spec restore_state() -> ok.
restore_state() ->
    case application:get_env(riak_kv, riak_kv_hooks) of
        undefined ->
            ok;
        {ok, Hooks} ->
            true = ets:insert_new(?MODULE, Hooks),
            ok
    end.
-------------------------------------------------------------------------------- /src/riak_kv_hotbackup_fsm_sup.erl: --------------------------------------------------------------------------------
%% -------------------------------------------------------------------
%%
%% riak_kv_hotbackup_fsm_sup: supervise the hotbackup state machine.
%%
%% Copyright (c) 2007-2011 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License.
You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------

%% @doc supervise the riak_kv hotbackup state machine

-module(riak_kv_hotbackup_fsm_sup).

-behaviour(supervisor).

-export([start_hotbackup_fsm/2]).
-export([start_link/0]).
-export([init/1]).

%% @doc Start a hotbackup coverage FSM as a child of this supervisor.
start_hotbackup_fsm(Node, Args) ->
    % No stat is updated on this, there will be a log for every backup instead
    % of incrementing a stat. This is not expected to be a regular event that
    % requires counting
    supervisor:start_child({?MODULE, Node}, Args).

%% @doc API for starting the supervisor.
-spec start_link() -> {ok, pid()} | ignore | {error, term()}.
start_link() ->
    supervisor:start_link({local, ?MODULE}, ?MODULE, []).

%% @doc supervisor callback: simple_one_for_one over coverage FSMs
%% specialised with the riak_kv_hotbackup_fsm callback module.
init([]) ->
    HotBackupFsmSpec =
        {undefined,
         {riak_core_coverage_fsm, start_link, [riak_kv_hotbackup_fsm]},
         temporary, 5000, worker, [riak_kv_hotbackup_fsm]},

    {ok, {{simple_one_for_one, 10, 10}, [HotBackupFsmSpec]}}.
-------------------------------------------------------------------------------- /src/riak_kv_http_cache.erl: --------------------------------------------------------------------------------
-module(riak_kv_http_cache).

-export([start_link/0,
         get_stats/0]).

-export([init/1,
         handle_call/3,
         handle_cast/2,
         handle_info/2,
         terminate/2,
         code_change/3]).
-define(SERVER, ?MODULE).

%% Cache state: timestamp of the last refresh plus the cached stats.
-record(st, {ts, stats = []}).

%% @doc Start the stats cache server, registered under the module name.
start_link() ->
    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%% @doc Fetch the stats, served from a cache at most one second old.
get_stats() ->
    gen_server:call(?MODULE, get_stats).

init(_Args) ->
    {ok, #st{}}.

handle_call(get_stats, _From, State0) ->
    #st{stats = Stats} = State = check_cache(State0),
    {reply, Stats, State}.

handle_cast(_Msg, State) ->
    {noreply, State}.

handle_info(_Msg, State) ->
    {noreply, State}.

terminate(_Reason, _State) ->
    ok.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.

%% @private Refresh the cached stats when absent or older than one second.
check_cache(#st{ts = undefined} = State) ->
    State#st{ts = os:timestamp(), stats = do_get_stats()};
check_cache(#st{ts = Then} = State) ->
    Now = os:timestamp(),
    AgeUSecs = timer:now_diff(Now, Then),
    if
        AgeUSecs < 1000000 ->
            State;
        true ->
            State#st{ts = Now, stats = do_get_stats()}
    end.

%% @private
do_get_stats() ->
    riak_kv_wm_stats:get_stats().
-------------------------------------------------------------------------------- /src/riak_kv_index_fsm_sup.erl: --------------------------------------------------------------------------------
%% -------------------------------------------------------------------
%%
%% riak_kv_index_fsm_sup: supervise the riak_kv index state machines.
%%
%% Copyright (c) 2007-2011 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied.
See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------

%% @doc supervise the riak_kv index state machines used to
%% process secondary index queries.

-module(riak_kv_index_fsm_sup).

-behaviour(supervisor).

-export([start_index_fsm/2]).
-export([start_link/0]).
-export([init/1]).

%% @doc Start an index coverage FSM on Node, recording a stat for the
%% creation or for the failure to create.
start_index_fsm(Node, Args) ->
    case supervisor:start_child({?MODULE, Node}, Args) of
        {ok, Pid} = Started ->
            ok = riak_kv_stat:update({index_create, Pid}),
            Started;
        StartError ->
            ok = riak_kv_stat:update(index_create_error),
            StartError
    end.

%% @doc API for starting the supervisor.
-spec start_link() -> {ok, pid()} | ignore | {error, term()}.
start_link() ->
    supervisor:start_link({local, ?MODULE}, ?MODULE, []).

%% @doc supervisor callback: simple_one_for_one over coverage FSMs
%% specialised with the riak_kv_index_fsm callback module.
init([]) ->
    IndexFsmSpec =
        {undefined,
         {riak_core_coverage_fsm, start_link, [riak_kv_index_fsm]},
         temporary, 5000, worker, [riak_kv_index_fsm]},

    {ok, {{simple_one_for_one, 10, 10}, [IndexFsmSpec]}}.
-------------------------------------------------------------------------------- /src/riak_kv_keys_fsm_sup.erl: --------------------------------------------------------------------------------
%% -------------------------------------------------------------------
%%
%% riak_kv_keys_fsm_sup: supervise the riak_kv keys state machines.
%%
%% Copyright (c) 2007-2011 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License.
You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------

%% @doc supervise the riak_kv keys state machines

-module(riak_kv_keys_fsm_sup).

-behaviour(supervisor).

-export([start_keys_fsm/2]).
-export([start_link/0]).
-export([init/1]).

%% @doc Start a list-keys coverage FSM on Node, recording a stat for
%% the creation or for the failure to create.
start_keys_fsm(Node, Args) ->
    case supervisor:start_child({?MODULE, Node}, Args) of
        {ok, Pid} = Started ->
            ok = riak_kv_stat:update({list_create, Pid}),
            Started;
        StartError ->
            ok = riak_kv_stat:update(list_create_error),
            StartError
    end.

%% @doc API for starting the supervisor.
-spec start_link() -> {ok, pid()} | ignore | {error, term()}.
start_link() ->
    supervisor:start_link({local, ?MODULE}, ?MODULE, []).

%% @doc supervisor callback: simple_one_for_one over coverage FSMs
%% specialised with the riak_kv_keys_fsm callback module.
init([]) ->
    KeysFsmSpec =
        {undefined,
         {riak_core_coverage_fsm, start_link, [riak_kv_keys_fsm]},
         temporary, 5000, worker, [riak_kv_keys_fsm]},

    {ok, {{simple_one_for_one, 10, 10}, [KeysFsmSpec]}}.
-------------------------------------------------------------------------------- /src/riak_kv_mrc_sink_sup.erl: --------------------------------------------------------------------------------
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2011 Basho Technologies, Inc.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------

%% @doc Supervisor for the sink processes used by {@link
%% riak_kv_wm_mapred} and {@link riak_kv_pb_mapred}.
-module(riak_kv_mrc_sink_sup).

-behaviour(supervisor).

%% API
-export([start_link/0]).
-export([start_sink/2,
         terminate_sink/1]).

%% Supervisor callbacks
-export([init/1]).

%%%===================================================================
%%% API functions
%%%===================================================================

%% @doc Start the supervisor.
-spec start_link() -> {ok, pid()} | ignore | {error, term()}.
start_link() ->
    supervisor:start_link({local, ?MODULE}, ?MODULE, []).

%% @doc Start a new worker under the supervisor.
-spec start_sink(pid(), list()) -> {ok, pid()}.
start_sink(Owner, Options) ->
    supervisor:start_child(?MODULE, [Owner, Options]).

%% @doc Stop a worker immediately
-spec terminate_sink(pid()) -> ok | {error, term()}.
terminate_sink(Sink) ->
    supervisor:terminate_child(?MODULE, Sink).

%%%===================================================================
%%% Supervisor callbacks
%%%===================================================================

%% @doc Initialize the supervisor. This is a `simple_one_for_one',
%% whose child spec is for starting `riak_kv_mrc_sink' FSMs.
-spec init([]) -> {ok, {{supervisor:strategy(),
                         pos_integer(),
                         pos_integer()},
                        [ supervisor:child_spec() ]}}.
init([]) ->
    SupFlags = {simple_one_for_one, 1000, 3600},
    SinkSpec = {undefined, % no registered name
                {riak_kv_mrc_sink, start_link, []},
                temporary, 2000, worker, [riak_kv_mrc_sink]},
    {ok, {SupFlags, [SinkSpec]}}.

%%%===================================================================
%%% Internal functions
%%%===================================================================
-------------------------------------------------------------------------------- /src/riak_kv_pb_bucket_key_apl.erl: --------------------------------------------------------------------------------
%% --------------------------------------------------------------------------
%%
%% riak_kv_pb_bucket_key_apl: Expose Core active preflist functionality to
%% Protocol Buffers
%%
%% Copyright (c) 2015 Basho Technologies, Inc.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License.
%% @doc The Bucket-Key Preflist (Primaries and Fallbacks) PB service for
%% Riak Core.  This service covers request message 33
%% (RpbGetBucketKeyPreflistReq) and produces response message 34
%% (RpbGetBucketKeyPreflistResp).
%% @end

-module(riak_kv_pb_bucket_key_apl).

%% Declare the PB service behaviour, consistent with the sibling service
%% modules (e.g. riak_kv_pb_csbucket), so the compiler checks the
%% callback set.
-behaviour(riak_api_pb_service).

-export([init/0,
         decode/2,
         encode/1,
         process/2,
         process_stream/3]).

-include_lib("riak_pb/include/riak_kv_pb.hrl").

%% @doc init/0 callback.  This service keeps no per-connection state.
init() ->
    undefined.

%% @doc decode/2 callback.  Decodes an incoming message; 33 is the code
%% for RpbGetBucketKeyPreflistReq.  Returns the decoded message plus the
%% {Permission, Bucket} pair used for access control.
decode(Code, Bin) when Code =:= 33 ->
    Msg = #rpbgetbucketkeypreflistreq{type =T, bucket =B, key =_Key} =
        riak_pb_codec:decode(Code, Bin),
    Bucket = riak_kv_pb_bucket:bucket_type(T, B),
    {ok, Msg, {"riak_kv.get_preflist", Bucket}}.

%% @doc encode/1 callback. Encodes an outgoing response message.
encode(Message) ->
    {ok, riak_pb_codec:encode(Message)}.

%% @doc process/2 callback.  Rejects zero-length type/bucket/key, then
%% replies with the annotated preflist (including partition numbers) for
%% {Bucket, Key}.
process(#rpbgetbucketkeypreflistreq{bucket = <<>>}, State) ->
    {error, "Bucket cannot be zero-length", State};
process(#rpbgetbucketkeypreflistreq{key = <<>>}, State) ->
    {error, "Key cannot be zero-length", State};
process(#rpbgetbucketkeypreflistreq{type = <<>>}, State) ->
    {error, "Type cannot be zero-length", State};
process(#rpbgetbucketkeypreflistreq{type=T, bucket=B0, key =K}, State) ->
    B = riak_kv_pb_bucket:maybe_create_bucket_type(T, B0),
    Preflist = riak_core_apl:get_apl_ann_with_pnum({B, K}),
    PbPreflist = riak_pb_kv_codec:encode_apl_ann(Preflist),
    {reply, #rpbgetbucketkeypreflistresp{preflist=PbPreflist}, State}.

%% @doc process_stream/3 callback.  This service never streams, so any
%% streamed message is ignored.
process_stream(_, _, State) ->
    {ignore, State}.
%% @doc Special PB service for Riak CS: fold over the objects in a
%% bucket.  Covers request message 40 (RpbCSBucketReq) and produces
%% response message 41 (RpbCSBucketResp).
%% @end

-module(riak_kv_pb_csbucket).

-include_lib("riak_pb/include/riak_kv_pb.hrl").
-include("riak_kv_index.hrl").

-behaviour(riak_api_pb_service).

-export([init/0,
         decode/2,
         encode/1,
         process/2,
         process_stream/3]).

-record(state, {client, req_id, req, continuation, result_count=0}).

%% @doc init/0 callback.  Caches a local client in the service state.
-spec init() -> any().
init() ->
    {ok, Client} = riak:local_client(),
    #state{client = Client}.

%% @doc decode/2 callback. Decodes an incoming message.
decode(MsgCode, MsgBin) ->
    {ok, riak_pb_codec:decode(MsgCode, MsgBin)}.

%% @doc encode/1 callback. Encodes an outgoing response message.
encode(Resp) ->
    {ok, riak_pb_codec:encode(Resp)}.

%% @doc process/2 callback.  Translates the request into a `$bucket'
%% index query (returning object bodies) and starts a streaming fold.
process(#rpbcsbucketreq{start_key = StartKey,
                        start_incl = StartIncl,
                        continuation = Continuation,
                        end_key = EndKey,
                        end_incl = EndIncl} = Req, State) ->
    QueryResult =
        riak_index:to_index_query(
            [{field, <<"$bucket">>},
             {start_term, StartKey},
             {start_inclusive, StartIncl},
             {end_term, EndKey},
             {end_inclusive, EndIncl},
             {start_key, StartKey},
             {return_body, true},
             {continuation, Continuation}]),
    maybe_perform_query(QueryResult, Req, State).

%% @private Run the query if it parsed, otherwise report the parse error.
maybe_perform_query({error, Reason}, _Req, State) ->
    {error, {format, Reason}, State};
maybe_perform_query({ok, Query}, Req, State) ->
    #rpbcsbucketreq{type = Type, bucket = Bucket0,
                    max_results = MaxResults, timeout = Timeout} = Req,
    Bucket = maybe_bucket_type(Type, Bucket0),
    Opts = riak_index:add_timeout_opt(Timeout,
                                      [{max_results, MaxResults},
                                       {pagination_sort, true}]),
    {ok, ReqId, _FSMPid} =
        riak_client:stream_get_index(Bucket, Query, Opts,
                                     State#state.client),
    {reply, {stream, ReqId}, State#state{req_id = ReqId, req = Req}}.
%% @doc process_stream/3 callback.  Handle streamed responses from the
%% index-fold FSM, forwarding result batches to the PB client and
%% terminating the stream on `done' or error.
process_stream({ReqId, done}, ReqId, State=#state{req_id=ReqId,
                                                  continuation=Continuation,
                                                  req=Req,
                                                  result_count=Count}) ->
    %% Only add the continuation if there may be more results to send:
    %% a continuation is meaningful only when the fold stopped because
    %% max_results was reached, not because the range was exhausted.
    #rpbcsbucketreq{max_results=MaxResults} = Req,
    Resp = case is_integer(MaxResults) andalso Count >= MaxResults of
               true -> #rpbcsbucketresp{done=1, continuation=Continuation};
               false -> #rpbcsbucketresp{done=1}
           end,
    {done, Resp, State};
%% Empty batches carry no information for the client; swallow them.
process_stream({ReqId, {results, []}}, ReqId, State=#state{req_id=ReqId}) ->
    {ignore, State};
process_stream({ReqId, {results, Results0}}, ReqId, State=#state{req_id=ReqId, req=Req, result_count=Count}) ->
    #rpbcsbucketreq{max_results=MaxResults, bucket=Bucket} = Req,
    Count2 = length(Results0) + Count,
    %% results are {o, Key, Binary} where binary is a riak object.
    %% The continuation is computed from the LAST result of this batch,
    %% but only becomes non-undefined once Count2 reaches max_results
    %% (see make_continuation/3 below).
    Continuation = make_continuation(MaxResults, lists:last(Results0), Count2),
    Results = [encode_result(Bucket, {K, V}) || {o, K, V} <- Results0],
    {reply, #rpbcsbucketresp{objects=Results},
     State#state{continuation=Continuation, result_count=Count2}};
%% Any other tagged message for our ReqId is an error from the fold;
%% clearing req_id prevents further messages from matching the clauses
%% above.
process_stream({ReqId, Error}, ReqId, State=#state{req_id=ReqId}) ->
    {error, {format, Error}, State#state{req_id=undefined}};
%% Messages for stale/unknown requests are dropped.
process_stream(_,_,State) ->
    {ignore, State}.

%% @doc Encode one {Key, ObjectBinary} result as an RpbIndexObject: the
%% binary is re-inflated so contents and vclock can be PB-encoded.
encode_result(B, {K, V}) ->
    RObj = riak_object:from_binary(B, K, V),
    Contents = riak_pb_kv_codec:encode_contents(riak_object:get_contents(RObj)),
    VClock = pbify_rpbvc(riak_object:vclock(RObj)),
    GetResp = #rpbgetresp{vclock=VClock, content=Contents},
    #rpbindexobject{key=K, object=GetResp}.

%% @doc PB-encode a vector clock.
pbify_rpbvc(Vc) ->
    riak_object:encode_vclock(Vc).

%% @doc Build a continuation from the last-seen key.  NOTE: the first
%% and third parameters are the SAME variable, so this head only matches
%% when the running result count equals max_results exactly — i.e. the
%% page is full and a further page may exist.  Otherwise: undefined.
make_continuation(MaxResults, {o, K, _V}, MaxResults) ->
    riak_index:make_continuation([K]);
make_continuation(_, _, _) ->
    undefined.
%% @doc Resolve the bucket identifier for a request: requests against
%% the default bucket type (type absent or <<"default">>) use the bare
%% bucket name; any other type yields a {Type, Bucket} tuple.
maybe_bucket_type(Type, Bucket)
        when Type =:= undefined; Type =:= <<"default">> ->
    Bucket;
maybe_bucket_type(Type, Bucket) ->
    {Type, Bucket}.
-export([action/2,
         get_limits/0,
         redo/0]).

%% A read reference identifies the object to be (re-)read.
-type read_reference() :: {riak_object:bucket(), riak_object:key()}.
-type job_id() :: pos_integer().

-export_type([read_reference/0, job_id/0]).

%%%============================================================================
%%% API
%%%============================================================================

%% @doc Start the singleton reader, using the configured data root for
%% the overflow queue.
-spec start_link() -> {ok, pid()}.
start_link() ->
    start_link(app_helper:get_env(riak_kv, reader_dataroot)).

%% @doc Start the singleton reader with an explicit overflow file path.
start_link(DataRoot) ->
    riak_kv_queue_manager:start_link(?MODULE, DataRoot).

%% @doc To be used when starting a reader for a specific workload.
-spec start_job(job_id()) -> {ok, pid()}.
start_job(JobID) ->
    start_job(JobID, app_helper:get_env(riak_kv, reader_dataroot)).

%% @doc Start a workload-specific reader with an explicit file path.
start_job(JobID, DataRoot) ->
    riak_kv_queue_manager:start_job(JobID, ?MODULE, DataRoot).

%% @doc Queue a read against the singleton reader.
-spec request_read(read_reference()) -> ok.
request_read(ReadReference) ->
    request_read(?MODULE, ReadReference).

%% @doc Queue a read against a specific reader process.
-spec request_read(pid()|module(), read_reference()) -> ok.
request_read(Reader, ReadReference) ->
    riak_kv_queue_manager:request(Reader, ReadReference).

%% @doc Stats for the singleton reader's queue.
-spec read_stats() ->
    list({atom(), non_neg_integer()|riak_kv_overflow_queue:queue_stats()}).
read_stats() -> read_stats(?MODULE).

%% @doc Stats for a specific reader's queue.
-spec read_stats(pid()|module()) ->
    list({atom(), non_neg_integer()|riak_kv_overflow_queue:queue_stats()}).
read_stats(Reader) ->
    riak_kv_queue_manager:stats(Reader).

%% @doc Drop all queued read references from the singleton reader.
-spec clear_queue() -> ok.
clear_queue() -> clear_queue(?MODULE).

%% @doc Drop all queued read references from a specific reader.
-spec clear_queue(pid()|module()) -> ok.
clear_queue(Reader) ->
    riak_kv_queue_manager:clear_queue(Reader).

%% @doc Stop the job once the queue is empty.
-spec stop_job(pid()) -> ok.
stop_job(Reader) ->
    riak_kv_queue_manager:stop_job(Reader).
%%%============================================================================
%%% Callback functions (riak_kv_queue_manager behaviour)
%%%============================================================================

%% @doc Fetch the {RedoTimeout, QueueLimit, OverflowLimit} configuration,
%% falling back to the module defaults for any unset value.
-spec get_limits() -> {pos_integer(), pos_integer(), pos_integer()}.
get_limits() ->
    Cfg = fun(Key, Default) -> app_helper:get_env(riak_kv, Key, Default) end,
    {Cfg(reader_redo_timeout, ?REDO_TIMEOUT),
     Cfg(reader_queue_limit, ?QUEUE_LIMIT),
     Cfg(reader_overflow_limit, ?OVERFLOW_LIMIT)}.

%% @doc Attempt to read the object so read repair may be triggered.
%% Consistent objects are repaired via the ensemble exchange; regular
%% objects via a plain local-client GET.  Always reports success.
-spec action(read_reference(), boolean()) -> boolean().
action({Bucket, Key}, _Redo) ->
    {ok, Client} = riak:local_client(),
    case riak_kv_util:consistent_object(Bucket) of
        true ->
            _ = riak_kv_exchange_fsm:repair_consistent({Bucket, Key});
        false ->
            _ = riak_client:get(Bucket, Key, Client)
    end,
    true.

%% @doc Failed reads are always re-queued.
-spec redo() -> boolean().
redo() -> true.
%% @doc Reformat all riak objects held by this node's running vnodes to
%% a given object version, e.g. ahead of a cluster downgrade.
-module(riak_kv_reformat).

-export([run/2]).

%% @doc Reformat every object on every running kv vnode to ObjectVsn.
%% Opts (proplist): `concurrency' — vnodes reformatted in parallel
%% (default 2); `kill_handoffs' — kill in-flight handoffs rather than
%% waiting for them to drain (default true).
%% Returns {SuccessTotal, IgnoredTotal, ErrorTotal}.
run(ObjectVsn, Opts) ->
    Concurrency = proplists:get_value(concurrency, Opts, 2),
    KillHandoffs = proplists:get_value(kill_handoffs, Opts, true),
    lager:info("Starting object reformat with concurrency: ~p", [Concurrency]),
    lager:info("Setting preferred object format to ~p", [ObjectVsn]),
    set_capabilities(ObjectVsn),
    lager:info("Preferred object format set to ~p", [ObjectVsn]),
    case KillHandoffs of
        true ->
            lager:info("Killing any inbound and outbound handoffs", []);
        false ->
            lager:info("Waiting on any in-flight inbound and outbound handoffs", [])
    end,
    kill_or_wait_on_handoffs(KillHandoffs, 0),

    %% migrate each running vnode, Concurrency at a time
    Running = riak_core_vnode_manager:all_vnodes(riak_kv_vnode),
    Counts = riak_core_util:pmap(fun({riak_kv_vnode, Idx, _}) ->
                                         lager:info("Reformatting objects on partition ~p",
                                                    [Idx]),
                                         {S, I, E} = reformat_partition(Idx),
                                         lager:info("Completed reformatting objects on "
                                                    "partition ~p. Success: ~p. Ignored: ~p. "
                                                    "Error: ~p", [Idx, S, I, E]),
                                         {S, I, E}
                                 end,
                                 Running, Concurrency),
    {SuccessCounts, IgnoredCounts, ErrorCounts} = lists:unzip3(Counts),
    SuccessTotal = lists:sum(SuccessCounts),
    IgnoredTotal = lists:sum(IgnoredCounts),
    ErrorTotal = lists:sum(ErrorCounts),
    %% typo fixes in operator-facing log messages:
    %% "reformating" -> "reformatting", "dowgrading" -> "downgrading"
    lager:info("Completed reformatting all partitions to ~p. Success: ~p. Ignored: ~p. Error: ~p",
               [ObjectVsn, SuccessTotal, IgnoredTotal, ErrorTotal]),
    if ErrorTotal > 0 ->
            lager:info("There were errors reformatting ~p keys. Re-run before downgrading",
                       [ErrorTotal]);
       true -> ok
    end,
    {SuccessTotal, IgnoredTotal, ErrorTotal}.
%% @doc Set the preferred object format capability to Vsn only.
%% Although we could just switch the preference order, removing the
%% other versions preemptively downgrades the whole cluster (after ring
%% convergence), reducing the amount of data needing to be reformatted
%% on other nodes (assuming those nodes will be downgraded as well).
set_capabilities(Vsn) ->
    riak_core_capability:register({riak_kv, object_format}, [Vsn], Vsn).

%% @doc Either kill outstanding handoffs immediately, or poll once a
%% second until none remain (logging the count every tenth poll).
kill_or_wait_on_handoffs(true, _PollCount) ->
    riak_core_handoff_manager:kill_handoffs();
kill_or_wait_on_handoffs(false, PollCount) ->
    case num_running_handoffs() of
        0 ->
            %% drained — issuing kill_handoffs now is effectively a no-op
            kill_or_wait_on_handoffs(true, PollCount);
        Outstanding ->
            maybe_log_outstanding(Outstanding, PollCount),
            timer:sleep(1000),
            kill_or_wait_on_handoffs(false, PollCount + 1)
    end.

%% @private Log the outstanding-handoff count once every ten polls.
maybe_log_outstanding(Outstanding, PollCount) when PollCount rem 10 =:= 0 ->
    lager:info("~p handoffs still outstanding", [Outstanding]);
maybe_log_outstanding(_, _) ->
    ok.

%% @doc Fold over one partition, reformatting each object; returns
%% {Success, Ignored, Error} counts for the partition.
reformat_partition(Idx) ->
    Reformatter =
        fun(BKey, Value, Acc) -> reformat_object(Idx, BKey, Value, Acc) end,
    riak_kv_vnode:fold({Idx, node()}, Reformatter, {0, 0, 0}).

%% @doc Reformat a single object, updating the {Success, Ignored, Error}
%% accumulator.  Objects already at the v0 binary format are ignored.
reformat_object(Idx, BKey, Value, {Ok, Skip, Err}) ->
    case riak_object:binary_version(Value) of
        v0 ->
            {Ok, Skip + 1, Err};
        _ ->
            %% TODO: accumulate and handle errors
            case riak_kv_vnode:reformat_object(Idx, BKey) of
                ok -> {Ok + 1, Skip, Err};
                {error, not_found} -> {Ok, Skip + 1, Err};
                {error, _} -> {Ok, Skip, Err + 1}
            end
    end.

%% @doc Total active handoff senders plus receivers on this node.
num_running_handoffs() ->
    active_children(riak_core_handoff_sender_sup)
        + active_children(riak_core_handoff_receiver_sup).

%% @private Number of active children of the given supervisor.
active_children(Sup) ->
    proplists:get_value(active, supervisor:count_children(Sup)).
%% @doc Sidejob worker that applies stat updates away from the calling
%% process, shielding callers from stat-subsystem latency and errors.
-module(riak_kv_stat_worker).
-behaviour(gen_server).

%% API
-export([update/1]).

%% gen_server callbacks
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
         terminate/2, code_change/3]).

-record(state, {}).

%%%===================================================================
%%% API
%%%===================================================================

%% @doc Queue a stat update onto the riak_kv_stat_sj sidejob resource.
update(UpdateArg) ->
    sidejob:unbounded_cast(riak_kv_stat_sj, {update, UpdateArg}).

%%%===================================================================
%%% gen_server callbacks
%%%===================================================================

%% Sidejob passes the worker name at start; no state is kept.
init([_Name]) ->
    {ok, #state{}}.

%% There is no synchronous API; any call simply gets ok.
handle_call(_Request, _From, State) ->
    {reply, ok, State}.
%% @doc Perform a queued stat update; failures are reported back to
%% riak_kv_stat rather than crashing the worker.
handle_cast({update, Arg}, State) ->
    ok = safe_update(Arg),
    {noreply, State}.

%% @private Run the update, converting any exception into an error stat.
safe_update(Arg) ->
    try
        riak_kv_stat:perform_update(Arg),
        ok
    catch
        Class:Error ->
            riak_kv_stat:stat_update_error(Arg, Class, Error),
            ok
    end.

handle_info(_Info, State) ->
    {noreply, State}.

terminate(_Reason, _State) ->
    ok.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
ringready() ->
    riak_core_status:ringready().

transfers() ->
    riak_core_status:transfers().

%% @doc Get status information about the node local vnodes.
-spec vnode_status() -> [{atom(), term()}].
vnode_status() ->
    %% Get the kv vnode indexes and the associated pids for the node.
    PrefLists = riak_core_vnode_manager:all_index_pid(riak_kv_vnode),
    riak_kv_vnode:vnode_status(PrefLists).

%% @doc Get status of 2i reformat. If the backend requires reformatting, a boolean
%% value is returned indicating if all partitions on the node have completed
%% upgrading (if downgrading then false indicates all partitions have been downgraded.
%% If the backend does not require reformatting, undefined is returned
-spec fixed_index_status() -> boolean() | undefined.
fixed_index_status() ->
    Backend = app_helper:get_env(riak_kv, storage_backend),
    fixed_index_status(Backend).

fixed_index_status(Affected) when Affected =:= riak_kv_eleveldb_backend orelse
                                  Affected =:= riak_kv_multi_backend ->
    Statuses = vnode_status(),
    fixed_index_status(Affected, Statuses);
fixed_index_status(_) ->
    undefined.

%% Every partition must report fixed indexes for the node to count as
%% fixed (conjunctive fold over all statuses).
fixed_index_status(Affected, Statuses) ->
    lists:foldl(fun(Elem, Acc) -> Acc andalso are_indexes_fixed(Affected, Elem) end,
                true, Statuses).

are_indexes_fixed(riak_kv_eleveldb_backend, {_Idx, [{backend_status,_,Status}]}) ->
    are_indexes_fixed(riak_kv_eleveldb_backend, Status);
are_indexes_fixed(riak_kv_eleveldb_backend, Status) ->
    case proplists:get_value(fixed_indexes, Status) of
        Bool when is_boolean(Bool) -> Bool;
        _ -> false
    end;
are_indexes_fixed(riak_kv_multi_backend, {_Idx, [{backend_status,_,Status}]}) ->
    %% BUG FIX: the membership test must inspect each sub-backend's own
    %% status proplist (S), not the outer list of {Name, SubStatus}
    %% pairs.  The original tested `Status', which never contains a bare
    %% {mod, _} element, so `Statuses' was always [] and the fold below
    %% vacuously returned true regardless of the real index state.
    Statuses = [S || {_, S} <- Status,
                     lists:member({mod, riak_kv_eleveldb_backend}, S)],
    fixed_index_status(riak_kv_eleveldb_backend, Statuses).
%% @doc Assemble the stats proplist.  The web variant expands disk stats
%% into JSON-friendly structs; the console variant returns them raw.
get_stats(web) ->
    aliases()
        ++ expand_disk_stats(riak_kv_stat_bc:disk_stats())
        ++ riak_kv_stat_bc:app_stats();
get_stats(console) ->
    aliases()
        ++ riak_kv_stat_bc:disk_stats()
        ++ riak_kv_stat_bc:app_stats().

%% @doc Resolve all exometer aliases to values, returning a proplist
%% keyed by alias name, sorted by key.
%% NOTE(review): relies on exometer_alias:prefix_foldl/3 iterating every
%% alias (empty-binary prefix) and exometer:get_value/2 returning
%% {ok, [{Datapoint, Value}]} per entry — confirm against exometer docs.
aliases() ->
    %% Group the registered aliases by exometer entry so each entry is
    %% queried once for all of its datapoints.
    Grouped = exometer_alias:prefix_foldl(
                <<>>,
                fun(Alias, Entry, DP, Acc) ->
                        orddict:append(Entry, {DP, Alias}, Acc)
                end, orddict:new()),
    lists:keysort(
      1,
      lists:foldl(
        fun({K, DPs}, Acc) ->
                case exometer:get_value(K, [D || {D,_} <- DPs]) of
                    {ok, Vs} when is_list(Vs) ->
                        %% Map each returned datapoint value back to the
                        %% alias name registered for that datapoint.
                        lists:foldr(fun({D,V}, Acc1) ->
                                            {_,N} = lists:keyfind(D,1,DPs),
                                            [{N,V}|Acc1]
                                    end, Acc, Vs);
                    Other ->
                        %% Entry unavailable: report undefined for a
                        %% disabled entry, 0 for any other failure, for
                        %% every alias of the entry.
                        Val = case Other of
                                  {ok, disabled} -> undefined;
                                  _ -> 0
                              end,
                        lists:foldr(fun({_,N}, Acc1) ->
                                            [{N,Val}|Acc1]
                                    end, Acc, DPs)
                end
        end, [], orddict:to_list(Grouped))).

%% @doc Rewrap raw disk stats as mochijson-style structs for the web UI.
expand_disk_stats([{disk, Stats}]) ->
    [{disk, [{struct, [{id, list_to_binary(Id)}, {size, Size}, {used, Used}]}
             || {Id, Size, Used} <- Stats]}].
%% @doc supervise the core Riak services
-module(riak_kv_sup).


-behaviour(supervisor).

-export([start_link/0]).
-export([init/1]).

%% Inline conditional: yields A when Bool is true, otherwise B.  Used by
%% init/1 to include a child spec only when its precondition holds.
-define (IF (Bool, A, B), if Bool -> A; true -> B end).

%% @doc API for starting the supervisor.
start_link() ->
    supervisor:start_link({local, ?MODULE}, ?MODULE, []).
%% @doc supervisor callback: build the child specs for the core riak_kv
%% services and supervise them one_for_one.  The vnode master is only
%% included when a storage backend is configured, and the ensembles
%% manager only when consensus ensembles are enabled.
init([]) ->
    riak_kv_entropy_info:create_table(),
    riak_kv_hooks:create_table(),
    VMaster = {riak_kv_vnode_master,
               {riak_core_vnode_master, start_link,
                [riak_kv_vnode, riak_kv_legacy_vnode, riak_kv]},
               permanent, 5000, worker, [riak_core_vnode_master]},
    HTTPCache = {riak_kv_http_cache,
                 {riak_kv_http_cache, start_link, []},
                 permanent, 5000, worker, [riak_kv_http_cache]},
    FastPutSup = {riak_kv_w1c_sup,
                  {riak_kv_w1c_sup, start_link, []},
                  permanent, infinity, supervisor, [riak_kv_w1c_sup]},
    DeleteSup = {riak_kv_delete_sup,
                 {riak_kv_delete_sup, start_link, []},
                 permanent, infinity, supervisor, [riak_kv_delete_sup]},
    BucketsFsmSup = {riak_kv_buckets_fsm_sup,
                     {riak_kv_buckets_fsm_sup, start_link, []},
                     permanent, infinity, supervisor, [riak_kv_buckets_fsm_sup]},
    KeysFsmSup = {riak_kv_keys_fsm_sup,
                  {riak_kv_keys_fsm_sup, start_link, []},
                  permanent, infinity, supervisor, [riak_kv_keys_fsm_sup]},
    IndexFsmSup = {riak_kv_index_fsm_sup,
                   {riak_kv_index_fsm_sup, start_link, []},
                   permanent, infinity, supervisor, [riak_kv_index_fsm_sup]},
    ClusterAAEFsmSup = {riak_kv_clusteraae_fsm_sup,
                        {riak_kv_clusteraae_fsm_sup, start_link, []},
                        permanent, infinity, supervisor, [riak_kv_clusteraae_fsm_sup]},
    HotBackupAAEFsmSup = {riak_kv_hotbackup_fsm_sup,
                          {riak_kv_hotbackup_fsm_sup, start_link, []},
                          permanent, infinity, supervisor, [riak_kv_hotbackup_fsm_sup]},
    SinkFsmSup = {riak_kv_mrc_sink_sup,
                  {riak_kv_mrc_sink_sup, start_link, []},
                  permanent, infinity, supervisor, [riak_kv_mrc_sink_sup]},
    EntropyManager = {riak_kv_entropy_manager,
                      {riak_kv_entropy_manager, start_link, []},
                      permanent, 30000, worker, [riak_kv_entropy_manager]},
    TictacFSManager = {riak_kv_ttaaefs_manager,
                       {riak_kv_ttaaefs_manager, start_link, []},
                       permanent, 30000, worker, [riak_kv_ttaaefs_manager]},
    ReplRTQSrc = {riak_kv_replrtq_src,
                  {riak_kv_replrtq_src, start_link, []},
                  permanent, 30000, worker, [riak_kv_replrtq_src]},
    ReplRTQSnk = {riak_kv_replrtq_snk,
                  {riak_kv_replrtq_snk, start_link, []},
                  permanent, 30000, worker, [riak_kv_replrtq_snk]},
    %% BUG FIX: the modules list of this child spec previously named
    %% riak_kv_replrtq_snk (copy-paste from the spec above); it must name
    %% the child's own callback module for release handling to track it.
    ReplRTQPeer = {riak_kv_replrtq_peer,
                   {riak_kv_replrtq_peer, start_link, []},
                   permanent, 30000, worker, [riak_kv_replrtq_peer]},
    Reaper = {riak_kv_reaper,
              {riak_kv_reaper, start_link, []},
              permanent, 30000, worker, [riak_kv_reaper]},
    Eraser = {riak_kv_eraser,
              {riak_kv_eraser, start_link, []},
              permanent, 30000, worker, [riak_kv_eraser]},
    Reader = {riak_kv_reader,
              {riak_kv_reader, start_link, []},
              permanent, 30000, worker, [riak_kv_reader]},

    EnsemblesKV = {riak_kv_ensembles,
                   {riak_kv_ensembles, start_link, []},
                   permanent, 30000, worker, [riak_kv_ensembles]},

    % Figure out which processes we should run...
    HasStorageBackend = (app_helper:get_env(riak_kv, storage_backend) /= undefined),

    % Build the process list...
    Processes = lists:flatten([
        EntropyManager,
        TictacFSManager,
        ReplRTQSrc,
        ReplRTQSnk,
        ReplRTQPeer,
        Reaper,
        Eraser,
        Reader,
        ?IF(HasStorageBackend, VMaster, []),
        FastPutSup,
        DeleteSup,
        SinkFsmSup,
        BucketsFsmSup,
        KeysFsmSup,
        IndexFsmSup,
        ClusterAAEFsmSup,
        HotBackupAAEFsmSup,
        [EnsemblesKV || riak_core_sup:ensembles_enabled()],
        HTTPCache
    ]),

    % Run the processes...
    {ok, {{one_for_one, 10, 10}, Processes}}.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License.  You may obtain
%% a copy of the License at
%%
%%   http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied.  See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%%
-module(riak_kv_update_hook).

-include_lib("riak_core/include/riak_core_vnode.hrl").

-export_type([object_pair/0, update_reason/0, repair/0, partition/0, handoff_dest/0]).

%% New object plus the previous object when a read-before-write
%% produced one; 'no_old_object' otherwise.
-type object_pair() :: {riak_object:riak_object(), riak_object:riak_object() | no_old_object}.
%% Outcome classification for repair-driven updates.
-type repair() :: full_repair | tree_repair | failed_repair.
%% Why an update is being delivered to the hook.
-type update_reason() ::
        delete
        | handoff
        | put
        | anti_entropy
        | {delete, repair()}
        | {anti_entropy, repair()}
        | {anti_entropy_delete, repair()}
        | anti_entropy_delete.


%% @doc Update a Riak object, given a reason and partition under which
%%      the object is being indexed.  The object pair contains the new
%%      and old objects, in the case where a read-before-write resulted
%%      in an old object.
-callback update(
    object_pair(),
    update_reason(),
    partition()
) ->
    ok.

%% @doc Update a Riak object encoded as an erlang binary.  This function
%%      is typically called from the write-once path, where there is no
%%      old object to pass.
-callback update_binary(
    riak_core_bucket:bucket(),
    riak_object:key(),
    binary(),
    update_reason(),
    partition()
) ->
    ok.

%% @doc Determine whether a bucket requires an existing object,
%%      based on its properties.  If this function returns true,
%%      this may result in a read-before-write in the vnode.
-callback requires_existing_object(riak_kv_bucket:props()) ->
    boolean().

%% @doc Determine whether handoff should start.
-callback should_handoff(handoff_dest()) ->
    boolean().
--------------------------------------------------------------------------------
/src/riak_kv_w1c_sup.erl:
--------------------------------------------------------------------------------
%% -------------------------------------------------------------------
%% Copyright (c) 2015 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License.  You may obtain
%% a copy of the License at
%%
%%   http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied.  See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
-module(riak_kv_w1c_sup).

-behaviour(supervisor).

-export([start_link/0]).
-export([init/1]).

%%%===================================================================
%%% API functions
%%%===================================================================

-spec(start_link() ->
    {ok, Pid :: pid()} | ignore | {error, Reason :: term()}).
%% @doc Start the write-once worker supervisor, registered locally
%%      under the module name.
start_link() ->
    supervisor:start_link({local, ?MODULE}, ?MODULE, []).

%%%===================================================================
%%% Supervisor callbacks
%%%===================================================================

-spec(init(Args :: term()) ->
    {ok, {SupFlags :: {RestartStrategy :: supervisor:strategy(),
                       MaxR :: non_neg_integer(), MaxT :: pos_integer()},
          [ChildSpec :: supervisor:child_spec()]
         }}).
%% @doc One permanent riak_kv_w1c_worker child per registered worker
%%      name; one_for_one with at most 10 restarts in 10 seconds.
init([]) ->
    WorkerNames = tuple_to_list(riak_kv_w1c_worker:workers()),
    ToSpec =
        fun(Name) ->
            {Name,
             {riak_kv_w1c_worker, start_link, [Name]},
             permanent, 5000, worker, [riak_kv_w1c_worker]}
        end,
    {ok, {{one_for_one, 10, 10}, lists:map(ToSpec, WorkerNames)}}.

%%%===================================================================
%%% Internal functions
%%%===================================================================
--------------------------------------------------------------------------------
/src/riak_kv_wm_ping.erl:
--------------------------------------------------------------------------------
%% -------------------------------------------------------------------
%%
%% riak_kv_wm_ping: simple Webmachine resource for availability test
%%
%% Copyright (c) 2007-2013 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License.  You may obtain
%% a copy of the License at
%%
%%   http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied.  See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------

%% @doc simple Webmachine resource for availability test

-module(riak_kv_wm_ping).

%% webmachine resource exports
-export([
    init/1,
    is_authorized/2,
    to_html/2
]).

-include_lib("webmachine/include/webmachine.hrl").

%% @doc No per-request state is needed for a ping.
init([]) ->
    {ok, undefined}.

%% @doc Gate the request on the API security settings.
is_authorized(ReqData, Ctx) ->
    case riak_api_web_security:is_authorized(ReqData) of
        {true, _SecContext} ->
            {true, ReqData, Ctx};
        false ->
            {"Basic realm=\"Riak\"", ReqData, Ctx};
        insecure ->
            %% XXX 301 may be more appropriate here, but since the http and
            %% https port are different and configurable, it is hard to figure
            %% out the redirect URL to serve.
            Body = <<"Security is enabled and "
                     "Riak does not accept credentials over HTTP. Try HTTPS "
                     "instead.">>,
            {{halt, 426}, wrq:append_to_resp_body(Body, ReqData), Ctx}
    end.

%% @doc Any reply at all proves the node is up.
to_html(ReqData, Ctx) ->
    {"OK", ReqData, Ctx}.
--------------------------------------------------------------------------------
/src/riak_kv_wm_raw.hrl:
--------------------------------------------------------------------------------
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License.
You may obtain
%% a copy of the License at

%%   http://www.apache.org/licenses/LICENSE-2.0

%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied.  See the License for the
%% specific language governing permissions and limitations
%% under the License.

%% Constants shared by the raw_http resources.

%% Keys used in riak_object metadata dictionaries.
-define(MD_CTYPE, <<"content-type">>).
-define(MD_CHARSET, <<"charset">>).
-define(MD_ENCODING, <<"content-encoding">>).
-define(MD_VTAG, <<"X-Riak-VTag">>).
-define(MD_LINKS, <<"Links">>).
-define(MD_LASTMOD, <<"X-Riak-Last-Modified">>).
-define(MD_USERMETA, <<"X-Riak-Meta">>).
-define(MD_INDEX, <<"index">>).
-define(MD_DELETED, <<"X-Riak-Deleted">>).
-define(MD_VAL_ENCODING, <<"X-Riak-Val-Encoding">>).

%% HTTP header field names.
-define(HEAD_CTYPE, "Content-Type").
-define(HEAD_VCLOCK, "X-Riak-Vclock").
-define(HEAD_LINK, "Link").
-define(HEAD_ENCODING, "Content-Encoding").
-define(HEAD_CLIENT, "X-Riak-ClientId").
-define(HEAD_USERMETA_PREFIX, "x-riak-meta-").
-define(HEAD_INDEX_PREFIX, "x-riak-index-").
-define(HEAD_DELETED, "X-Riak-Deleted").
-define(HEAD_TIMEOUT, "X-Riak-Timeout").
-define(HEAD_CRDT_CONTEXT, "X-Riak-CRDT-Ctx").
-define(HEAD_IF_NOT_MODIFIED, "X-Riak-If-Not-Modified").

%% JSON field names used in bucket properties.
-define(JSON_PROPS, <<"props">>).
-define(JSON_BUCKETS, <<"buckets">>).
-define(JSON_KEYS, <<"keys">>).
-define(JSON_LINKFUN, <<"linkfun">>).
-define(JSON_MOD, <<"mod">>).
-define(JSON_FUN, <<"fun">>).
-define(JSON_ARG, <<"arg">>).
-define(JSON_CHASH, <<"chash_keyfun">>).
-define(JSON_JSFUN, <<"jsfun">>).
-define(JSON_JSANON, <<"jsanon">>).
-define(JSON_JSBUCKET, <<"bucket">>).
-define(JSON_JSKEY, <<"key">>).
-define(JSON_ALLOW_MULT, <<"allow_mult">>).
-define(JSON_EXTRACT, <<"search_extractor">>).
-define(JSON_EXTRACT_LEGACY, <<"rs_extractfun">>).
-define(JSON_DATATYPE, <<"datatype">>).
-define(JSON_HLL_PRECISION, <<"hll_precision">>).

%% HTTP query parameter names.
-define(Q_PROPS, "props").
-define(Q_BUCKETS, "buckets").
-define(Q_KEYS, "keys").
-define(Q_FALSE, "false").
-define(Q_TRUE, "true").
-define(Q_STREAM, "stream").
-define(Q_VTAG, "vtag").
-define(Q_RETURNBODY, "returnbody").
-define(Q_2I_RETURNTERMS, "return_terms").
-define(Q_2I_MAX_RESULTS, "max_results").
-define(Q_2I_TERM_REGEX, "term_regex").
-define(Q_2I_CONTINUATION, "continuation").
-define(Q_2I_PAGINATION_SORT, "pagination_sort").
-define(Q_RESULTS, "results").
-define(Q_RETURNVALUE, "returnvalue").
-define(Q_2I_MAPFOLD, "mapfold").
-define(Q_MF_MAPFOLDMOD, "mapfoldmod").
-define(Q_MF_MAPFOLDOPTS, "mapfoldoptions").
-define(Q_AAEFOLD_FILTER, "filter").
-define(Q_OBJECT_FORMAT, "object_format").
-define(Q_NVAL, "nval").
--------------------------------------------------------------------------------
/src/riak_kv_wm_stats.erl:
--------------------------------------------------------------------------------
%% -------------------------------------------------------------------
%%
%% riak_kv_wm_stats: publishing Riak runtime stats via HTTP
%%
%% Copyright (c) 2007-2013 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License.
You may obtain
%% a copy of the License at
%%
%%   http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied.  See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------

-module(riak_kv_wm_stats).

%% webmachine resource exports
-export([
    init/1,
    encodings_provided/2,
    content_types_provided/2,
    service_available/2,
    forbidden/2,
    produce_body/2,
    pretty_print/2
]).
-export([get_stats/0]).

-include_lib("webmachine/include/webmachine.hrl").

-record(ctx, {}).

init(_) ->
    {ok, #ctx{}}.

%% @spec encodings_provided(webmachine:wrq(), context()) ->
%%          {[encoding()], webmachine:wrq(), context()}
%% @doc Get the list of encodings this resource provides.
%%      "identity" is provided for all methods, and "gzip" is
%%      provided for GET as well.
encodings_provided(ReqData, Context) ->
    Identity = {"identity", fun(X) -> X end},
    case wrq:method(ReqData) of
        'GET' ->
            Gzip = {"gzip", fun(X) -> zlib:gzip(X) end},
            {[Identity, Gzip], ReqData, Context};
        _ ->
            {[Identity], ReqData, Context}
    end.

%% @spec content_types_provided(webmachine:wrq(), context()) ->
%%          {[ctype()], webmachine:wrq(), context()}
%% @doc Get the list of content types this resource provides.
%%      "application/json" and "text/plain" are both provided
%%      for all requests.  "text/plain" is a "pretty-printed"
%%      version of the "application/json" content.
content_types_provided(ReqData, Context) ->
    Provided = [{"application/json", produce_body},
                {"text/plain", pretty_print}],
    {Provided, ReqData, Context}.


service_available(ReqData, Ctx) ->
    {true, ReqData, Ctx}.

forbidden(RD, Ctx) ->
    {riak_kv_wm_utils:is_forbidden(RD), RD, Ctx}.

%% @doc Encode the cached stats proplist as a JSON body.
produce_body(ReqData, Ctx) ->
    StatList = riak_kv_http_cache:get_stats(),
    JsonBody = mochijson2:encode({struct, StatList}),
    {JsonBody, ReqData, Ctx}.

%% @spec pretty_print(webmachine:wrq(), context()) ->
%%          {string(), webmachine:wrq(), context()}
%% @doc Format the response JSON object in a "pretty-printed" style.
pretty_print(RD1, C1=#ctx{}) ->
    {Json, RD2, C2} = produce_body(RD1, C1),
    {json_pp:print(binary_to_list(list_to_binary(Json))), RD2, C2}.


get_stats() ->
    riak_kv_status:get_stats(web).
--------------------------------------------------------------------------------
/src/riak_kv_worker.erl:
--------------------------------------------------------------------------------
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2007-2011 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License.  You may obtain
%% a copy of the License at
%%
%%   http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied.  See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------

%% @doc This module uses the riak_core_vnode_worker behavior to perform
%%      different riak_kv tasks asynchronously.

-module(riak_kv_worker).
-behaviour(riak_core_vnode_worker).

-export([init_worker/3,
         handle_work/3]).

-include_lib("riak_kv_vnode.hrl").

-record(state, {index :: partition()}).

%% ===================================================================
%% Public API
%% ===================================================================

%% @doc Initialize the worker.  Currently only the VNode index
%%      parameter is used.
init_worker(VNodeIndex, _Args, _Props) ->
    {ok, #state{index=VNodeIndex}}.

%% @doc Perform the asynchronous fold operation.  A thrown
%%      'receiver_down' or 'stop_fold' aborts silently; any other
%%      throw is treated as a prematurely-completed accumulator and
%%      handed to the finish fun.
handle_work({fold, Work, Finish}, _Sender, State) ->
    try
        Finish(Work())
    catch
        throw:receiver_down -> ok;
        throw:stop_fold -> ok;
        throw:PrematureAcc -> Finish(PrematureAcc)
    end,
    {noreply, State}.
--------------------------------------------------------------------------------
/src/sms.erl:
--------------------------------------------------------------------------------
%% -------------------------------------------------------------------
%%
%% sms: Streaming merge sort
%%
%% Copyright (c) 2007-2011 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License.
You may obtain
%% a copy of the License at
%%
%%   http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied.  See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------

%%
%% @doc Streaming merge sort

-module(sms).

-define(DICTMODULE, orddict).

-export([new/1,
         add_results/3,
         done/1,
         sms/1]).

-export_type([sms/0]).

-opaque sms() :: ?DICTMODULE:?DICTMODULE().

%% @doc create a new sms buffer for the given covering set
%%      of `Vnodes'
-spec new([non_neg_integer()]) -> sms().
new(Vnodes) ->
    ?DICTMODULE:from_list([{Id, {active, []}} || Id <- Vnodes]).

%% @doc Append `Results' to existing buffer for `VnodeID' in
%%      `Data'; the atom 'done' marks that vnode as finished.
-spec add_results(non_neg_integer(), list() | atom(), sms()) -> sms().
add_results(VnodeID, done, Data) ->
    update(VnodeID, fun({_Status, Acc}) -> {done, Acc} end, Data);
add_results(VnodeID, Results, Data) ->
    update(VnodeID, fun({active, Acc}) -> {active, Acc ++ Results} end, Data).

%% @private
update(VnodeID, UpdateFun, Data) ->
    ?DICTMODULE:update(VnodeID, UpdateFun, Data).

%% @doc get all data in buffer, for all vnodes, merged
-spec done(sms()) -> [term()].
done(Data) ->
    lists:merge(values(Data)).


%% @doc perform the streaming merge sort over given `Data:sms()'
%%      returns a two tuple of {`MergedReadyToSendResults::[term()], sms()},
%%      where the first element is the merge-sorted data from all vnodes that
%%      can be consumed by the client, and `sms()' is a buffer of remaining
%%      results.  Nothing is releasable until every live vnode has buffered
%%      at least one result.
-spec sms(sms()) -> {[term()] | [], sms()}.
sms(Data) ->
    case values(Data) of
        [] ->
            {[], Data};
        Vals ->
            case any_empty(Vals) of
                true -> {[], Data};
                false -> unsafe_sms(Data)
            end
    end.

%% @private perform the merge: everything up to the smallest
%% "last element" across buffers is safe to release in order.
unsafe_sms(Data) ->
    Cut = lists:min([lists:last(L) || L <- values(Data)]),
    Partitioned =
        ?DICTMODULE:map(
            fun(_Key, {Status, Vals}) ->
                {Status, lists:splitwith(fun(E) -> E =< Cut end, Vals)}
            end, Data),
    Ready = ?DICTMODULE:map(fun(_Key, {Status, {Lo, _}}) -> {Status, Lo} end,
                            Partitioned),
    Remaining = ?DICTMODULE:map(fun(_Key, {Status, {_, Hi}}) -> {Status, Hi} end,
                                Partitioned),
    {lists:merge(values(Ready)), Remaining}.

%% @private
values(Data) ->
    %% Don't make the SMS wait forever for vnodes that are done
    [V || {_Key, {_Status, V} = Tagged} <- ?DICTMODULE:to_list(Data),
          Tagged =/= {done, []}].

%% @private
any_empty(Lists) ->
    lists:any(fun empty/1, Lists).

%% @private
empty([]) -> true;
empty(_) -> false.
--------------------------------------------------------------------------------
/src/stacktrace.hrl:
--------------------------------------------------------------------------------
%% Originating from Quviq AB
%% Fix to make Erlang programs compile on both OTP20 and OTP21.
%%
%% Get the stack trace in a way that is backwards compatible.  Luckily
%% OTP_RELEASE was introduced in the same version as the new preferred way of
%% getting the stack trace.
A _catch_/2 macro is provided for consistency in
%% cases where the stack trace is not needed.
%%
%% Example use:
%%   try f(...)
%%   catch
%%     ?_exception_(_, Reason, StackToken) ->
%%       case Reason of
%%         {fail, Error} -> ok;
%%         _ -> {'EXIT', Reason, ?_get_stacktrace_(StackToken)}
%%       end
%%   end,

-ifdef(OTP_RELEASE). %% This implies 21 or higher
%% OTP 21+: three-part catch clauses capture the trace directly.
-define(_exception_(Class, Reason, StackToken), Class:Reason:StackToken).
-define(_get_stacktrace_(StackToken), StackToken).
-define(_current_stacktrace_(),
        try
            exit('$get_stacktrace')
        catch
            exit:'$get_stacktrace':__GetCurrentStackTrace ->
                __GetCurrentStackTrace
        end).
-else.
%% Pre-21: fall back to the (since removed) erlang:get_stacktrace/0.
-define(_exception_(Class, Reason, _), Class:Reason).
-define(_get_stacktrace_(_), erlang:get_stacktrace()).
-define(_current_stacktrace_(), erlang:get_stacktrace()).
-endif.
--------------------------------------------------------------------------------
/test/bad_bitcask_multi.schema:
--------------------------------------------------------------------------------
%% @see bitcask.merge.thresholds.small_file
{mapping, "multi_backend.$name.bitcask.thresholds.small_file", "riak_kv.multi_backend", [
  {datatype, bytesize},
  hidden,
  {default, "10MB"}
]}.
7 | -------------------------------------------------------------------------------- /test/put_fsm_precommit.js: -------------------------------------------------------------------------------- 1 | function precommit_noop(object) 2 | { 3 | return object; 4 | } 5 | 6 | function precommit_append_value(object) 7 | { 8 | upd_data = object.values[[0]].data + "_precommit_hook_was_here"; 9 | object.values[[0]].data = upd_data; 10 | return object; 11 | } 12 | 13 | function precommit_nonobj(object) 14 | { 15 | return "not_an_obj"; 16 | } 17 | 18 | function precommit_fail(object) 19 | { 20 | return "fail"; 21 | } 22 | 23 | function precommit_fail_reason(object) 24 | { 25 | return {"fail":"the hook says no"}; 26 | } 27 | 28 | function precommit_crash(object) 29 | { 30 | throw "wobbler"; 31 | } 32 | 33 | function postcommit_ok(object) 34 | { 35 | return ok; 36 | } 37 | 38 | function postcommit_crash(object) 39 | { 40 | throw "postcommit_crash"; 41 | } -------------------------------------------------------------------------------- /test/riak_kv_entropy_manager_test.erl: -------------------------------------------------------------------------------- 1 | %% ------------------------------------------------------------------- 2 | %% 3 | %% Copyright (c) 2012 Basho Technologies, Inc. All Rights Reserved. 4 | %% 5 | %% This file is provided to you under the Apache License, 6 | %% Version 2.0 (the "License"); you may not use this file 7 | %% except in compliance with the License. You may obtain 8 | %% a copy of the License at 9 | %% 10 | %% http://www.apache.org/licenses/LICENSE-2.0 11 | %% 12 | %% Unless required by applicable law or agreed to in writing, 13 | %% software distributed under the License is distributed on an 14 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 | %% KIND, either express or implied. See the License for the 16 | %% specific language governing permissions and limitations 17 | %% under the License. 
%%
%% -------------------------------------------------------------------

%% EUnit tests for the AAE throttle in riak_kv_entropy_manager.  Kept
%% in a separate module because EUnit + Meck cannot mock the module
%% under test from within itself (see note above side_effects_test_/0).
-module(riak_kv_entropy_manager_test).

-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").

%% Shorthand for the module under test.
-define(TM, riak_kv_entropy_manager).

-export([test_set_aae_throttle/0, test_set_aae_throttle_limits/0]).

%% Fixture teardown: kill the table-owner process and block until it
%% is really gone, so a following setup/0 can re-register cleanly.
cleanup(TOPid) ->
    exit(TOPid, kill),
    MonitorRef = erlang:monitor(process, TOPid),
    receive
        {'DOWN', MonitorRef, _, _, _} -> ok
    end.

%% Fixture setup: start the riak_core table owner (unlinked so a test
%% crash does not take it down with us) and initialise the throttle
%% table.  Returns the owner pid for cleanup/1.
setup() ->
    {ok, TOPid} = riak_core_table_owner:start_link(),
    unlink(TOPid),
    riak_core_throttle:init(),
    TOPid.


%% Basic get/set coverage for the throttle accessors.
simple_throttle_test_() ->
    {setup,
     fun setup/0,
     fun cleanup/1,
     [
      {test, ?MODULE, test_set_aae_throttle},
      {test, ?MODULE, test_set_aae_throttle_limits}
     ]
    }.


%% A negative throttle must raise function_clause; valid values must
%% round-trip through set/get.  The error(u) marker distinguishes
%% "set succeeded when it should not have" from a wrong exception.
test_set_aae_throttle() ->
    try
        _ = ?TM:set_aae_throttle(-4),
        error(u)
    catch error:function_clause ->
            ok;
          error:u ->
            error(unexpected_success);
          _X:_Y ->
            error(wrong_exception)
    end,
    [begin ?TM:set_aae_throttle(V), V = ?TM:get_aae_throttle() end ||
        V <- [5,6]].

%% Limits must be a non-trivial list of {LowerBound, Delay} pairs with
%% integer members and a {-1, _} entry; malformed inputs must raise.
test_set_aae_throttle_limits() ->
    ?assertError(invalid_throttle_limits, ?TM:set_aae_throttle_limits([])),
    ?assertError(invalid_throttle_limits, ?TM:set_aae_throttle_limits([{5,7}])),
    ?assertError(invalid_throttle_limits, ?TM:set_aae_throttle_limits([{-1,x}])),
    ?assertError(invalid_throttle_limits, ?TM:set_aae_throttle_limits([{-1,0}, {x,7}])),
    ok = ?TM:set_aae_throttle_limits([{-1,0}, {100, 500}, {100, 500},
                                      {100, 500}, {100, 500}, {100, 500}]).

%% Drat, EUnit + Meck won't work if this test is inside the
%% riak_kv_entropy_manager.erl module.

%% Drives query_and_set_aae_throttle/1 with a mocked multicall and
%% checks the resulting throttle for: unreachable nodes, mailbox-size
%% replies, badrpc results, the kill switch, and slow multicalls.
%% inner_iters (process dictionary) controls how many query rounds run
%% per scenario.
side_effects_test_() ->
    BigWait = 100,
    LittleWait = 10,
    {setup,
     fun() ->
         TOPid = setup(),
         meck:new(?TM, [passthrough]),
         ?TM:set_aae_throttle_limits([{-1, 0},
                                      {30, LittleWait}, {50, BigWait}]),
         TOPid
     end,
     fun(TOPid) ->
         %% Restore global state the test mutated, then tear down.
         ?TM:enable_aae_throttle(),
         erase(inner_iters),
         meck:unload(?TM),
         ?TM:set_aae_throttle(0),
         cleanup(TOPid)
     end,
     [
      ?_test(
         begin
             State0 = ?TM:make_state(),

             ok = verify_mailbox_is_empty(),
             put(inner_iters, 1),

             %% Shape of a real rpc:multicall badrpc error term.
             BadRPC_result = {badrpc,{'EXIT',{undef,[{riak_kv_entropy_manager,multicall,[x,x,x],[y,y,y]},{rpc,'-handle_call_call/6-fun-0-',5,[stack,trace,here]}]}}},
             %% Each entry: {mocked multicall result, expected throttle}.
             %% Unreachable nodes / huge mailboxes / badrpc all force
             %% the maximum (BigWait) throttle.
             Tests1 = [{fun(_,_,_,_,_) -> {[], [nd]} end, BigWait},
                       {fun(_,_,_,_,_) -> {[{0,nd}], [nd]} end, BigWait},
                       {fun(_,_,_,_,_) -> {[{666,nd}], []} end, BigWait},
                       {fun(_,_,_,_,_) -> { [BadRPC_result], []} end, BigWait}],
             %% Install the mock, run inner_iters query rounds, then
             %% compare the last observed throttle with expectation.
             Eval = fun({Fun, ExpectThrottle}, St1) ->
                        meck:expect(?TM, multicall, Fun),
                        St3 = lists:foldl(
                                fun(_, Stx) ->
                                    ?TM:query_and_set_aae_throttle(Stx)
                                end, St1, lists:seq(1, get(inner_iters))),
                        {Throttle, St4} = ?TM:get_last_throttle(St3),
                        ?assertEqual(ExpectThrottle, Throttle),
                        St4
                    end,

             State10 = lists:foldl(Eval, State0, Tests1),
             ok = verify_mailbox_is_empty(),

             %% Put kill switch test in the middle, to try to catch
             %% problems after the switch is turned off again.
             Tests2 = [{fun(_,_,_,_,_) -> { [BadRPC_result], []} end, 0}],
             ?TM:disable_aae_throttle(),
             State20 = lists:foldl(Eval, State10, Tests2),
             ok = verify_mailbox_is_empty(),
             ?TM:enable_aae_throttle(),

             %% Mailbox size 31 falls in the {30, LittleWait} band;
             %% size 2 falls below every band, so no throttle.
             Tests3 = [{fun(_,_,_,_,_) -> {[{31,nd}], []} end, LittleWait},
                       {fun(_,_,_,_,_) -> {[{2,nd}], []} end, 0}],
             State30 = lists:foldl(Eval, State20, Tests3),
             ok = verify_mailbox_is_empty(),

             %% Slow (1s) multicalls over 10 rounds: result must still
             %% be computed and no stray messages may accumulate.
             put(inner_iters, 10),
             Tests4 = [{fun(_,_,_,_,_) -> timer:sleep(1000), {[{2,nd}], []} end, 0}],
             _State40 = lists:foldl(Eval, State30, Tests4),
             ok = verify_mailbox_is_empty(),
             ok
         end)
     ]
    }.

%% Assert that the test process has received no unexpected messages
%% (e.g. late replies leaked by the mocked multicall).
verify_mailbox_is_empty() ->
    receive
        X ->
            error({mailbox_not_empty, got, X})
    after 0 ->
        ok
    end.

-endif.