bunshin.core
del!
(del! context key & {:keys [replication-factor id], :or {replication-factor 2, id (gen-id)}})
context - Use context generated by gen-context

Delete the key from all servers. Providing the correct replication factor is
important.
gen-context
(gen-context servers-conf-list)
(gen-context servers-conf-list storage-backend)
(gen-context servers-conf-list storage-backend submit-to-threadpool-fn load-distribution-fn)
A function to generate context used by all API functions.

servers-conf-list - A list of server configurations used by the storage
backend. This will be converted into a ketama ring.

storage-backend - By default gen-context will use redis-backend as the
storage backend, but if you implement BunshinDataStorage you can pass
any storage backend.

submit-to-threadpool-fn - All repair-on-read and pruning of old data on
a key are submitted to this threadpool function. By default it is just
a future, which is not advisable in production systems. You should
provide your own implementation with this option.

load-distribution-fn - This function is used by 'get' to decide which
server to choose from a list of servers. The list of servers is the
argument to this function. The default behaviour shuffles the list
and picks the first entry.
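
For example, a production setup can hand repair-on-read work to a fixed thread pool instead of unbounded futures. A minimal sketch (the executor-backed submit function and the pool size are illustrative, not part of the library):

    (require '[bunshin.core :as bc]
             '[bunshin.datastores.redis :refer [redis-backend]])
    (import '(java.util.concurrent Executors ExecutorService Callable))

    ;; hypothetical fixed-size pool for repair-on-read and pruning work
    (def ^ExecutorService repair-pool (Executors/newFixedThreadPool 8))

    (def ctx
      (bc/gen-context
       [{:pool {} :spec {:host "127.0.0.1" :port 6379}}]
       redis-backend
       ;; must return something deref-able; a java.util.concurrent.Future qualifies
       (fn [thunk] (.submit repair-pool ^Callable thunk))
       ;; same as the default: pick a random server for each read
       (comp first shuffle)))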
get!
(get! context key & {:keys [replication-factor ttl], :or {replication-factor 2, ttl -1}})
context - Use context generated by gen-context
replication-factor - Number of copies you want of the value
ttl - time to live for the value

Returns either nil or the value
get-fast
(get-fast context key id servers)
context - Use context generated by gen-context

Use the id and servers returned from get-with-meta!

This lets you fetch data without extra network hops. It will return
inconsistent data if there are any writes between calling
get-with-meta! and get-fast, but it lets you tune for performance at
the cost of inconsistency.
store!
(store! context key val & {:keys [replication-factor ttl id], :or {replication-factor 2, ttl -1, id (gen-id)}})
context - Use context generated by gen-context
replication-factor - Number of copies you want of the value
ttl - time to live for the value
id - A monotonically increasing number; it defaults to the server timestamp

Returns either :ok or :stale-write. :stale-write means the id
provided is smaller than the id already stored in the datastore
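
A quick usage sketch, assuming a Redis instance on 127.0.0.1:6379 (the values mirror the comment block in bunshin.core):

    (require '[bunshin.core :as bc])

    (def ctx (bc/gen-context [{:pool {} :spec {:host "127.0.0.1" :port 6379}}]))

    (bc/store! ctx "foo" "hello world" :id 20 :ttl 10) ;; => :ok
    (bc/get! ctx "foo")                                ;; => "hello world"
    (bc/store! ctx "foo" "hello world new" :id 20)     ;; => :stale-write
    (bc/del! ctx "foo")                                ;; => :ok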
--------------------------------------------------------------------------------
/doc/bunshin.datastores.datastore.html:
--------------------------------------------------------------------------------
1 |
2 | bunshin.datastores.redis
redis
macro
(redis server-conf & body)
--------------------------------------------------------------------------------
/doc/css/default.css:
--------------------------------------------------------------------------------
1 | body {
2 | font-family: Helvetica, Arial, sans-serif;
3 | font-size: 15px;
4 | }
5 |
6 | pre, code {
7 | font-family: Monaco, DejaVu Sans Mono, Consolas, monospace;
8 | font-size: 9pt;
9 | margin: 15px 0;
10 | }
11 |
12 | h2 {
13 | font-weight: normal;
14 | font-size: 28px;
15 | padding: 10px 0 2px 0;
16 | margin: 0;
17 | }
18 |
19 | #header, #content, .sidebar {
20 | position: fixed;
21 | }
22 |
23 | #header {
24 | top: 0;
25 | left: 0;
26 | right: 0;
27 | height: 20px;
28 | background: #444;
29 | color: #fff;
30 | padding: 5px 7px;
31 | }
32 |
33 | #content {
34 | top: 30px;
35 | right: 0;
36 | bottom: 0;
37 | overflow: auto;
38 | background: #fff;
39 | color: #333;
40 | padding: 0 18px;
41 | }
42 |
43 | .sidebar {
44 | position: fixed;
45 | top: 30px;
46 | bottom: 0;
47 | overflow: auto;
48 | }
49 |
50 | #namespaces {
51 | background: #e2e2e2;
52 | border-right: solid 1px #cccccc;
53 | left: 0;
54 | width: 250px;
55 | }
56 |
57 | #vars {
58 | background: #f2f2f2;
59 | border-right: solid 1px #cccccc;
60 | left: 251px;
61 | width: 200px;
62 | }
63 |
64 | .namespace-index {
65 | left: 251px;
66 | }
67 |
68 | .namespace-docs {
69 | left: 452px;
70 | }
71 |
72 | #header {
73 | background: -moz-linear-gradient(top, #555 0%, #222 100%);
74 | background: -webkit-linear-gradient(top, #555 0%, #333 100%);
75 | background: -o-linear-gradient(top, #555 0%, #222 100%);
76 | background: -ms-linear-gradient(top, #555 0%, #222 100%);
77 | background: linear-gradient(to bottom, #555 0%, #222 100%);
78 | box-shadow: 0 0 8px rgba(0, 0, 0, 0.4);
79 | z-index: 100;
80 | }
81 |
82 | #header h1 {
83 | margin: 0;
84 | padding: 0;
85 | font-size: 12pt;
86 | font-weight: lighter;
87 | text-shadow: -1px -1px 0px #333;
88 | }
89 |
90 | #header a, .sidebar a {
91 | display: block;
92 | text-decoration: none;
93 | }
94 |
95 | #header a {
96 | color: #fff;
97 | }
98 |
99 | .sidebar a {
100 | color: #333;
101 | }
102 |
103 | #header h2 {
104 | float: right;
105 | font-size: 9pt;
106 | font-weight: normal;
107 | margin: 3px 3px;
108 | padding: 0;
109 | color: #bbb;
110 | }
111 |
112 | #header h2 a {
113 | display: inline;
114 | }
115 |
116 | .sidebar h3 {
117 | margin: 0;
118 | padding: 10px 10px 0 10px;
119 | font-size: 19px;
120 | font-weight: normal;
121 | }
122 |
123 | .sidebar ul {
124 | padding: 0.5em 0em;
125 | margin: 0;
126 | }
127 |
128 | .sidebar li {
129 | display: block;
130 | vertical-align: middle;
131 | }
132 |
133 | .sidebar li a, .sidebar li .no-link {
134 | border-left: 3px solid transparent;
135 | padding: 0 7px;
136 | white-space: nowrap;
137 | }
138 |
139 | .sidebar li .no-link {
140 | display: block;
141 | color: #777;
142 | font-style: italic;
143 | }
144 |
145 | .sidebar li .inner {
146 | display: inline-block;
147 | padding-top: 7px;
148 | height: 24px;
149 | }
150 |
151 | .sidebar li a, .sidebar li .tree {
152 | height: 31px;
153 | }
154 |
155 | .depth-1 .inner { padding-left: 2px; }
156 | .depth-2 .inner { padding-left: 6px; }
157 | .depth-3 .inner { padding-left: 20px; }
158 | .depth-4 .inner { padding-left: 34px; }
159 | .depth-5 .inner { padding-left: 48px; }
160 | .depth-6 .inner { padding-left: 62px; }
161 |
162 | .sidebar li .tree {
163 | display: block;
164 | float: left;
165 | position: relative;
166 | top: -10px;
167 | margin: 0 4px 0 0;
168 | padding: 0;
169 | }
170 |
171 | .sidebar li.depth-1 .tree {
172 | display: none;
173 | }
174 |
175 | .sidebar li .tree .top, .sidebar li .tree .bottom {
176 | display: block;
177 | margin: 0;
178 | padding: 0;
179 | width: 7px;
180 | }
181 |
182 | .sidebar li .tree .top {
183 | border-left: 1px solid #aaa;
184 | border-bottom: 1px solid #aaa;
185 | height: 19px;
186 | }
187 |
188 | .sidebar li .tree .bottom {
189 | height: 22px;
190 | }
191 |
192 | .sidebar li.branch .tree .bottom {
193 | border-left: 1px solid #aaa;
194 | }
195 |
196 | #namespaces li.current a {
197 | border-left: 3px solid #a33;
198 | color: #a33;
199 | }
200 |
201 | #vars li.current a {
202 | border-left: 3px solid #33a;
203 | color: #33a;
204 | }
205 |
206 | #content h3 {
207 | font-size: 13pt;
208 | font-weight: bold;
209 | }
210 |
211 | .public h3 {
212 | margin: 0;
213 | float: left;
214 | }
215 |
216 | .usage {
217 | clear: both;
218 | }
219 |
220 | .public {
221 | margin: 0;
222 | border-top: 1px solid #e0e0e0;
223 | padding-top: 14px;
224 | padding-bottom: 6px;
225 | }
226 |
227 | .public:last-child {
228 | margin-bottom: 20%;
229 | }
230 |
231 | .members .public:last-child {
232 | margin-bottom: 0;
233 | }
234 |
235 | .members {
236 | margin: 15px 0;
237 | }
238 |
239 | .members h4 {
240 | color: #555;
241 | font-weight: normal;
242 | font-variant: small-caps;
243 | margin: 0 0 5px 0;
244 | }
245 |
246 | .members .inner {
247 | padding-top: 5px;
248 | padding-left: 12px;
249 | margin-top: 2px;
250 | margin-left: 7px;
251 | border-left: 1px solid #bbb;
252 | }
253 |
254 | #content .members .inner h3 {
255 | font-size: 12pt;
256 | }
257 |
258 | .members .public {
259 | border-top: none;
260 | margin-top: 0;
261 | padding-top: 6px;
262 | padding-bottom: 0;
263 | }
264 |
265 | .members .public:first-child {
266 | padding-top: 0;
267 | }
268 |
269 | h4.type,
270 | h4.dynamic,
271 | h4.added,
272 | h4.deprecated {
273 | float: left;
274 | margin: 3px 10px 15px 0;
275 | font-size: 15px;
276 | font-weight: bold;
277 | font-variant: small-caps;
278 | }
279 |
280 | .public h4.type,
281 | .public h4.dynamic,
282 | .public h4.added,
283 | .public h4.deprecated {
284 | font-size: 13px;
285 | font-weight: bold;
286 | margin: 3px 0 0 10px;
287 | }
288 |
289 | .members h4.type,
290 | .members h4.added,
291 | .members h4.deprecated {
292 | margin-top: 1px;
293 | }
294 |
295 | h4.type {
296 | color: #717171;
297 | }
298 |
299 | h4.dynamic {
300 | color: #9933aa;
301 | }
302 |
303 | h4.added {
304 | color: #508820;
305 | }
306 |
307 | h4.deprecated {
308 | color: #880000;
309 | }
310 |
311 | .namespace {
312 | margin-bottom: 30px;
313 | }
314 |
315 | .namespace:last-child {
316 | margin-bottom: 10%;
317 | }
318 |
319 | .index {
320 | padding: 0;
321 | font-size: 80%;
322 | margin: 15px 0;
323 | line-height: 16px;
324 | }
325 |
326 | .index * {
327 | display: inline;
328 | }
329 |
330 | .index p {
331 | padding-right: 3px;
332 | }
333 |
334 | .index li {
335 | padding-right: 5px;
336 | }
337 |
338 | .index ul {
339 | padding-left: 0;
340 | }
341 |
342 | .usage code {
343 | display: block;
344 | color: #008;
345 | margin: 2px 0;
346 | }
347 |
348 | .usage code:first-child {
349 | padding-top: 10px;
350 | }
351 |
352 | p {
353 | margin: 15px 0;
354 | }
355 |
356 | .public p:first-child, .public pre.plaintext {
357 | margin-top: 12px;
358 | }
359 |
360 | .doc {
361 | margin: 0 0 26px 0;
362 | clear: both;
363 | }
364 |
365 | .public .doc {
366 | margin: 0;
367 | }
368 |
369 | .namespace-index .doc {
370 | margin-bottom: 20px;
371 | }
372 |
373 | .namespace-index .namespace .doc {
374 | margin-bottom: 10px;
375 | }
376 |
377 | .markdown {
378 | line-height: 18px;
379 | font-size: 14px;
380 | }
381 |
382 | .doc, .public, .namespace .index {
383 | max-width: 680px;
384 | overflow-x: visible;
385 | }
386 |
387 | .markdown code, .src-link a {
388 | background: #f6f6f6;
389 | border: 1px solid #e4e4e4;
390 | border-radius: 2px;
391 | }
392 |
393 | .markdown pre {
394 | background: #f4f4f4;
395 | border: 1px solid #e0e0e0;
396 | border-radius: 2px;
397 | padding: 5px 10px;
398 | margin: 0 10px;
399 | }
400 |
401 | .markdown pre code {
402 | background: transparent;
403 | border: none;
404 | }
405 |
406 | .doc ul, .doc ol {
407 | padding-left: 30px;
408 | }
409 |
410 | .doc table {
411 | border-collapse: collapse;
412 | margin: 0 10px;
413 | }
414 |
415 | .doc table td, .doc table th {
416 | border: 1px solid #dddddd;
417 | padding: 4px 6px;
418 | }
419 |
420 | .doc table th {
421 | background: #f2f2f2;
422 | }
423 |
424 | .doc dl {
425 | margin: 0 10px 20px 10px;
426 | }
427 |
428 | .doc dl dt {
429 | font-weight: bold;
430 | margin: 0;
431 | padding: 3px 0;
432 | border-bottom: 1px solid #ddd;
433 | }
434 |
435 | .doc dl dd {
436 | padding: 5px 0;
437 | margin: 0 0 5px 10px;
438 | }
439 |
440 | .doc abbr {
441 | border-bottom: 1px dotted #333;
442 | font-variant: none;
443 | cursor: help;
444 | }
445 |
446 | .src-link {
447 | margin-bottom: 15px;
448 | }
449 |
450 | .src-link a {
451 | font-size: 70%;
452 | padding: 1px 4px;
453 | text-decoration: none;
454 | color: #5555bb;
455 | }
--------------------------------------------------------------------------------
/doc/index.html:
--------------------------------------------------------------------------------
1 |
2 | Bunshin 0.1.0-SNAPSHOT
Bunshin is a Redis-based multi-instance cache library that aims for high availability.
--------------------------------------------------------------------------------
/doc/intro.md:
--------------------------------------------------------------------------------
1 | # Introduction to bunshin
2 |
3 | TODO: write [great documentation](http://jacobian.org/writing/what-to-write/)
4 |
--------------------------------------------------------------------------------
/doc/js/page_effects.js:
--------------------------------------------------------------------------------
1 | function visibleInParent(element) {
2 | var position = $(element).position().top
3 | return position > -50 && position < ($(element).offsetParent().height() - 50)
4 | }
5 |
6 | function hasFragment(link, fragment) {
7 | return $(link).attr("href").indexOf("#" + fragment) != -1
8 | }
9 |
10 | function findLinkByFragment(elements, fragment) {
11 | return $(elements).filter(function(i, e) { return hasFragment(e, fragment)}).first()
12 | }
13 |
14 | function scrollToCurrentVarLink(elements) {
15 | var elements = $(elements);
16 | var parent = elements.offsetParent();
17 |
18 | if (elements.length == 0) return;
19 |
20 | var top = elements.first().position().top;
21 | var bottom = elements.last().position().top + elements.last().height();
22 |
23 | if (top >= 0 && bottom <= parent.height()) return;
24 |
25 | if (top < 0) {
26 | parent.scrollTop(parent.scrollTop() + top);
27 | }
28 | else if (bottom > parent.height()) {
29 | parent.scrollTop(parent.scrollTop() + bottom - parent.height());
30 | }
31 | }
32 |
33 | function setCurrentVarLink() {
34 | $('#vars a').parent().removeClass('current')
35 | $('.anchor').
36 | filter(function(index) { return visibleInParent(this) }).
37 | each(function(index, element) {
38 | findLinkByFragment("#vars a", element.id).
39 | parent().
40 | addClass('current')
41 | });
42 | scrollToCurrentVarLink('#vars .current');
43 | }
44 |
45 | var hasStorage = (function() { try { return localStorage.getItem } catch(e) {} }())
46 |
47 | function scrollPositionId(element) {
48 | var directory = window.location.href.replace(/[^\/]+\.html$/, '')
49 | return 'scroll::' + $(element).attr('id') + '::' + directory
50 | }
51 |
52 | function storeScrollPosition(element) {
53 | if (!hasStorage) return;
54 | localStorage.setItem(scrollPositionId(element) + "::x", $(element).scrollLeft())
55 | localStorage.setItem(scrollPositionId(element) + "::y", $(element).scrollTop())
56 | }
57 |
58 | function recallScrollPosition(element) {
59 | if (!hasStorage) return;
60 | $(element).scrollLeft(localStorage.getItem(scrollPositionId(element) + "::x"))
61 | $(element).scrollTop(localStorage.getItem(scrollPositionId(element) + "::y"))
62 | }
63 |
64 | function persistScrollPosition(element) {
65 | recallScrollPosition(element)
66 | $(element).scroll(function() { storeScrollPosition(element) })
67 | }
68 |
69 | function sidebarContentWidth(element) {
70 | var widths = $(element).find('.inner').map(function() { return $(this).innerWidth() })
71 | return Math.max.apply(Math, widths)
72 | }
73 |
74 | function resizeSidebars() {
75 | var nsWidth = sidebarContentWidth('#namespaces') + 30
76 | var varWidth = 0
77 |
78 | if ($('#vars').length != 0) {
79 | varWidth = sidebarContentWidth('#vars') + 30
80 | }
81 |
82 | // snap to grid
83 | var snap = 30;
84 | nsWidth = Math.ceil(nsWidth / snap) * snap;
85 | varWidth = Math.ceil(varWidth / snap) * snap;
86 |
87 | $('#namespaces').css('width', nsWidth)
88 | $('#vars').css('width', varWidth)
89 | $('#vars, .namespace-index').css('left', nsWidth + 1)
90 | $('.namespace-docs').css('left', nsWidth + varWidth + 2)
91 | }
92 |
93 | $(window).ready(resizeSidebars)
94 | $(window).ready(setCurrentVarLink)
95 | $(window).ready(function() { persistScrollPosition('#namespaces')})
96 | $(window).ready(function() {
97 | $('#content').scroll(setCurrentVarLink)
98 | $(window).resize(setCurrentVarLink)
99 | })
100 |
--------------------------------------------------------------------------------
/doc/proof.md:
--------------------------------------------------------------------------------
1 | # Mathematical analysis
2 |
3 | ## Number of queries under normal operation
4 |
5 | N - Number of redis nodes
6 | x - Payload size
7 | x-ts - Payload size for timestamps
8 | B - Max bandwidth
9 | n - Number of queries possible: n = (B * N) / ((N * x-ts) + x)
10 |
11 | N - 1
12 | x - 20000 (bytes)
13 | x-ts - 1 (byte)
14 | B - 120 MBps = 120000000 Bps
15 |
16 | n = (120000000 * 1) / ((1 * 1) + 20000)
17 | n ≈ 5999
18 |
19 | The table below lists [N n] pairs for the same payload sizes (see the sketch after it):
20 | [10 59970]
21 | [20 119880]
22 | [30 179730]
23 | [40 239520]
24 | [50 299251]
25 | [60 358923]
26 | [70 418535]
27 | [80 478087]
28 | [90 537580]
29 | [100 597014]
30 | [500 2926829]
31 | [1000 5714285]
32 | [5000 24000000]
33 | [10000 40000000]
34 |
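
A small Clojure sketch that reproduces the table above from the formula, assuming B = 120000000 bytes/s, x-ts = 1 byte and x = 20000 bytes (the helper name is illustrative):

    (defn queries-per-sec
      "n = (B * N) / ((N * x-ts) + x), truncated to a whole query count."
      [b n x-ts x]
      (quot (* b n) (+ (* n x-ts) x)))

    (map (fn [n] [n (queries-per-sec 120000000 n 1 20000)])
         [10 20 30 40 50 100 1000 10000])
    ;; => ([10 59970] [20 119880] [30 179730] [40 239520]
    ;;     [50 299251] [100 597014] [1000 5714285] [10000 40000000])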
35 |
36 | ## Number of queries in case of new nodes added
37 |
38 | N1 - Number of nodes at t1
39 | N2 - Number of nodes at t2
40 |
41 | (N2 - N1)
42 |
43 | ## Number of queries in case of recovery after node failure
44 |
45 | N1 - Number of nodes at t1
46 | N2 - Number of nodes failed at t2
47 | Same number of nodes recovered at t3
48 |
49 | Number of repair-on-read queries = N2
50 |
51 |
52 |
53 |
54 | ## Latency
55 |
56 | q - Average query time
57 | qw - Worst case time for a bunshin data fetch
58 |
59 |
60 | qw = (N + 1) * q
61 |
--------------------------------------------------------------------------------
/doc/scenarios.md:
--------------------------------------------------------------------------------
1 | ### Scenarios
2 |
3 | For all scenarios, key x has a replication factor of 3 and three nodes are selected: [A, B, C]
4 |
5 | ### Successful write
6 | - Id reads go to [A, B, C]
7 | - The latest id is selected from the read results
8 | - If the id provided by the write is greater than the latest id then writes go to [A, B, C]
9 |
10 | ### Stale write
11 | - Id reads go to [A, B, C]
12 | - The latest id is selected from the read results
13 | - If the id provided by the write is not greater than the latest id then it's a stale write (see the sketch below)
14 |
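A sketch of how a stale write surfaces through the API (ids are illustrative; assumes a context `ctx` built with gen-context and `bc` aliasing bunshin.core):

    (bc/store! ctx "x" "v1" :id 20 :replication-factor 3) ;; => :ok
    (bc/store! ctx "x" "v0" :id 19 :replication-factor 3) ;; => :stale-write
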
15 | ### Successful read
16 | - Id reads go to [A, B, C]
17 | - One node is selected from [A, B, C] for fetching data
18 |
19 | ### Concurrent writes
20 | #### Same id from same machine
21 | All writes after the first one will be dropped. This helps reduce the surge of load introduced when a new redis node is added to the cluster.
22 | #### Same id from different machines
23 | All writes after the first one will be stale writes.
24 | #### Different ids
25 | ##### Machine 1
26 | - Id reads go to [A, B, C]
27 | - If the id is fresh then writes go to [A, B, C]; the writes update the data and the id set.
28 | ##### Machine 2
29 | - Id reads go to [A, B, C]
30 | - If the id is fresh then writes go to [A, B, C]; the writes update the data and the id set.
31 |
32 |
33 | ### Node failure / Network failure #1
34 | - Node A fails
35 | - Id reads go to [A, B, C] and read for A fails
36 | - Latest id is selected from [B, C]
37 | - Reads go to one node from [B, C]
38 |
39 |
40 | ### Node failure / Network failure #2
41 | - Id reads go to [A, B, C]
42 | - Node A fails
43 | - A is selected for fetching data. Fetching data fails.
44 | - This will be a cache miss
45 |
46 | ### Node failure / Network failure #3
47 | - A write is sent to [N1, N2, N3, N4]
48 | - [N2, N3, N4] fail
49 | - Another write is only sent to [N1]
50 | - [N2] recovered and [N1] failed
51 | - A read will now result in stale data
52 | This can be avoided by always adding new nodes to the cluster with their data removed. This includes recovered nodes as well. See the Ops section.
53 |
--------------------------------------------------------------------------------
/project.clj:
--------------------------------------------------------------------------------
1 | (defproject me.kapilreddy/bunshin "0.1.0-SNAPSHOT"
2 | :description "Bunshin is a Redis-based multi-instance cache library that aims for high availability."
3 | :url "http://github.com/kapilreddy/bunshin"
4 | :license {:name "Eclipse Public License"
5 | :url "http://www.eclipse.org/legal/epl-v10.html"}
6 | :dependencies [[org.clojure/clojure "1.6.0"]
7 | [com.taoensso/carmine "2.9.2"]
8 | [ketamine "1.0.0"]
9 | [clj-time "0.9.0"]]
10 | :profiles {:dev {:dependencies [[criterium "0.4.3"]
11 | [org.clojure/test.check "0.7.0"]]
12 | :plugins [[codox "0.8.11"]]}})
13 |
--------------------------------------------------------------------------------
/src/bunshin/core.clj:
--------------------------------------------------------------------------------
1 | (ns bunshin.core
2 | (:require [clojure.set :as cs]
3 | [ketamine.core :as ketama]
4 | [clj-time.core :as ct]
5 | [bunshin.datastores.redis :refer [redis-backend]]
6 | [bunshin.datastores.datastore :refer [BunshinDataStorage]]
7 | [bunshin.datastores.datastore :as bdd]))
8 |
9 |
10 | (defn- gen-id-set-key
11 | [key]
12 | (format "bunshin-ids:%s" key))
13 |
14 |
15 | (defn- gen-val-key
16 | [key id]
17 | (format "%s:%.0f" key (double id)))
18 |
19 |
20 | (defn- gen-id
21 | []
22 | (.getMillis (ct/now)))
23 |
24 |
25 | (defn- get-servers
26 | [ring id n]
27 | (take n (clojure.core/set (take (* n 2)
28 | (ketama/node-seq ring id)))))
29 |
30 |
31 | (defn- get-fresh-id
32 | [server-with-id-xs]
33 | (first (first (first (sort-by (comp - first first)
34 | (filter (comp seq first)
35 | server-with-id-xs))))))
36 |
37 |
38 | (defn- fetch-id
39 | [{:keys [storage-backend]}
40 | server
41 | key]
42 | (when-let [id-str-xs (bdd/get-id-xs storage-backend
43 | server
44 | (gen-id-set-key key))]
45 | [(map (fn [i]
46 | (Double/parseDouble i))
47 | id-str-xs)
48 | server]))
49 |
50 |
51 | (defn- fetch-id-xs
52 | [{:keys [^BunshinDataStorage storage-backend
53 | submit-to-threadpool-fn]
54 | :as ctx}
55 | servers
56 | key]
57 | (let [fetch-id-l (partial fetch-id ctx)
58 | results (map #(submit-to-threadpool-fn (fn []
59 | (fetch-id-l %
60 | key)))
61 | servers)]
62 | (doall (map deref
63 | results))))
64 |
65 |
66 | (defn- set*
67 | [{:keys [^BunshinDataStorage storage-backend
68 | running-set-operations submit-to-threadpool-fn]}
69 | servers-with-id key val id
70 | & {:keys [ttl]}]
71 | (let [val-key (gen-val-key key id)]
72 | (when-not (@running-set-operations val-key)
73 | (swap! running-set-operations conj val-key)
74 | (doseq [[id-xs server] servers-with-id]
75 | (bdd/set storage-backend
76 | server
77 | val-key
78 | val
79 | (gen-id-set-key key)
80 | id
81 | ttl)
82 | (let [extra-ids (remove #(= (double id) %)
83 | id-xs)]
84 | (when (seq extra-ids)
85 | (submit-to-threadpool-fn (fn []
86 | (bdd/prune-ids storage-backend
87 | server
88 | (gen-id-set-key key))
89 | (bdd/del storage-backend
90 | server
91 | (map (partial gen-val-key key)
92 | extra-ids)))))))
93 | (swap! running-set-operations disj val-key))))
94 |
95 |
96 | (defn gen-context
97 | "A function to generate context used by all API functions.
98 |
99 | servers-conf-list - A list of server configurations used by the storage
100 | backend. This will be converted into a ketama ring.
101 |
102 | storage-backend - By default gen-context will use redis-backend as the
103 | storage backend, but if you implement BunshinDataStorage you can pass
104 | any storage backend
105 |
106 | submit-to-threadpool-fn - All repair-on-read and pruning of old data on
107 | a key are submitted to this threadpool function. By default it's just
108 | a future. This is not advisable in production systems. You should
109 | provide your own implementation with this option.
110 |
111 | load-distribution-fn - This function is used by 'get' to decide which
112 | server to choose from a list of servers. The list of servers is the
113 | argument to this function. The default behaviour shuffles the list
114 | and picks the first entry"
115 | ([servers-conf-list]
116 | (gen-context servers-conf-list
117 | redis-backend))
118 | ([servers-conf-list storage-backend]
119 | (gen-context servers-conf-list
120 | storage-backend
121 | (fn [thunk]
122 | (future (thunk)))
123 | (comp first shuffle)))
124 | ([servers-conf-list storage-backend
125 | submit-to-threadpool-fn load-distribution-fn]
126 | {:storage-backend storage-backend
127 | :submit-to-threadpool-fn submit-to-threadpool-fn
128 | :load-distribution-fn load-distribution-fn
129 | :running-set-operations (atom #{})
130 | :ring (ketama/make-ring servers-conf-list)}))
131 |
132 |
133 | (defn store!
134 | "context - Use context generated by gen-context
135 | replication-factor - Number of copies you want of the value
136 | ttl - time to live for the value
137 | id - A monotonically increasing number; it defaults to the server timestamp
138 |
139 | Returns either :ok or :stale-write. :stale-write means the id
140 | provided is smaller than the id already stored in the datastore"
141 | [context key
142 | val & {:keys [replication-factor ttl id]
143 | :or {replication-factor 2
144 | ttl -1
145 | id (gen-id)}}]
146 | (let [servers (get-servers (:ring context) key replication-factor)
147 | servers-with-id (fetch-id-xs context servers key)
148 | fresh-id (get-fresh-id servers-with-id)]
149 | (if (or (nil? fresh-id)
150 | (< fresh-id id)) ;; an id equal to the stored id is a stale write
151 | (do (set* context
152 | servers-with-id
153 | key
154 | val
155 | id
156 | :ttl ttl)
157 | :ok)
158 | :stale-write)))
159 |
160 |
161 | (defn get-with-meta!
162 | "context - Use context generated by gen-context
163 | replication-factor - Number of copies you want of the value
164 | ttl - time to live for the value
165 |
166 | Replication factor and ttl are needed for repair on read.
167 |
168 | Returns either nil or
169 | {:value - Value for the key
170 | :servers - List of servers with this key
171 | :id - Latest id of value}
172 | "
173 | [context key & {:keys [replication-factor ttl]
174 | :or {replication-factor 2
175 | ttl -1}}]
176 | (let [{:keys [ring load-distribution-fn storage-backend
177 | submit-to-threadpool-fn]} context
178 | servers (get-servers ring key replication-factor)]
179 | (let [servers-with-id (filter (comp seq first)
180 | (fetch-id-xs context servers key))]
181 | (when (seq servers-with-id)
182 | (let [fresh-id (get-fresh-id servers-with-id)]
183 | (when fresh-id
184 | (let [in-sync-servers (map second
185 | (filter #(= fresh-id (first (first %)))
186 | servers-with-id))
187 | fresh-data (let [server (load-distribution-fn in-sync-servers)]
188 | (bdd/get storage-backend
189 | server
190 | (gen-val-key key fresh-id)))]
191 | (submit-to-threadpool-fn
192 | (fn []
193 | (let [out-of-sync-servers
194 | (cs/difference (clojure.core/set servers)
195 | (clojure.core/set in-sync-servers))]
196 | ;; repair on read: write the fresh value to the out-of-sync
197 | ;; servers. set* expects [id-xs server] pairs, so wrap each
198 | ;; server (the original call mistakenly resolved to
199 | ;; clojure.core/set, which cannot perform the write).
200 | (set* context
201 | (map (fn [server] [nil server]) out-of-sync-servers)
202 | key fresh-data fresh-id :ttl ttl))))
203 | {:value fresh-data
204 | :servers in-sync-servers
205 | :id fresh-id})))))))
206 |
207 |
208 | (defn get!
209 | "context - Use context generated by gen-context
210 | replication-factor - Number of copies you want of the value
211 | ttl - time to live for the value
212 |
213 | Returns either nil or the value"
214 | [context key & {:keys [replication-factor ttl]
215 | :or {replication-factor 2
216 | ttl -1}}]
217 | (:value (get-with-meta! context key
218 | :replication-factor replication-factor
219 | :ttl ttl)))
220 |
221 |
222 | (defn get-fast
223 | "context - Use context generated by gen-context
224 |
225 | Use the id and servers returned from get-with-meta!
226 |
227 | This lets you fetch data without extra network hops. It will return
228 | inconsistent data if there are any writes between calling
229 | get-with-meta! and get-fast, but it lets you tune for performance
230 | at the cost of inconsistency"
231 | [context key id servers]
232 | (let [{:keys [load-distribution-fn storage-backend]} context
233 | val-key (gen-val-key key id)
234 | server (load-distribution-fn servers)]
235 | (bdd/get storage-backend
236 | server
237 | val-key)))
238 |
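;;; Example (sketch): pair get-with-meta! with get-fast to avoid the
;;; id-lookup round trip on a subsequent read. Assumes `ctx` was built
;;; with gen-context (see the comment block at the end of this file).
(comment
  (let [{:keys [id servers]} (get-with-meta! ctx "foo")]
    (get-fast ctx "foo" id servers)))
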
239 |
240 | (defn del!
241 | "context - Use context generated by gen-context
242 |
243 | Delete the key from all servers. Providing the correct replication factor
244 | is important."
245 | [context key & {:keys [replication-factor id]
246 | :or {replication-factor 2
247 | id (gen-id)}}]
248 | (store! context key nil
249 | :replication-factor replication-factor
250 | :id id))
251 |
252 |
253 | (comment
254 | (def ctx (gen-context [{:pool {}
255 | :spec {:host "127.0.0.1"
256 | :port 6379}}]))
257 |
258 | ;;; Request 1 to 127.0.0.1:6379
259 | ;;; zrevrange "bunshinids:foo" 0 -1 "withscores"
260 | (get! ctx "foo") ;; nil
261 |
262 | ;;; Request 1 to 127.0.0.1:6379
263 | ;;; zrevrange "bunshinids:foo" 0 -1 "withscores"
264 | ;;; Request 2 to 127.0.0.1:6379
265 | ;;; zadd "bunshinids:foo" 20 1
266 | ;;; set "foo:20" "hello world"
267 | (store! ctx "foo" "hello world" :id 20 :ttl 10) ;; :ok
268 |
269 | ;;; Request 1 to 127.0.0.1:6379
270 | ;;; zrevrange "bunshinids:foo" 0 -1 "withscores"
271 | (store! ctx "foo" "hello world new" :id 20) ;; :stale-write
272 |
273 | ;;; Request 1 to 127.0.0.1:6379
274 | ;;; zrevrange "bunshinids:foo" 0 -1 "withscores"
275 |
276 | ;;; Request 2 to 127.0.0.1:6379
277 | ;;; zadd "bunshinids:foo" 21 1
278 | ;;; set "foo:21" "hello worl new"
279 | ;;; zremrangebyrank "bunshin:foo" 1 -1
280 | ;;; del "foo:20"
281 | (store! ctx "foo" "hello world new" :id 21) ;; :ok
282 |
283 | ;;; Request 1 to 127.0.0.1:6379
284 | ;;; zrevrange "bunshinids:foo" 0 -1 "withscores"
285 | ;;; Request 2 to 127.0.0.1:6379
286 | ;;; get "foo:21"
287 | (get! ctx "foo") ;; "hello world new"
288 |
289 | (def ctx (gen-context [{:pool {}
290 | :spec {:host "127.0.0.1"
291 | :port 6379}}
292 | {:pool {}
293 | :spec {:host "127.0.0.1"
294 | :port 6380}}
295 | {:pool {}
296 | :spec {:host "127.0.0.1"
297 | :port 6381}}
298 | {:pool {}
299 | :spec {:host "127.0.0.1"
300 | :port 6382}}]))
301 |
302 |
303 | ;; Assume that mapping for id foo is 127.0.0.1:6380 and 127.0.0.1:6381
304 |
305 | ;;; Request phase 1
306 | ;;; Requests to 127.0.0.1:6380, 127.0.0.1:6381
307 | ;;; zrevrange "bunshinids:foo" 0 -1 "withscores"
308 | (get! ctx "foo") ;; nil
309 |
310 | ;;; Request phase 1
311 | ;;; Requests to 127.0.0.1:6380, 127.0.0.1:6381
312 | ;;; zrevrange "bunshinids:foo" 0 -1 "withscores"
313 |
314 | ;;; Request phase 2
315 | ;;; Requests to 127.0.0.1:6380, 127.0.0.1:6381
316 | ;;; zadd "bunshinids:foo" 20 1
317 | ;;; set "foo:20" "hello world"
318 | (store! ctx "foo" "hello world" :id 20) ;; :ok
319 |
320 | ;;; Request 1 to 127.0.0.1:6379
321 | ;;; zrevrange "bunshinids:foo" 0 -1 "withscores"
322 | (store! ctx "foo" "hello world new" :id 20) ;; :stale-write
323 |
324 | ;;; Request 1 to 127.0.0.1:6379
325 | ;;; zrevrange "bunshinids:foo" 0 -1 "withscores"
326 |
327 | ;;; Request 2 to 127.0.0.1:6379
328 | ;;; zadd "bunshinids:foo" 21 1
329 | ;;; set "foo:21" "hello worl new"
330 | ;;; zremrangebyrank "bunshin:foo" 1 -1
331 | ;;; del "foo:20"
332 | (store! ctx "foo" "hello world new" :id 21) ;; :ok
333 |
334 | (def ctx (gen-context [{:pool {}
335 | :spec {:host "127.0.0.1"
336 | :port 6379}}]))
337 |
338 |
339 | (get! ctx "foo") ;; served either from 6379
340 | )
341 |
--------------------------------------------------------------------------------
/src/bunshin/datastores/datastore.clj:
--------------------------------------------------------------------------------
1 | (ns bunshin.datastores.datastore)
2 |
3 |
4 | (defprotocol BunshinDataStorage
5 | (get-id-xs [this server-conf key] "Get list of ids for a given key")
6 | (get [this server-conf key] "Get value for a given key")
7 |
8 | (set [this server-conf val-key val id-key id ttl] "Store value and id against keys with expiry")
9 |
10 | (prune-ids [this server-conf id-key] "Delete all ids but the largest id for given id key. ")
11 | (del [this server-conf keys] "Delete keys"))
12 |
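;;; A minimal sketch of a custom backend (hypothetical, for
;;; illustration only): anything that satisfies BunshinDataStorage can
;;; be handed to bunshin.core/gen-context as its storage-backend.
(comment
  (def noop-backend
    (reify BunshinDataStorage
      (get-id-xs [_ server-conf key] [])
      (get [_ server-conf key] nil)
      (set [_ server-conf val-key val id-key id ttl] nil)
      (prune-ids [_ server-conf id-key] nil)
      (del [_ server-conf keys] nil))))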
--------------------------------------------------------------------------------
/src/bunshin/datastores/in_memory.clj:
--------------------------------------------------------------------------------
1 | (ns bunshin.datastores.in-memory
2 | (:require [bunshin.datastores.datastore :refer [BunshinDataStorage]]
3 | [clojure.test.check.generators :as gen]
4 | [clj-time.core :as ctc]))
5 |
6 | (let [sleep-seq (gen/sample (gen/frequency [[1 (gen/return 300)]
7 | [4 (gen/return 4)]
8 | [95 (gen/return 1)]])
9 | 10000)]
10 | (defn rand-sleep
11 | []
12 | (nth sleep-seq (rand-int (dec 10000)))))
13 |
14 |
15 | (defprotocol TestableServer
16 | (start [this server fresh?])
17 | (partial-fail [this server map])
18 | (shutdown [this server])
19 | (get-data [this]))
20 |
21 |
22 | (defn gen-in-memory-backend
23 | []
24 | (let [r-stores (atom {})
25 | store-states (atom {})
26 | offline-stores (atom {})
27 | default-state-map {:get true
28 | :get-id-xs true
29 | :set true
30 | :prune-ids true
31 | :del true}
32 | get-server-conf (fn [server-conf]
33 | (if-let [r-store (get @r-stores
34 | server-conf)]
35 | r-store
36 | (let [r-store (atom {})]
37 | (swap! r-stores
38 | assoc
39 | server-conf
40 | r-store)
41 | r-store)))
42 | get-server-state (fn [server-conf k]
43 | (get (get @store-states
44 | server-conf
45 | default-state-map)
46 | k))]
47 | (reify
48 | BunshinDataStorage
49 | (get [this server-conf k]
50 | (when (get-server-state server-conf :get)
51 | (try
52 | (Thread/sleep (rand-sleep))
53 | (let [{:keys [val expire_at]} (get @(get-server-conf server-conf) k)]
54 | (if expire_at
55 | (when (ctc/after? expire_at (ctc/now))
56 | val)
57 | val))
58 | (catch Exception _))))
59 |
60 | (get-id-xs [this server-conf k]
61 | (when (get-server-state server-conf :get-id-xs)
62 | (Thread/sleep (rand-sleep))
63 | (try
64 | (let [r-store (get-server-conf server-conf)]
65 | (if-let [xs (get @r-store k)]
66 | (map (comp str first) (sort-by (comp - first) xs))
67 | []))
68 | (catch Exception _))))
69 |
70 | (set [this server-conf val-key val id-key id ttl]
71 | (when (get-server-state server-conf :set)
72 | (try
73 | (Thread/sleep (rand-sleep))
74 | (let [d (ctc/plus (ctc/now)
75 | (ctc/seconds ttl))
76 | val-map (if (and ttl
77 | (pos? ttl))
78 | {:val val
79 | :expire_at d}
80 | {:val val})]
81 | (swap! (get-server-conf server-conf)
82 | (fn [v]
83 | (-> v
84 | (update-in [id-key] (fn [s]
85 | (assoc s id 1)))
86 | (assoc val-key val-map)))))
87 | (catch Exception _))))
88 |
89 | (prune-ids [this server-conf id-key]
90 | (when (get-server-state server-conf :prune-ids)
91 | (try
92 | (Thread/sleep (rand-sleep))
93 | (swap! (get-server-conf server-conf)
94 | update-in
95 | [id-key]
96 | (fn [s]
97 | (into {} (take 1 (sort-by (comp - first) s)))))
98 | (catch Exception _))))
99 |
100 | (del [this server-conf keys]
101 | (when (get-server-state server-conf :del)
102 | (try
103 | (Thread/sleep (rand-sleep))
104 | (let [r-store (get-server-conf server-conf)]
105 | (doseq [key keys]
106 | (swap! r-store dissoc key)))
107 | (catch Exception _))))
108 | TestableServer
109 | (start [this server fresh?]
110 | (if fresh?
111 | (when-let [r-store (get @offline-stores server)]
112 | (do (swap! r-stores assoc server r-store)
113 | (swap! offline-stores dissoc server)))
114 | (swap! r-stores assoc server (atom {}))))
115 | (shutdown [this server]
116 | (let [r-store (get @r-stores server)]
117 | (swap! offline-stores assoc server r-store))
118 | (swap! r-stores assoc server true))
119 |
120 | (get-data [this]
121 | r-stores)
122 |
123 | (partial-fail [this server state-map]
124 | (swap! store-states
125 | assoc
126 | server
127 | (merge default-state-map
128 | state-map))))))
129 |
--------------------------------------------------------------------------------
/src/bunshin/datastores/redis.clj:
--------------------------------------------------------------------------------
1 | (ns bunshin.datastores.redis
2 | (:require [taoensso.carmine :as r]
3 | [bunshin.datastores.datastore :refer [BunshinDataStorage]]))
4 |
5 |
6 | (defmacro redis [server-conf & body] `(try
7 | (r/wcar ~server-conf
8 | ~@body)
9 | (catch Exception e#
10 | nil)))
11 |
12 |
13 | (def redis-backend
14 | (reify BunshinDataStorage
15 | (get [this server-conf key]
16 | (redis server-conf
17 | (r/get key)))
18 |
19 | (get-id-xs [this server-conf key]
20 | (map first
21 | (partition 2
22 | (redis server-conf
23 | (r/zrevrange key 0 -1 "WITHSCORES")))))
24 |
25 | (set [this server-conf val-key val id-key id ttl]
26 | (if (and ttl
27 | (pos? ttl))
28 | (redis server-conf
29 | (r/zadd id-key id id)
30 | (r/setex val-key ttl val))
31 | (redis server-conf
32 | (r/zadd id-key id id)
33 | (r/set val-key val))))
34 |
35 | (prune-ids [this server-conf id-key]
36 | ;; keep only the largest id (highest score)
37 | (redis server-conf
38 | (r/zremrangebyrank id-key 0 -2)))
39 |
40 | (del [this server-conf keys]
41 | (redis server-conf
42 | (doseq [key keys]
43 | (r/del key))))))
45 |
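;;; Sketch: exercising the backend directly through the protocol
;;; (conn is a carmine connection map; key names are illustrative).
(comment
  (require '[bunshin.datastores.datastore :as bdd])
  (def conn {:pool {} :spec {:host "127.0.0.1" :port 6379}})
  (bdd/set redis-backend conn "foo:20" "hello world" "bunshin-ids:foo" 20 -1)
  (bdd/get-id-xs redis-backend conn "bunshin-ids:foo") ;; => ("20")
  (bdd/get redis-backend conn "foo:20"))               ;; => "hello world"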
--------------------------------------------------------------------------------
/test/bunshin/benchmarks.clj:
--------------------------------------------------------------------------------
1 | (ns bunshin.benchmarks
2 | (:require [bunshin.core :as bc]
4 | [bunshin.datastores.in-memory :refer [gen-in-memory-backend
5 | shutdown start partial-fail
6 | get-data]]
7 | [criterium.core :as cc]
8 | [clojure.test :refer :all]))
9 |
10 | ;;; These benchmarks run on the in-memory backend, which uses
11 | ;;; Thread/sleep calls to emulate production latency.
12 |
13 | ;;; These benchmarks aim to test the performance of bunshin's model of
14 | ;;; running queries. The results will vary with real redis instances,
15 | ;;; but this gives a clearer idea of how bunshin will behave.
16 |
17 | ;; (1 10 20 30 40 50 60 70 80 90 100)
18 | ;; (0.009077299428315413 0.07632339347222222 0.17755765050000002 0.25530742226666664 0.36088783383333334 0.4146356505 0.5087279088333333 0.5478025367777778 0.6030042421666667 0.6842258505000001 0.7761572588333334)
19 | (defn bench-store!
20 | [replication-factor]
21 | (let [ctx (bc/gen-context (take replication-factor (range))
22 | (gen-in-memory-backend))
23 | n (atom 0)]
24 | (cc/benchmark (bc/store! ctx "foo" "hello world"
25 | :id (swap! n inc)
26 | :replication-factor replication-factor)
27 | {})))
28 |
29 |
30 | ;; (1 10 20 30 40 50 60 70 80 90 100)
31 | ;; (0.006859095753333334 0.03977656643589744 0.08460825524561405 0.1337506301904762 0.1409151505 0.1914606556 0.2606408255 0.26275291300000003 0.30316741925 0.3500710201111112 0.40578012550000003)
32 |
33 | (defn bench-get!
34 | [replication-factor]
35 | (let [ctx (bc/gen-context (take replication-factor (range))
36 | (gen-in-memory-backend))]
37 | (bc/store! ctx "foo" "hello world" :replication-factor replication-factor)
38 | (cc/benchmark (bc/get! ctx "foo" :replication-factor replication-factor)
39 | {})))
40 |
41 |
42 | ;; (1 10 20 30 40 50 60 70 80 90 100)
43 | ;; (0.004199259339285714, 0.004231066232900434 0.004461683482269503 0.0042526224437500005 0.004651562815891473 0.004357024291666667 0.004586260432624114 0.0040047271763668435 0.004570258455426357 0.004137806738993711 0.004332861722789115)
44 | (defn bench-get-fast
45 | [replication-factor]
46 | (let [ctx (bc/gen-context (take replication-factor (range))
47 | (gen-in-memory-backend))]
48 | (bc/store! ctx "foo" "hello world" :replication-factor replication-factor)
49 | (let [{:keys [servers id]}
50 | (bc/get-with-meta! ctx "foo" :replication-factor replication-factor)]
51 | (cc/benchmark (bc/get-fast ctx "foo" id servers) {}))))
52 |
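;;; To run from a REPL (sketch): each bench-* fn returns a criterium
;;; result map that can be summarised with criterium.core/report-result,
;;; e.g. (cc/report-result (bench-get! 10))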
--------------------------------------------------------------------------------
/test/bunshin/core_test.clj:
--------------------------------------------------------------------------------
1 | (ns bunshin.core-test
2 | (:require [bunshin.datastores.datastore :refer [BunshinDataStorage]]
3 | [bunshin.datastores.in-memory :refer [gen-in-memory-backend
4 | shutdown start partial-fail
5 | get-data]]
6 | [ketamine.core :as ketama]
7 | [clojure.test :refer :all]
8 | [bunshin.core :as bc]))
9 |
10 |
11 | (deftest single-server-normal
12 | (let [ctx (bc/gen-context [6379]
13 | (gen-in-memory-backend))]
14 | (is (nil? (bc/get! ctx "foo")))
15 | (is (= (bc/store! ctx "foo" "hello world" :id 10) :ok))
16 | (is (= (bc/store! ctx "foo" "hello world" :id 9) :stale-write))
17 | (is (= (bc/store! ctx "foo" "hello new world" :id 11) :ok))
18 | (is (= (bc/get! ctx "foo") "hello new world"))))
19 |
20 | (deftest single-server-normal-ttl
21 | (let [ctx (bc/gen-context [6379]
22 | (gen-in-memory-backend))]
23 | (is (nil? (bc/get! ctx "foo")))
24 | (is (= (bc/store! ctx "foo" "hello world" :id 10 :ttl 2) :ok))
25 | (is (= (bc/get! ctx "foo" :ttl 2) "hello world"))
26 | (Thread/sleep 2100)
27 | (is (nil? (bc/get! ctx "foo" :ttl 2)))))
28 |
29 |
30 | (deftest multi-server-normal
31 | (let [ctx (bc/gen-context [6379 6380 6381 6382]
32 | (gen-in-memory-backend))]
33 | (is (nil? (bc/get! ctx "foo")))
34 | (is (= (bc/store! ctx "foo" "hello world" :id 10) :ok))
35 | (is (= (bc/store! ctx "foo" "hello world" :id 9) :stale-write))
36 | (is (= (bc/store! ctx "foo" "hello new world" :id 11) :ok))
37 | (is (= (bc/get! ctx "foo") "hello new world"))))
38 |
39 |
40 | (deftest multi-server-fail-scenario-1
41 | (let [ctx (bc/gen-context [6379 6380 6381 6382]
42 | (gen-in-memory-backend))
43 | {:keys [storage-backend ring]} ctx
44 | key "foo"
45 | replication-factor 4]
46 | (is (nil? (bc/get! ctx key :replication-factor replication-factor)))
47 | (is (= (bc/store! ctx key "hello world"
48 | :id 10 :replication-factor replication-factor)
49 | :ok))
50 | (is (= (bc/store! ctx key "hello world"
51 | :id 9 :replication-factor replication-factor)
52 | :stale-write))
53 |
54 | ;; All but one server is running
55 | (let [nodes (#'bc/get-servers ring key replication-factor)
56 | nodes-to-shutdown (take (dec replication-factor) (shuffle nodes))]
57 | (doseq [node nodes-to-shutdown]
58 | (shutdown storage-backend node))
59 | (is (= (bc/store! ctx key "hello new world"
60 | :id 11 :replication-factor replication-factor)
61 | :ok))
62 | (is (= (bc/get! ctx key
63 | :replication-factor replication-factor)
64 | "hello new world")))
65 |
66 | ;; All servers are running again
67 | (let [nodes (#'bc/get-servers ring key replication-factor)]
68 | (doseq [node nodes]
69 | (start storage-backend node true))
70 | (is (= (bc/get! ctx key
71 | :replication-factor replication-factor)
72 | "hello new world")))
73 |
74 | ;; No servers are running
75 | (let [nodes (#'bc/get-servers ring key replication-factor)]
76 | (doseq [node nodes]
77 | (shutdown storage-backend node))
78 | (is (nil? (bc/get! ctx key
79 | :replication-factor replication-factor))))))
80 |
81 |
82 | (deftest multi-server-fail-scenario-2
83 | (let [ctx (bc/gen-context [6379 6380 6381 6382]
84 | (gen-in-memory-backend))
85 | {:keys [storage-backend ring]} ctx
86 | key "foo"
87 | replication-factor 4]
88 | (is (nil? (bc/get! ctx key
89 | :replication-factor replication-factor)))
90 | (is (= (bc/store! ctx key "hello world"
91 | :id 10 :replication-factor replication-factor)
92 | :ok))
93 |
94 | ;; All but one server is running
95 | (let [nodes (take (dec replication-factor)
96 | (#'bc/get-servers ring key replication-factor))]
97 | (doseq [node nodes]
98 | (shutdown storage-backend node))
99 | (is (= (bc/store! ctx key "hello new world"
100 | :id 11 :replication-factor replication-factor)
101 | :ok))
102 | (is (= (bc/get! ctx key
103 | :replication-factor replication-factor)
104 | "hello new world")))
105 |
106 | ;; All servers are running again except the one server that
107 | ;; stayed up previously
108 | (let [nodes (#'bc/get-servers ring key replication-factor)]
109 | (doseq [node (take (dec replication-factor)
110 | nodes)]
111 | (start storage-backend node true))
112 | (shutdown storage-backend (last nodes))
113 | (is (= (bc/get! ctx key
114 | :replication-factor replication-factor)
115 | ;; This scenario can be avoided by using a ttl
116 | "hello world")))))
117 |
118 |
119 | (deftest concurrent-writes
120 | (let [ctx (bc/gen-context [6379 6380 6381 6382]
121 | (gen-in-memory-backend))
122 | {:keys [storage-backend ring]} ctx
123 | key "foo"
124 | replication-factor 4]
125 | (is (nil? (bc/get! ctx key
126 | :replication-factor replication-factor)))
127 |
128 | (is (every? #{:stale-write :ok}
129 | (map deref
130 | (map (fn [n]
131 | (future (Thread/sleep (rand-int 100))
132 | (bc/store! ctx key (str "hello world" n)
133 | :id n
134 | :replication-factor replication-factor)))
135 | (range 100)))))
136 |
137 | (is (= (bc/get! ctx key
138 | :replication-factor replication-factor)
139 | (str "hello world" 99)))))
140 |
141 |
142 | (deftest partial-failures-scenario-1
143 | (let [mem (gen-in-memory-backend)
144 | server-list [6379
145 | 6380]
146 | ctx (bc/gen-context [6379
147 | 6380]
148 | mem
149 | (fn [thunk]
150 | (future (thunk)))
151 | ;; Always select first server to fetch data
152 | first)
153 | {:keys [storage-backend ring]} ctx
154 | key "foo"
155 | replication-factor 4]
156 |
157 | (is (nil? (bc/get! ctx "foo")))
158 | (is (= (bc/store! ctx "foo" "hello world" :id 10) :ok))
159 | (is (= (bc/store! ctx "foo" "hello world" :id 9) :stale-write))
160 |
161 | ;; For 6379 the id-list read will succeed but the next get request will fail.
162 | (partial-fail mem
163 | 6379
164 | {:get false})
165 | (is (nil? (bc/get! ctx "foo")))
166 |
167 | (partial-fail mem
168 | 6379
169 | {:get true})
170 | (partial-fail mem
171 | 6380
172 | {:get false})
173 | (is (= (bc/get! ctx "foo") "hello world"))))
174 |
--------------------------------------------------------------------------------