├── README
└── jmemstore
├── .gitignore
├── etc
└── header.txt
├── pom.xml
└── src
├── main
└── java
│ └── org
│ └── gridkit
│ └── offheap
│ └── jmemstore
│ ├── BinHash.java
│ ├── BinaryHashTable.java
│ ├── BinaryKeyListStore.java
│ ├── BinaryKeyValueStore.java
│ ├── BinaryStoreManager.java
│ ├── ByteChunk.java
│ ├── IdentityMapping.java
│ ├── IdentityMappingHashTable.java
│ ├── InHeapBinaryStoreManager2.java
│ ├── InHeapMemoryStoreBackend.java
│ ├── MemoryConsumer.java
│ ├── MemoryStoreBackend.java
│ ├── OffHeapBinaryStoreManager2.java
│ ├── OffHeapMemoryStoreBackend.java
│ └── PagedBinaryStoreManager.java
└── test
└── java
└── org
└── gridkit
└── offheap
└── jmemstore
├── BaseBinaryStoreManagerTest.java
├── CRC32SpeedTest.java
├── HashSpeedTest.java
├── IdentityMappingHashTableTest.java
├── MurmurSpeedTest.java
├── PagedMemoryBinaryStoreManager2OffHeapTest.java
├── PagedMemoryBinaryStoreManager2Test.java
├── RandomIdentityMappingTester.java
├── SimpleIdentityMapping.java
└── SplitHashTest.java
/README:
--------------------------------------------------------------------------------
1 | JMemStore - High level storage API for storing binary data out of JVM heap.
--------------------------------------------------------------------------------
/jmemstore/.gitignore:
--------------------------------------------------------------------------------
1 | /.settings
2 | /.classpath
3 | /.project
4 |
--------------------------------------------------------------------------------
/jmemstore/etc/header.txt:
--------------------------------------------------------------------------------
1 | Licensed under the Apache License, Version 2.0 (the "License");
2 | you may not use this file except in compliance with the License.
3 | You may obtain a copy of the License at
4 |
5 | http://www.apache.org/licenses/LICENSE-2.0
6 |
7 | Unless required by applicable law or agreed to in writing, software
8 | distributed under the License is distributed on an "AS IS" BASIS,
9 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 | See the License for the specific language governing permissions and
11 | limitations under the License.
--------------------------------------------------------------------------------
/jmemstore/pom.xml:
--------------------------------------------------------------------------------
1 |
2 |
19 |
21 |
22 | 4.0.0
23 | org.gridkit.offheap
24 | jmemstore
25 | jar
26 | 0.1-SNAPSHOT
27 |
28 |
29 |
30 |
31 |
32 | 1.6
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 | mc-release
41 | maven-license-plugin repository of releases
42 | http://mc-repo.googlecode.com/svn/maven2/releases
43 |
44 | false
45 |
46 |
47 | true
48 |
49 |
50 |
51 |
52 |
53 |
54 | junit
55 | junit
56 | 4.8.1
57 | test
58 |
59 |
60 | org.slf4j
61 | slf4j-api
62 | 1.6.1
63 |
64 |
65 | org.slf4j
66 | slf4j-jdk14
67 | 1.6.1
68 |
69 |
70 |
71 |
72 |
73 |
74 | org.apache.maven.plugins
75 | maven-compiler-plugin
76 |
77 | ${javaVersion}
78 | ${javaVersion}
79 |
80 |
81 |
82 | org.apache.maven.plugins
83 | maven-resources-plugin
84 | 2.3
85 |
86 | UTF-8
87 |
88 |
89 |
90 |
91 | maven-license-plugin
92 | com.mathieucarbou.mojo
93 |
94 | ${project.parent.basedir}
95 |
96 |
97 | **/src/**/*.java
98 | **/src/**/*.xml
99 | **/pom.xml
100 |
101 |
102 |
103 |
104 | test
105 |
106 | check
107 |
108 |
109 |
110 |
111 |
112 |
113 |
114 |
--------------------------------------------------------------------------------
/jmemstore/src/main/java/org/gridkit/offheap/jmemstore/BinHash.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2011 Alexey Ragozin
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | *
16 | */
17 | package org.gridkit.offheap.jmemstore;
18 |
19 | /**
20 | * A simple variation of CRC
21 | * @author Alexey Ragozin (alexey.ragozin@gmail.com)
22 | */
23 | class BinHash {
24 |
25 | static public int[] CRC32_TABLE = new int[256];
26 |
27 | static
28 | {
29 | for (int i = 0; i < 256; i++)
30 | {
31 | int r = i;
32 | for (int j = 0; j < 8; j++)
33 | if ((r & 1) != 0)
34 | r = (r >>> 1) ^ 0xEDB88320;
35 | else
36 | r >>>= 1;
37 | CRC32_TABLE[i] = r;
38 | }
39 | }
40 |
41 |
42 | public static int hash(int n) {
43 | return appendHash(-1, n);
44 | }
45 |
46 | public static int hash(ByteChunk bytes) {
47 | return appendHash(-1, bytes);
48 | }
49 |
50 | public static int appendHash(int hash, int n) {
51 | hash = CRC32_TABLE[(hash ^ (n & 0xFF)) & 0xFF] ^ (hash >>> 8);
52 | hash = CRC32_TABLE[(hash ^ ((n >> 8) & 0xFF)) & 0xFF] ^ (hash >>> 8);
53 | hash = CRC32_TABLE[(hash ^ ((n >> 16) & 0xFF)) & 0xFF] ^ (hash >>> 8);
54 | hash = CRC32_TABLE[(hash ^ ((n >> 24) & 0xFF)) & 0xFF] ^ (hash >>> 8);
55 | return hash;
56 | }
57 |
58 | public static int appendHash(int hash, ByteChunk bytes) {
59 | for (int i = 0; i < bytes.lenght(); i++) {
60 | hash = CRC32_TABLE[(hash ^ bytes.at(i)) & 0xFF] ^ (hash >>> 8);
61 | }
62 | return hash;
63 | }
64 |
65 | public static int splitHash(int hash, int capacity) {
66 | int round = Integer.highestOneBit(capacity);
67 | int split = capacity & ~round;
68 |
69 | long idx = (0xFFFFFFFFl & hash) % (round);
70 |
71 | if (idx < split) {
72 | idx = (0xFFFFFFFFl & hash) % (round << 1);
73 | }
74 | return (int) idx;
75 | }
76 |
77 | public static int murmur3_fmix(int h) {
78 | h ^= h >> 16;
79 | h *= 0x85ebca6d;
80 | h ^= h >> 13;
81 | h *= 0xc2b2ae35;
82 | h ^= h >> 16;
83 |
84 | return h;
85 | }
86 |
87 | public static int murmur3_mix(byte[] data, int offs, int len, int seed) {
88 |
89 | int h1 = seed;
90 | int c1 = 0xcc9e2d51;
91 | int c2 = 0x1b873593;
92 | int k1;
93 |
94 | int p = offs;
95 | int l = offs + len;
96 | while(p + 3 < l) {
97 | int block = data[p++];
98 | block |= (0xFF & data[p++]) << 8;
99 | block |= (0xFF & data[p++]) << 16;
100 | block |= (0xFF & data[p++]) << 24;
101 |
102 | k1 = block;
103 | k1 *= c1;
104 | k1 = Integer.rotateLeft(k1, 15);
105 | k1 *= c2;
106 |
107 | h1 ^= k1;
108 | h1 = Integer.rotateLeft(h1, 13);
109 | h1 = h1*5 +0xe6546b64;
110 | }
111 |
112 | int block = 0;
113 | switch(l - p) {
114 | case 3: block |= (0xFF & data[p+2]) << 16;
115 | case 2: block |= (0xFF & data[p+1]) << 8;
116 | case 1: block |= (0xFF & data[p]);
117 | }
118 | k1 = block;
119 | k1 *= c1;
120 | k1 = Integer.rotateLeft(k1, 15);
121 | k1 *= c2;
122 | h1 ^= k1;
123 |
124 | return h1;
125 | }
126 |
127 | public static int murmur3_hash(byte[] data, int offs, int len, int seed) {
128 | int h = murmur3_mix(data, offs, len, seed);
129 | h ^= len;
130 | return murmur3_fmix(h);
131 | }
132 | }
133 |
--------------------------------------------------------------------------------
/jmemstore/src/main/java/org/gridkit/offheap/jmemstore/BinaryHashTable.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2011 Alexey Ragozin
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | *
16 | */
17 | package org.gridkit.offheap.jmemstore;
18 |
19 | import java.util.ArrayList;
20 | import java.util.Arrays;
21 | import java.util.Iterator;
22 | import java.util.List;
23 | import java.util.NoSuchElementException;
24 | import java.util.concurrent.atomic.AtomicInteger;
25 | import java.util.concurrent.atomic.AtomicIntegerArray;
26 |
27 |
28 | /**
29 | * Class hashtable data structure, using explicit memory manager
30 | *
31 | * @author Alexey Ragozin (alexey.ragozin@gmail.com)
32 | */
33 | class BinaryHashTable implements BinaryKeyValueStore, MemoryConsumer {
34 |
35 | private final MemoryStoreBackend pageManager;
36 |
37 | private final int segmentCount;
38 | private int[] masterHashtable;
39 | private AtomicIntegerArray locktable;
40 | private volatile int capacity;
41 |
42 | private AtomicInteger size = new AtomicInteger();
43 | private float targetLoadFactor = 0.8f;
44 |
	/**
	 * Creates a hash table over the given page manager.
	 * The table is striped into {@code segmentCount} segments: each segment
	 * owns one slot of {@code masterHashtable} (a page-manager pointer to its
	 * bucket array) and one byte-wide lock in the lock table.
	 */
	public BinaryHashTable(MemoryStoreBackend pageManager, int segmentCount) {
		this.pageManager = pageManager;
		this.segmentCount = segmentCount;
		this.masterHashtable = new int[segmentCount];
		this.locktable = createLocktable(segmentCount);
		// grow one bucket at a time until every segment has at least one bucket
		while(capacity < segmentCount) {
			increamentCapacity();
		}
	}

	// Reads the bucket pointer at a logical bucket index.
	// Buckets are round-robin striped: index % segmentCount selects the
	// segment, index / segmentCount the 4-byte slot within that segment.
	private int hashtableGet(int index) {
		int slot = index % segmentCount;
		int ix = index / segmentCount;

		int pp = masterHashtable[slot];
		int value = pageManager.readInt(pp, ix * 4);
		return value;
	}

	// Writes the bucket pointer at a logical bucket index (same striping as hashtableGet).
	private void hashtableSet(int index, int value) {
		int slot = index % segmentCount;
		int ix = index / segmentCount;

		int pp = masterHashtable[slot];
		pageManager.writeInt(pp, ix * 4, value);
	}

	// lock is assumed
	// Grows the table by exactly one bucket (one linear-hashing step),
	// reallocating the owning segment when its aligned size changes.
	// Returns the new capacity.
	private int increamentCapacity() {
		// assumed newCap = capacity + 1

		int slot = capacity % segmentCount;
		int oldSegSize = alignSegmentSize(capacity / segmentCount);
		int newSegSize = alignSegmentSize(1 + (capacity / segmentCount));
		if (oldSegSize != newSegSize) {
			resizeSegment(slot, newSegSize);
		}
		return ++capacity;
	}

	// Aligned bucket-slot count currently owned by segment n.
	private int getSegmentSize(int n) {
		int size = (capacity / segmentCount + (n < (capacity % segmentCount) ? 1 : 0));
		return alignSegmentSize(size);
	}
89 |
90 | private int alignSegmentSize(int cap) {
91 | if (cap == 0) {
92 | return 0;
93 | }
94 | ++cap;
95 | int allignment = 0xF;
96 | if (cap > 256) {
97 | allignment = 0x3F;
98 | }
99 | else if (cap > 1024) {
100 | allignment = 0xFF;
101 | }
102 | else if (cap > 4096) {
103 | allignment = 0x3FF;
104 | }
105 | cap = (cap + allignment) & (~allignment);
106 | --cap; // reserve one slot for memory manager
107 | return cap;
108 | }
109 |
	// Reallocates a hashtable segment to newSegSize slots, copying the old
	// contents and releasing the old allocation.
	// NOTE(review): EMPTY appears to be 0 (getEntries tests pointer == 0) —
	// confirm against PagedBinaryStoreManager.
	private void resizeSegment(int slot, int newSegSize) {
		int opp = masterHashtable[slot];
		int npp = pageManager.allocate(newSegSize * 4, PagedBinaryStoreManager.ALLOC_HASH_SEGMENT);
		if (opp != PagedBinaryStoreManager.EMPTY) {
			ByteChunk oldSeg = pageManager.get(opp);
			ByteChunk newSeg = pageManager.get(npp);
			newSeg.putBytes(oldSeg);
			// not required for in-heap backend
			pageManager.update(npp, newSeg);
			pageManager.release(opp);
		}
		masterHashtable[slot] = npp;
	}

	// lock assumed
	// Decodes a bucket slot into entry pointers. Encoding:
	//   0   - empty bucket
	//   > 0 - single entry pointer
	//   < 0 - negated pointer to an overflow list chunk: [hash][e1][e2]...
	private int[] getEntries(int index) {
		int pointer;
		pointer = hashtableGet(index);
		if (pointer == 0) {
			return null;
		}
		else if (pointer > 0) {
			return new int[]{pointer};
		}
		else {
			pointer = -pointer;
			ByteChunk chunk = pageManager.get(pointer);
			// first 4 bytes hold the bucket hash; the rest are entry pointers
			int[] entries = new int[chunk.lenght() / 4 - 1];
			for(int i = 0; i != entries.length; ++i) {
				entries[i] = chunk.intAt(4 + i * 4);
			}
			return entries;
		}
	}

	// lock assumed
	// Stores a bucket's entry pointers using the encoding described at
	// getEntries, releasing any previous overflow list chunk first.
	private void setEntries(int index, int[] entries) {
		int pointer;
		pointer = hashtableGet(index);
		if (pointer != PagedBinaryStoreManager.EMPTY && pointer < 0) {
			// old overflow list is replaced wholesale
			pointer = -pointer;
			pageManager.release(pointer);
		}
		if (entries == null || entries.length == 0) {
			hashtableSet(index, PagedBinaryStoreManager.EMPTY);
		}
		else if (entries.length == 1) {
			hashtableSet(index, entries[0]);
		}
		else {
			// multi-entry bucket: allocate an overflow list [hash][e1][e2]...
			ByteChunk first = pageManager.get(entries[0]);
			int hash = first.intAt(0);
			int npp = pageManager.allocate(4 + 4 * entries.length, PagedBinaryStoreManager.ALLOC_NEW_LIST);
			ByteChunk list = pageManager.get(npp);
			try {
				list.assertEmpty();
			}
			catch(AssertionError e) {
				System.out.println("Problem pointer is " + pageManager.page(npp) + ":" + pageManager.offset(npp));
				throw e;
			}
			list.putInt(0, hash);
			for(int i = 0; i != entries.length; ++i) {
				list.putInt(4 + 4 * i, entries[i]);
			}
			// not required for in-heap backend
			pageManager.update(npp, list);
			hashtableSet(index, -npp);
		}
	}

	// Fills a freshly allocated chunk with an entry record:
	// [hash][key size][value size][key bytes][value bytes]
	private void createEntry(int npp, ByteChunk key, ByteChunk value, int hash) {
		ByteChunk chunk = pageManager.get(npp);
		try {
			chunk.assertEmpty();
		}
		catch(AssertionError e) {
			System.out.println("Problem pointer is " + pageManager.page(npp) + ":" + pageManager.offset(npp));
			throw e;
		}
		chunk.putInt(PagedBinaryStoreManager.HASH_POS, hash);
		chunk.putInt(PagedBinaryStoreManager.KEY_SIZE_POS, key.lenght());
		chunk.putInt(PagedBinaryStoreManager.VALUE_SIZE_POS, value.lenght());
		chunk.putBytes(PagedBinaryStoreManager.DATA_POS, key);
		chunk.putBytes(PagedBinaryStoreManager.DATA_POS + key.lenght(), value);

		// no need for in-heap storage
		pageManager.update(npp, chunk);
	}
199 |
	// Byte-wise comparison of an entry's stored key with the given key.
	private boolean sameKey(ByteChunk entry, ByteChunk key) {
		int keySize = entry.intAt(PagedBinaryStoreManager.KEY_SIZE_POS);
		if (keySize == key.lenght()) {
			for (int i = 0; i != keySize; ++i) {
				if (entry.at(PagedBinaryStoreManager.DATA_POS + i) != key.at(i)) {
					return false;
				}
			}
			return true;
		}
		else {
			return false;
		}
	}

	// Byte-wise comparison of an entry's stored value with the given value.
	// NOTE: value must be non-null (dereferenced unconditionally).
	private boolean sameValue(ByteChunk entry, ByteChunk value) {
		int keySize = entry.intAt(PagedBinaryStoreManager.KEY_SIZE_POS);
		int valueSize = entry.intAt(PagedBinaryStoreManager.VALUE_SIZE_POS);
		if (valueSize == value.lenght()) {
			// TODO memcpy?
			for (int i = 0; i != valueSize; ++i) {
				if (entry.at(PagedBinaryStoreManager.DATA_POS + keySize + i) != value.at(i)) {
					return false;
				}
			}
			return true;
		}
		else {
			return false;
		}
	}

	// Sub-chunk view of the key bytes within an entry record.
	private ByteChunk getKey(ByteChunk entry) {
		int keySize = entry.intAt(PagedBinaryStoreManager.KEY_SIZE_POS);
		return entry.subChunk(PagedBinaryStoreManager.DATA_POS, keySize);
	}

	// Sub-chunk view of the value bytes within an entry record.
	private ByteChunk getValue(ByteChunk entry) {
		int keySize = entry.intAt(PagedBinaryStoreManager.KEY_SIZE_POS);
		int valueSize = entry.intAt(PagedBinaryStoreManager.VALUE_SIZE_POS);
		return entry.subChunk(PagedBinaryStoreManager.DATA_POS + keySize, valueSize);
	}

	@Override
	public int size() {
		return size.get();
	}

	/**
	 * Returns the value stored for a key, or null if absent.
	 * Optimistic against concurrent resize: the bucket index is computed
	 * before locking and rechecked under the lock; on mismatch, retry.
	 */
	@Override
	public ByteChunk get(ByteChunk key) {
//		tableLock.readLock().lock();
		try {
			while(true) {
				int hash = BinHash.hash(key);
				int index = BinHash.splitHash(hash, capacity);
				readLock(index);
				try {
					// capacity may have changed before we acquired the lock
					if (index != BinHash.splitHash(hash, capacity)) {
						continue;
					}
					int[] entries = getEntries(index);
					if (entries != null) {
						for(int pp : entries) {
							ByteChunk entry = pageManager.get(pp);
							if (sameKey(entry, key)) {
								return getValue(entry);
							}
						}
					}
					return null;
				}
				finally {
					readUnlock(index);
				}
			}
		}
		finally {
//			tableLock.readLock().unlock();
		}
	}

	/** Inserts or overwrites; grows the table when a new key raised the load factor. */
	@Override
	public void put(ByteChunk key, ByteChunk value) {
		boolean inserted = internalPut(key, value, false, null);
		if (inserted) {
			checkTableSize();
		}
	}



	/**
	 * Atomic compare-and-put; {@code expected == null} means "insert only if
	 * absent". The table is grown only when a new key was actually inserted.
	 *
	 * @return true if the update was applied
	 */
	@Override
	public boolean compareAndPut(ByteChunk key, ByteChunk expected, ByteChunk newValue) {
		boolean inserted = internalPut(key, newValue, true, expected);
		if (inserted && expected == null) {
			checkTableSize();
		}
		return inserted;
	}
299 |
300 | private boolean internalPut(ByteChunk key, ByteChunk value, boolean checkOldValue, ByteChunk expected) {
301 |
302 | while(true) {
303 | int hash = BinHash.hash(key);
304 | int index = BinHash.splitHash(hash, capacity);
305 | writeLock(index);
306 | try {
307 | if (index != BinHash.splitHash(hash, capacity)) {
308 | continue;
309 | }
310 |
311 | int[] entries = getEntries(index);
312 |
313 | if (entries != null) {
314 | for(int i = 0; i != entries.length; ++i) {
315 | int pp = entries[i];
316 | ByteChunk entry = pageManager.get(pp);
317 | if (sameKey(entry, key)) {
318 |
319 | if (checkOldValue) {
320 | if (expected == null && !sameValue(entry, expected)) {
321 | return false;
322 | }
323 | }
324 |
325 | // overriding value
326 | pageManager.release(pp);
327 | int npp = pageManager.allocate(PagedBinaryStoreManager.DATA_POS + key.lenght() + value.lenght(), PagedBinaryStoreManager.ALLOC_NEW_VALUE);
328 | createEntry(npp, key, value, hash);
329 | entries[i] = npp;
330 | setEntries(index, entries);
331 | return checkOldValue ? true : false;
332 | }
333 | }
334 | }
335 |
336 | // TODO refactoring, move allocation to createEntry method
337 | if (checkOldValue) {
338 | if (expected != null) {
339 | return false;
340 | }
341 | }
342 |
343 | // add new entry
344 | int npp = pageManager.allocate(PagedBinaryStoreManager.DATA_POS + key.lenght() + value.lenght(), PagedBinaryStoreManager.ALLOC_NEW_VALUE);
345 | createEntry(npp, key, value, hash);
346 |
347 | int[] newEntries;
348 | if (entries == null || entries.length == 0) {
349 | newEntries = new int[]{npp};
350 | }
351 | else {
352 | newEntries = Arrays.copyOf(entries, entries.length + 1);
353 | newEntries[entries.length] = npp;
354 | }
355 |
356 | setEntries(index, newEntries);
357 | size.incrementAndGet();
358 | return true;
359 | }
360 | finally {
361 | writeUnlock(index);
362 | }
363 | }
364 | }
365 |
	/** Unconditional remove; delegates with no expected value. */
	@Override
	public void remove(ByteChunk key) {
		internalRemove(key, null);
	}


	/**
	 * Removes the key only if its current value equals {@code expected}.
	 * A null {@code expected} is rejected (returns false) rather than
	 * interpreted as "remove if absent".
	 */
	@Override
	public boolean compareAndRemove(ByteChunk key, ByteChunk expected) {
		if (expected != null) {
			return internalRemove(key, expected);
		}
		else {
			return false;
		}
	}

	/**
	 * Removes an entry under the bucket write lock, with the same optimistic
	 * retry against concurrent resize as internalPut.
	 *
	 * @param expected if non-null, remove only when the stored value matches
	 * @return true iff an entry was removed
	 */
	private boolean internalRemove(ByteChunk key, ByteChunk expected) {
		while(true) {
			int hash = BinHash.hash(key);
			int index = BinHash.splitHash(hash, capacity);
			writeLock(index);
			try {
				if (index != BinHash.splitHash(hash, capacity)) {
					continue;
				}

				int[] entries = getEntries(index);

				if (entries != null) {
					for(int pp : entries) {
						ByteChunk entry = pageManager.get(pp);
						if (sameKey(entry, key)) {

							if (expected != null) {
								if (!sameValue(entry, expected)) {
									return false;
								}
							}

							pageManager.release(pp);
							if (entries.length == 1) {
								setEntries(index, null);
							}
							else {
								// copy surviving pointers; assumes a pointer occurs
								// at most once per bucket
								int[] newEntries = new int[entries.length - 1];
								int n = 0;
								for(int pi : entries) {
									if (pi != pp) {
										newEntries[n++] = pi;
									}
								}
								setEntries(index, newEntries);
							}
							size.decrementAndGet();
							return true;
						}
					}
				}
				// not found
				return false;
			}
			finally {
				writeUnlock(index);
			}
		}
	}

	/** Weakly consistent iterator over all keys (see HashIterator). */
	@Override
	public Iterator<ByteChunk> keys() {
		return new HashIterator();
	}

	/** Releases all entries and re-initializes an empty table. */
	@Override
	public void clear() {
		clear(true);
	}

	/** Releases all entries and the hashtable segments themselves. */
	@Override
	public void destroy() {
		clear(false);
	}
447 |
448 | // TODO check clear method
449 | void clear(boolean reinit) {
450 | // lock everything
451 | for(int i = 0; i != segmentCount; ++i) {
452 | segmentWriteLock(i);
453 | }
454 | int[] empty = new int[0];
455 | for(int i = 0; i != capacity; ++i) {
456 | int[] entries = getEntries(i);
457 | if (entries != null) {
458 | for(int pp : entries) {
459 | pageManager.release(pp);
460 | }
461 | setEntries(i, empty);
462 | }
463 | }
464 | capacity = 0;
465 | size.set(0);
466 |
467 | for(int i = 0; i != segmentCount; ++i) {
468 | int pp = masterHashtable[i];
469 | if (pp != PagedBinaryStoreManager.EMPTY) {
470 | pageManager.release(pp);
471 | masterHashtable[i] = 0;
472 | }
473 | }
474 |
475 | if (reinit) {
476 | while(capacity < segmentCount) {
477 | increamentCapacity();
478 | }
479 | }
480 |
481 | // unlock, unlock order does not matter
482 | for(int i = 0; i != segmentCount; ++i) {
483 | segmentWriteUnlock(i);
484 | }
485 | }
486 |
	/** Current number of buckets. */
	public int getTableCapacity() {
		return capacity;
	}

	/** Load factor threshold that triggers growth. */
	public double getTargetLoadFactor() {
		return targetLoadFactor;
	}

	/** Counts empty buckets (diagnostic; takes each bucket's read lock in turn). */
	public int getTableGapNumber() {
		int n = 0;
		for(int i = 0; i < capacity; ++i) {
			readLock(i);
			try {
				if (hashtableGet(i) == 0) {
					++n;
				}
			}
			finally{
				readUnlock(i);
			}
		}
		return n;
	}

	/**
	 * Relocates all entries whose buckets hash to the given evacuation
	 * hashes, then relocates hashtable segments marked for recycling.
	 */
	public void recycleHashes(int[] evacuationHashes, int hashCount) {
		for (int i = 0; i != hashCount; ++i) {
			int hash = evacuationHashes[i];
			recycleHash(hash);
		}
		recycleHashtable();
	}

	// TODO slow pace recycling
	// Moves hashtable segments off pages marked for recycling by
	// reallocating them at their current size (double-checked under the
	// segment write lock).
	void recycleHashtable() {
		for(int i = 0; i != segmentCount; ++i) {
			int pp = masterHashtable[i];
			if (needRecycle(pp)) {
				segmentWriteLock(i);
				try {
					pp = masterHashtable[i];
					if (needRecycle(pp)) {
						int segSize = getSegmentSize(i);
//						System.out.println("Recycling hash segment " + pageManager.page(pp) + ":" + pageManager.offset(pp));
						resizeSegment(i, segSize);
					}
				}
				finally {
					segmentWriteUnlock(i);
				}
			}
		}
	}

	// tableLock assumed
	// Relocates every entry in the bucket of the given hash that lives on a
	// page marked for recycling; also rewrites the bucket's overflow list if
	// the list chunk itself needs relocation.
	void recycleHash(int hash) {
		while(true) {
			int index = BinHash.splitHash(hash, capacity);
			writeLock(index);
			try {
				if (BinHash.splitHash(hash, capacity) != index) {
					// capacity has been updated
					// need to recalculate index
					continue;
				}

				int[] entries = getEntries(index);

				if (entries != null && entries.length > 0) {
					boolean modified = false;
					for(int i = 0; i != entries.length; ++i) {
						int pp = entries[i];
						if (needRecycle(pp)) {
							// copy the entry to a fresh allocation and drop the old one
							ByteChunk chunk = pageManager.get(pp);
							int npp = pageManager.allocate(chunk.lenght(), PagedBinaryStoreManager.ALLOC_RELOCATE_VALUE);
							ByteChunk newChunk = pageManager.get(npp);
							newChunk.putBytes(chunk);
							pageManager.release(pp);
							// not required for in-heap storage
							pageManager.update(npp, newChunk);
							entries[i] = npp;
							modified = true;
						}
					}

					if (!modified) {
						// entries are fine, but the overflow list chunk itself
						// may sit on a recycled page
						int pe = hashtableGet(index);
						pe = pe > 0 ? pe : -pe;
						if (needRecycle(pe)) {
							modified = true;
						}
					}

					if (modified) {
						// setEntries reallocates the overflow list as a side effect
						setEntries(index, entries);
					}
				}
			}
			finally {
				writeUnlock(index);
			}
			break;
		}
	}

	// True if the pointer is non-empty and its page is marked for recycling.
	private boolean needRecycle(int pointer) {
		return pointer != PagedBinaryStoreManager.EMPTY && pageManager.isMarkedForRecycle(pointer);
	}

	// Grows the table when the load factor exceeds the target.
	// Adds 2 buckets when capacity is even, 1 when odd (the original
	// "grow by 1.5" comment presumably refers to amortized growth — unverified).
	private void checkTableSize() {
		float loadFactor = ((float)size.get()) / capacity;
		if (loadFactor > targetLoadFactor) {
			// grow by 1.5
			if (capacity % 2 == 0) {
				growTable(2);
			}
			else {
				growTable(1);
			}
		}
	}

	/**
	 * Performs n linear-hashing split steps. Each step locks the split
	 * bucket and the new last bucket, increments capacity, and redistributes
	 * the split bucket's entries between the two indices.
	 */
	private void growTable(int n) {
//		tableLock.readLock().lock();
		try {
			for(int i = 0; i != n; ++i) {
				int nRound = Integer.highestOneBit(capacity);
				int nLast = capacity;
				int nSplit = (nLast) & ~nRound;
				writeDoubleLock(nSplit, nLast);
//				checkHashConsistency();
				try {
					// recheck under lock: another thread may have grown the table
					if (capacity == nLast) {
						int originalCapacity = capacity;
						// writeLock(nLast) ensures what we are holding lock for capacity
						increamentCapacity(); // capacity increased to capacity + 1

						int[] entries = getEntries(nSplit);
						if (entries != null) {
							int n1 = 0;
							int[] el1 = new int[entries.length];
							int n2 = 0;
							int[] el2 = new int[entries.length];

							// partition entries between the old and the new bucket
							for(int pp: entries) {
								ByteChunk chunk = pageManager.get(pp);
								int hash = chunk.intAt(PagedBinaryStoreManager.HASH_POS);
								int index = BinHash.splitHash(hash, nLast + 1);
								if (index == nSplit) {
									el1[n1++] = pp;
								}
								else if (index == nLast) {
									el2[n2++] = pp;
								}
								else {
									System.err.println("[[ Problem in 'growTable' - Thread:" + Thread.currentThread().toString());
									System.err.println("New index of hash " + Integer.toHexString(hash) +" is " + index + ", expected values eigther " + nSplit + " or " + nLast);
									System.err.println("Original capacity: " + originalCapacity + " hash index " + BinHash.splitHash(hash, originalCapacity));
									System.err.println("Current capacity: " + capacity + " hash index " + Integer.toHexString(hash));
									System.err.println("]]");
									throw new AssertionError("New index of hash " + Integer.toHexString(hash) + " is " + index + ", expected values eigther " + nSplit + " or " + nLast);
								}
							}
							el1 = Arrays.copyOf(el1, n1);
							el2 = Arrays.copyOf(el2, n2);

							setEntries(nSplit, el1);
							setEntries(nLast, el2);
						}
					}
				}
				finally {
//					checkHashConsistency();
					writeDoubleUnlock(nSplit, nLast);
				}
			}
		}
		finally {
//			tableLock.readLock().unlock();
		}
	}

	// Verifies every entry still hashes to its bucket (test/debug helper;
	// assumes no concurrent writers).
	@SuppressWarnings("unused") // for testing
	private void checkHashConsistency() {
//		tableLock.readLock().lock();
		try {
			for(int i = 0; i != capacity; ++i) {
				int[] entries = getEntries(i);
				if (entries != null) {
					for(int pp : entries) {
						ByteChunk entry = pageManager.get(pp);
						int hash = entry.intAt(PagedBinaryStoreManager.HASH_POS);
						if (BinHash.splitHash(hash, capacity) != i) {
							throw new AssertionError();
						}
					}
				}
			}
		}
		finally {
//			tableLock.readLock().unlock();
		}
	}

//	private int hashIndex(ByteChunk key, int capacity) {
//		int hash = BinHash.hash(key);
//		return PagedBinaryStoreManager.splitHash(hash, capacity);
//	}
694 |
695 | private AtomicIntegerArray createLocktable(int size) {
696 | AtomicIntegerArray table = new AtomicIntegerArray(size / 4); // 8 bits per lock
697 | return table;
698 | }
699 |
	/*
	 * Lock protocol. Each segment owns one byte in the packed lock table:
	 *   0          - unlocked
	 *   1 .. 126   - number of active readers
	 *   -1         - write lock held
	 *   -2 .. -128 - writer pending; (-value - 1) readers still draining
	 * Bucket indices map to segment locks via index % segmentCount.
	 */

	private void readLock(int index) {
		int seg = index % segmentCount;
		segmentReadLock(seg);
	}

	private void readUnlock(int index) {
		int seg = index % segmentCount;
		segmentReadUnlock(seg);
	}

	// Spins (yielding every 10 attempts) until the reader count can be
	// incremented; blocked while a writer holds or is draining the lock.
	private void segmentReadLock(int index) {
		int n = 0;
		while(true) {
			byte c = byte_get(locktable, index);
			if (c >= 0 && c < 126) {
				byte u = (byte) (c + 1) ;
				if (byte_compareAndSet(locktable, index, c, u)) {
					return;
				}
			}
			++n;
			if (n % 10 == 0) {
				Thread.yield();
			}
		}
	}

	// Decrements the reader count; while a writer is draining (negative
	// state) the count moves up toward -1 instead.
	private void segmentReadUnlock(int index) {
		int n = 0;
		while(true) {
			byte c = byte_get(locktable, index);
			if (c > 0) {
				byte u = (byte) (c - 1) ;
				if (byte_compareAndSet(locktable, index, c, u)) {
					return;
				}
			}
			else if (c < 0) {
				byte u = (byte) (c + 1);
				if (byte_compareAndSet(locktable, index, c, u)) {
					return;
				}
			}
			else {
				// unlock without a matching lock
				throw new IllegalStateException("Invalid lock state");
			}
			++n;
			if (n % 10 == 0) {
				Thread.yield();
			}
		}
	}


	private void writeLock(int index) {
		int segment = index % segmentCount;
		segmentWriteLock(segment);
	}

	private void writeUnlock(int index) {
		int segment = index % segmentCount;
		segmentWriteUnlock(segment);
	}

	// Locks two buckets' segments in ascending segment order to avoid
	// deadlock; locks once if both map to the same segment.
	private void writeDoubleLock(int index1, int index2) {
		int seg1 = index1 % segmentCount;
		int seg2 = index2 % segmentCount;

		if (seg1 > seg2) {
			int t = seg1;
			seg1 = seg2;
			seg2 = t;
		}

		segmentWriteLock(seg1);
		if (seg1 != seg2) {
			segmentWriteLock(seg2);
		}
	}

	// Releases the locks taken by writeDoubleLock (reverse order).
	private void writeDoubleUnlock(int index1, int index2) {
		int seg1 = index1 % segmentCount;
		int seg2 = index2 % segmentCount;

		if (seg1 > seg2) {
			int t = seg1;
			seg1 = seg2;
			seg2 = t;
		}

		if (seg1 != seg2) {
			segmentWriteUnlock(seg2);
		}
		segmentWriteUnlock(seg1);
	}

	// Phase 1: claim the lock (0 -> -1 directly, or c readers -> -c-1 to
	// announce a pending writer). Phase 2: wait for readers to drain to -1.
	private void segmentWriteLock(int index) {
		int n = 0;
		while(true) {
			byte c = byte_get(locktable, index);
			if (c == 0) {
				byte u = (byte) -1;
				if (byte_compareAndSet(locktable, index, c, u)) {
					return;
				}
			}
			else if (c < 0) {
				// another writer is pending
			}
			else if (c > 0){
				byte u = (byte) (-c - 1);
				if (byte_compareAndSet(locktable, index, c, u)) {
					break;
				}
			}
			++n;
			if (n % 10 == 0) {
				Thread.yield();
			}
		}
		// waiting read locks to get released
		while(true) {
			byte c = byte_get(locktable, index);
			if (c == -1) {
				return;
			}

			++n;
			if (n % 10 == 0) {
				Thread.yield();
			}
		}
	}

	// Releases the write lock (-1 -> 0); any other state means the
	// protocol was violated.
	private void segmentWriteUnlock(int index) {
		int n = 0;
		while(true) {
			byte c = byte_get(locktable, index);
			if (c == -1) {
				byte u = (byte) 0;
				if (byte_compareAndSet(locktable, index, c, u)) {
					return;
				}
			}
			else {
				throw new IllegalStateException("Broken lock");
			}
			++n;
			if (n % 10 == 0) {
				Thread.yield();
			}
		}
	}

	// Reads one packed byte from the lock table (4 bytes per int).
	private byte byte_get(AtomicIntegerArray table, int index) {
		int x = index / 4;
		int xx = index % 4;
		int word = table.get(x);
		return getByte(word, xx);
	}

	// CAS on one packed byte, implemented as CAS on the containing int.
	// Retries while other bytes of the word change underneath us.
	private boolean byte_compareAndSet(AtomicIntegerArray table, int index, byte expected, byte newValue) {
		int x = index / 4;
		int xx = index % 4;

		while(true) {
			int word = table.get(x);
			byte val = getByte(word, xx);
			if (val == expected) {
				int newWord = setByte(word, xx, newValue);
				if (table.compareAndSet(x, word, newWord)) {
					return true;
				}
				else {
					// a neighboring byte changed; re-read and retry
					continue;
				}
			}
			else {
				return false;
			}
		}
	}

	// Extracts byte i (0 = lowest) from a packed int.
	private byte getByte(int word, int i) {
		switch(i) {
		case 0:
			return (byte) (0xFF & word);
		case 1:
			return (byte) (0xFF & (word >> 8));
		case 2:
			return (byte) (0xFF & (word >> 16));
		case 3:
			return (byte) (0xFF & (word >> 24));
		default:
			throw new IllegalArgumentException("4 bytes per int");
		}
	}

	// Returns the packed int with byte i replaced by value.
	private int setByte(int word,int i, byte value) {
		switch(i) {
		case 0:
			word &= 0xFFFFFF00;
			word |= 0xFF & (int)value;
			return word;
		case 1:
			word &= 0xFFFF00FF;
			word |= (0xFF & (int)value) << 8;
			return word;
		case 2:
			word &= 0xFF00FFFF;
			word |= (0xFF & (int)value) << 16;
			return word;
		case 3:
			word &= 0x00FFFFFF;
			word |= (0xFF & (int)value) << 24;
			return word;
		default:
			throw new IllegalArgumentException("4 bytes per int");
		}
	}

	/**
	 * Weakly consistent key iterator. Buffers the keys of one bucket at a
	 * time under that bucket's read lock, then serves them from the buffer;
	 * entries added or removed concurrently may or may not be observed.
	 */
	private class HashIterator implements Iterator<ByteChunk> {

		// index of the bucket whose keys were last buffered
		private int position = 0;
		private final List<ByteChunk> buffer = new ArrayList<ByteChunk>();

		public HashIterator() {
			feed();
		}

		// Copies the keys of the bucket at 'position' into the buffer.
		private void feed() {
			readLock(position);
			try {
				int[] entries = getEntries(position);
				if (entries != null) {
					for(int pp : entries) {
						ByteChunk entry = pageManager.get(pp);
						buffer.add(getKey(entry));
					}
				}
			}
			finally{
				readUnlock(position);
			}
		}

		@Override
		public boolean hasNext() {
			// advance bucket by bucket until a non-empty buffer or end of table
			while(buffer.isEmpty()) {
				++position;
				if (position >= capacity) {
					return false;
				}
				else {
					feed();
				}
			}
			return true;
		}

		@Override
		public ByteChunk next() {
			if (hasNext()) {
				return buffer.remove(0);
			}
			else {
				throw new NoSuchElementException();
			}
		}

		@Override
		public void remove() {
			throw new UnsupportedOperationException();
		}
	}
975 | }
--------------------------------------------------------------------------------
/jmemstore/src/main/java/org/gridkit/offheap/jmemstore/BinaryKeyListStore.java:
--------------------------------------------------------------------------------
/**
 * Copyright 2011 Alexey Ragozin
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package org.gridkit.offheap.jmemstore;

import java.util.Iterator;

/**
 * Key/List store is a store dedicated for inverted indexes.
 * Logically this a multimap, with values sorted within each key for paging retrieval.
 * @author Alexey Ragozin (alexey.ragozin@gmail.com)
 */
public interface BinaryKeyListStore {

	/**
	 * @return true if the combination of key/value is present
	 */
	public boolean contains(ByteChunk key, ByteChunk value);

	/**
	 * Returns values associated with key. Returned values are optionally limited by range. Values are returned via provided array.
	 * @param key
	 * @param lowerBound lower bound for value range being returned (exclusive), null - unlimited
	 * @param upperBound upper bound for value range being returned (exclusive), null - unlimited
	 * @param values buffer for values being returned
	 * @return number of objects returned via values
	 */
	public int fetch(ByteChunk key, ByteChunk lowerBound, ByteChunk upperBound, ByteChunk[] values);

	/**
	 * @return number of values associated with key
	 */
	// (sic) method name is misspelled "cordinality"; kept for binary/source compatibility
	public int cordinality(ByteChunk key);

	/** Adds value to the list associated with key. */
	public void append(ByteChunk key, ByteChunk value);

	/** Removes key and all values associated with it. */
	public void remove(ByteChunk key);

	/** Removes a single key/value combination. */
	public void remove(ByteChunk key, ByteChunk value);

	/** @return iterator over all keys in the store */
	public Iterator<ByteChunk> keys();

	public int size();

	public void clear();

}
61 |
--------------------------------------------------------------------------------
/jmemstore/src/main/java/org/gridkit/offheap/jmemstore/BinaryKeyValueStore.java:
--------------------------------------------------------------------------------
/**
 * Copyright 2011 Alexey Ragozin
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package org.gridkit.offheap.jmemstore;

import java.util.Iterator;

/**
 * A key/value store over binary keys and values.
 * @author Alexey Ragozin (alexey.ragozin@gmail.com)
 */
public interface BinaryKeyValueStore {

	/** @return value associated with key */
	public ByteChunk get(ByteChunk key);

	/** Associates value with key. */
	public void put(ByteChunk key, ByteChunk value);

	/**
	 * Atomically replaces the value associated with key.
	 * @return true if the value was replaced
	 */
	public boolean compareAndPut(ByteChunk key, ByteChunk expected, ByteChunk newValue);

	/** Removes the association for key. */
	public void remove(ByteChunk key);

	/**
	 * Atomically removes the association for key if its current value matches expected.
	 * @return true if the association was removed
	 */
	public boolean compareAndRemove(ByteChunk key, ByteChunk expected);

	/** @return iterator over all keys in the store */
	public Iterator<ByteChunk> keys();

	public int size();

	public void clear();

}
43 |
--------------------------------------------------------------------------------
/jmemstore/src/main/java/org/gridkit/offheap/jmemstore/BinaryStoreManager.java:
--------------------------------------------------------------------------------
/**
 * Copyright 2011 Alexey Ragozin
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package org.gridkit.offheap.jmemstore;

/**
 * Factory and life-cycle manager for binary store structures.
 * @author Alexey Ragozin (alexey.ragozin@gmail.com)
 */
public interface BinaryStoreManager {

	/** Creates a new key/value store. */
	public BinaryKeyValueStore createKeyValueStore();

	/** Creates a new key/value store split into the given number of segments. */
	public BinaryKeyValueStore createKeyValueStore(int segments);

	/** Creates a new binary-key to id mapping. */
	public IdentityMapping createIdentityMapping();

	/** Destroys a store previously created by this manager. */
	public void destroy(MemoryConsumer store);

	/** Closes this manager. */
	public void close();


}
36 |
--------------------------------------------------------------------------------
/jmemstore/src/main/java/org/gridkit/offheap/jmemstore/ByteChunk.java:
--------------------------------------------------------------------------------
/**
 * Copyright 2011 Alexey Ragozin
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package org.gridkit.offheap.jmemstore;

import java.nio.ByteBuffer;

/**
 * A kind of {@link ByteBuffer}, byte buffer itself is a bit too complicated.
 * Represents a window (offset + length) over a backing byte array.
 * Multi-byte accessors store the most significant byte first (big endian).
 * @author Alexey Ragozin (alexey.ragozin@gmail.com)
 *
 */
public final class ByteChunk {

	private final byte[] bytes;
	private final int offset;
	private final int len;

	/** Wraps the whole array. */
	public ByteChunk(byte[] bytes) {
		this.bytes = bytes;
		this.offset = 0;
		this.len = bytes.length;
	}

	/** Wraps {@code len} bytes of the array starting at {@code offset}. */
	public ByteChunk(byte[] bytes, int offset, int len) {
		this.bytes = bytes;
		this.offset = offset;
		this.len = len;
	}

	/** @return the backing array (not a copy) */
	public byte[] array() {
		return bytes;
	}

	/** @return start of this chunk within the backing array */
	public int offset() {
		return offset;
	}

	/** @return number of bytes in this chunk */
	public int length() {
		return len;
	}

	/**
	 * @return number of bytes in this chunk
	 * @see #length() - this misspelled variant is retained for compatibility
	 */
	public int lenght() {
		return len;
	}

	/** @return byte at position {@code i} relative to the chunk start */
	public byte at(int i) {
		return bytes[offset + i];
	}

	/** Sets the byte at position {@code i} relative to the chunk start. */
	public void set(int i, byte b) {
		bytes[offset + i] = b;
	}

	/**
	 * @return a chunk sharing this chunk's backing array, covering
	 *         {@code len} bytes starting at relative offset {@code offs}
	 * @throws IllegalArgumentException if the requested range does not fit
	 */
	public ByteChunk subChunk(int offs, int len) {
		if (offs + len > this.len) {
			throw new IllegalArgumentException("Chunk " + bytes + " offs: " + offset + " len: " + this.len + ". Required subrange " + offs + " by " + len + "(" + Integer.toHexString(len) + ")");
		}
		return new ByteChunk(bytes, offset + offs, len);
	}

	/** Reads a 4-byte int at relative offset {@code offs}, MSB first (big endian). */
	public int intAt(int offs) {
		if (offs + 4 > len) {
			throw new IllegalArgumentException("Chunk " + bytes + " offs: " + offset + " len: " + len + ". Required subrange " + offs + " by " + 4);
		}
		// internal byte order - big endian (most significant byte first)
		int value = (0xFF & bytes[offset + offs]) << 24
				  | (0xFF & bytes[offset + offs + 1]) << 16
				  | (0xFF & bytes[offset + offs + 2]) << 8
				  | (0xFF & bytes[offset + offs + 3]);
		return value;
	}

	/** Writes a 4-byte int at relative offset {@code offs}, MSB first (big endian). */
	public void putInt(int offs, int val) {
		if (offs + 4 > len) {
			throw new IllegalArgumentException("Out of bounds");
		}
		// internal byte order - big endian (most significant byte first)
		bytes[offset + offs] = (byte) (val >> 24);
		bytes[offset + offs + 1] = (byte) (val >> 16);
		bytes[offset + offs + 2] = (byte) (val >> 8);
		bytes[offset + offs + 3] = (byte) val;

	}

	/** Reads an 8-byte long at relative offset {@code offs}, MSB first (big endian). */
	public long longAt(int offs) {
		if (offs + 8 > len) {
			throw new IllegalArgumentException("Out of bounds");
		}
		// internal byte order - big endian (most significant byte first)
		long value = (0xFFl & bytes[offset + offs]) << 56
				   | (0xFFl & bytes[offset + offs + 1]) << 48
				   | (0xFFl & bytes[offset + offs + 2]) << 40
				   | (0xFFl & bytes[offset + offs + 3]) << 32
				   | (0xFFl & bytes[offset + offs + 4]) << 24
				   | (0xFFl & bytes[offset + offs + 5]) << 16
				   | (0xFFl & bytes[offset + offs + 6]) << 8
				   | (0xFFl & bytes[offset + offs + 7]);
		return value;
	}

	/** Writes an 8-byte long at relative offset {@code offs}, MSB first (big endian). */
	public void putLong(int offs, long val) {
		if (offs + 8 > len) {
			throw new IllegalArgumentException("Out of bounds");
		}
		// internal byte order - big endian (most significant byte first)
		bytes[offset + offs] = (byte) (val >> 56);
		bytes[offset + offs + 1] = (byte) (val >> 48);
		bytes[offset + offs + 2] = (byte) (val >> 40);
		bytes[offset + offs + 3] = (byte) (val >> 32);
		bytes[offset + offs + 4] = (byte) (val >> 24);
		bytes[offset + offs + 5] = (byte) (val >> 16);
		bytes[offset + offs + 6] = (byte) (val >> 8);
		bytes[offset + offs + 7] = (byte) val;

	}

	/**
	 * Copies the whole of {@code bytes} to the start of this chunk.
	 * The destination range must be zero-filled beforehand (asserted).
	 */
	public void putBytes(ByteChunk bytes) {
		if (bytes.len > len) {
			throw new IllegalArgumentException("Out of bounds");
		}
		// destination must be clean; guards against overwriting live data
		for(int i = 0; i != bytes.len; ++i) {
			if (this.bytes[offset + i] != 0) {
				throw new AssertionError("Chunk " + bytes + " offs: " + offset + " len: " + len + ". Dirty data for putBytes. Params " + 0 + " by " + bytes.len);
			}
		}
		System.arraycopy(bytes.bytes, bytes.offset, this.bytes, offset, bytes.len);
	}

	/**
	 * Copies the whole of {@code bytes} into this chunk at relative offset {@code offs}.
	 * The destination range must be zero-filled beforehand (asserted).
	 */
	public void putBytes(int offs, ByteChunk bytes) {
		if (offs + bytes.len > len) {
			throw new IllegalArgumentException("Out of bounds");
		}
		// destination must be clean; guards against overwriting live data
		for(int i = 0; i != bytes.len; ++i) {
			if (this.bytes[offset + offs + i] != 0) {
				throw new AssertionError("Chunk " + bytes + " offs: " + offset + " len: " + len + ". Dirty data for putBytes. Params " + offs + " by " + bytes.len);
			}
		}
		System.arraycopy(bytes.bytes, bytes.offset, this.bytes, offset + offs, bytes.len);
	}

	/** @return true if both chunks hold the same byte sequence */
	public boolean sameBytes(ByteChunk that) {
		if (len != that.len) {
			return false;
		}
		else {
			for(int i = 0; i != len; ++i) {
				if (at(i) != that.at(i)) {
					return false;
				}
			}
		}
		return true;
	}

	@Override
	public String toString() {
		// hex dump, dot-separated every 4 bytes, truncated after 127 bytes
		StringBuilder buf = new StringBuilder();
		buf.append('[');
		for(int i = 0; i != len; ++i) {
			byte val = at(i);
			if (i > 0 && i % 4 == 0) {
				buf.append(".");
			}
			buf.append(Integer.toHexString((val >> 4) & 0xF)).append(Integer.toHexString(val & 0xF));
			if (i > 126) {
				buf.append("...");
				break;
			}
		}
		buf.append(']');
		return buf.toString();
	}

	/** @throws AssertionError if any byte of this chunk is non-zero */
	public void assertEmpty() {
		for(int i = 0; i != len; ++i) {
			if (bytes[offset + i] != 0) {
				throw new AssertionError("Not empty " + this.toString());
			}
		}
	}
}
192 |
--------------------------------------------------------------------------------
/jmemstore/src/main/java/org/gridkit/offheap/jmemstore/IdentityMapping.java:
--------------------------------------------------------------------------------
/**
 * Copyright 2011 Alexey Ragozin
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package org.gridkit.offheap.jmemstore;

/**
 * Bidirectional, reference-counted mapping between binary chunks and int ids.
 * @author Alexey Ragozin (alexey.ragozin@gmail.com)
 */
public interface IdentityMapping {

	/** Id value meaning "no mapping". */
	public static final int UNMAPPED = -1;

	/**
	 * Maps chunk to an id, creating the mapping if absent.
	 * @return id assigned to chunk
	 */
	public int map(ByteChunk chunk);

	/** @return id mapped to chunk, or {@link #UNMAPPED} if absent */
	public int getIdByChunk(ByteChunk chunk);

	/** @return chunk mapped to id, or null if absent */
	public ByteChunk getChunkById(int id);

	/** Releases one reference to the mapping for chunk. */
	public void unmap(ByteChunk chunk);

	/** Releases one reference to the mapping with the given id. */
	public void unmap(int id);

	/** @return size of the mapping table */
	public int size();

	/** Removes all mappings. */
	public void clear();

}
41 |
--------------------------------------------------------------------------------
/jmemstore/src/main/java/org/gridkit/offheap/jmemstore/IdentityMappingHashTable.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2011 Alexey Ragozin
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | *
16 | */
17 | package org.gridkit.offheap.jmemstore;
18 |
19 | import java.util.Arrays;
20 | import java.util.concurrent.atomic.AtomicInteger;
21 | import java.util.concurrent.atomic.AtomicIntegerArray;
22 |
23 |
24 | /**
25 | * Class hashtable data structure, using explicit memory manager
26 | *
27 | * @author Alexey Ragozin (alexey.ragozin@gmail.com)
28 | */
29 | class IdentityMappingHashTable implements IdentityMapping, MemoryConsumer {
30 |
	private static final int[] NO_ENTRIES = new int[0];

	// Entry structure
	// | -- -- -- -- | -- -- -- -- | -- -- -- -- | -- -- -- -- | -- ... -- |
	// |    hash     |  key size   |     id      |  ref count  | key data  |

	// byte offsets of the fields within an entry chunk
	private final int POS_HASH = 0;
	private final int POS_KEY_SIZE = 4;
	// NOTE(review): POS_ID is 8 but POS_REF_COUNT is 16, leaving bytes 12-15
	// unaccounted for by the diagram above - confirm intended layout
	private final int POS_ID = 8;
	private final int POS_REF_COUNT = 16;
	private final int POS_KEY_DATA = 20;

	// allocation tags passed to MemoryStoreBackend.allocate()
	static int ALLOC_NEW_ENTRY = 0;
	static int ALLOC_NEW_LIST = 0;
	static int ALLOC_RELOCATE_VALUE = 0;
	static int ALLOC_HASH_SEGMENT = 1;

	// backing page storage for entries, entry lists and hash segments
	private final MemoryStoreBackend pageManager;

	// number of hashtable stripes; each stripe lives in its own page chunk
	private final int segmentCount;
	// one page pointer per stripe, pointing at the page holding its hash slots
	private int[] masterHashtable;
	// packed per-slot spin locks, four lock bytes per int
	private AtomicIntegerArray locktable;
	// current number of hash slots
	private volatile int capacity;

	// entry counter; each mapping contributes 2 (key-hash slot + id-hash slot)
	private AtomicInteger size = new AtomicInteger();
	private float targetLoadFactor = 0.8f;

	// guards the id-allocation state below (new String() gives a unique monitor)
	private Object idLock = new String("idLock");
	// ids in [0, upperBound) have been handed out at least once
	private int upperBound;
	// ring buffer of reclaimed ids; L = head (take), R = tail (put)
	private int[] freeList = new int[1024];
	private int freeListL = 0;
	private int freeListR = 0;
	// resume position of the gap-scanning sweep over the id space
	private int sweepPos;
64 |
	/**
	 * Creates a mapping table striped over {@code segmentCount} segments,
	 * backed by the given page manager.
	 */
	public IdentityMappingHashTable(MemoryStoreBackend pageManager, int segmentCount) {
		this.pageManager = pageManager;
		this.segmentCount = segmentCount;
		this.masterHashtable = new int[segmentCount];
		this.locktable = createLocktable(segmentCount);
		// grow until every segment owns at least one hash slot
		while(capacity < segmentCount) {
			increamentCapacity();
		}
	}
74 |
	/**
	 * Allocates an id for a new mapping. Reclaimed ids from the free list are
	 * preferred; otherwise the id range [0, upperBound) is extended. When the
	 * id space is half exhausted, only reclaimed ids are used
	 * (scanForGaps sweeps the table under segment read locks).
	 */
	private int allocateId() {
		synchronized(idLock) {
			while(true) {
				if (freeListL != freeListR) {
					// free list non-empty: reuse the id at the head
					int id = freeList[freeListL];
					freeList[freeListL] = UNMAPPED;
					freeListL = (freeListL + 1) % freeList.length;
					if (id == UNMAPPED) {
						throw new AssertionError();
					}
					return id;
				}
				else {
					if (upperBound > (Integer.MAX_VALUE >> 1)) {
						// id space half exhausted: must reclaim, keep sweeping
						scanForGaps(256);
						continue;
					}
					if ((size.get() / 2) < upperBound) {
						// more ids handed out than live mappings: try a short sweep first
						scanForGaps(8);
						if (freeListSize() > 0) {
							continue;
						}
						else {
							// nothing reclaimed, extend the id range
							int id = upperBound;
							++upperBound;
							return id;
						}
					}
					else {
						// id space is dense, just extend it
						int id = upperBound;
						++upperBound;
						return id;
					}
				}
			}
		}
	}
112 |
	/**
	 * Sweeps the id space from sweepPos looking for ids no longer present in
	 * the table and adds them to the free list. Stops (and resets the sweep)
	 * when the sweep reaches upperBound or the free list fills up.
	 *
	 * NOTE(review): the parameter {@code n} looks like an intended scan
	 * budget, but it is only ever incremented and never checked, so each call
	 * may sweep the entire remaining id range - confirm intent.
	 */
	private void scanForGaps(int n) {
		scan_loop:
		while(true) {
			if (sweepPos >= upperBound || freeListSize() == (freeList.length - 1)) {
				sweepPos = 0;
				return;
			}
			int id = sweepPos;
			int nhash = BinHash.hash(id);
			sweepPos++;
			n++;
			// lock the slot this id would hash into and look for it there
			int nindex = BinHash.splitHash(nhash, capacity);
			readLock(nindex);
			try {
				if (nindex != BinHash.splitHash(nhash, capacity)) {
					continue; // this will skip id check, but it should be ok
				}

				int[] entries = getEntries(nindex);
				if (entries != null) {
					for (int pp : entries) {
						if (getId(pp) == id) {
							// id is still mapped, keep sweeping
							continue scan_loop;
						}
					}
				}

				// no such id in table
				addToFreeList(id);
			}
			finally {
				readUnlock(nindex);
			}
		}
	}
148 |
149 | private void addToFreeList(int id) {
150 | freeList[freeListR] = id;
151 | freeListR = (freeListR + 1) % freeList.length;
152 | if (freeListL == freeListR) {
153 | throw new AssertionError();
154 | }
155 | }
156 |
157 | private int freeListSize() {
158 | int sz = freeListR - freeListL;
159 | if (sz < 0) {
160 | sz += freeList.length;
161 | }
162 | return sz;
163 | }
164 |
	/**
	 * @return id mapped to the given key, or UNMAPPED if the key is absent
	 */
	@Override
	public int getIdByChunk(ByteChunk key) {
		while(true) {
			int hash = BinHash.hash(key);
			int index = BinHash.splitHash(hash, capacity);
			readLock(index);
			try {
				// if the table was resized between hashing and locking,
				// the slot may have moved - retry with the new capacity
				if (index != BinHash.splitHash(hash, capacity)) {
					continue;
				}
				int[] entries = getEntries(index);
				if (entries != null) {
					for(int pp : entries) {
						ByteChunk entry = pageManager.get(pp);
						// the slot can also hold id-hash entries; match on key
						if (sameKey(entry, key)) {
							return getId(entry);
						}
					}
				}
				return UNMAPPED;
			}
			finally {
				readUnlock(index);
			}
		}
	}
191 |
	/**
	 * @return key chunk mapped to the given id, or null if the id is absent
	 */
	@Override
	public ByteChunk getChunkById(int id) {
		while(true) {
			int hash = BinHash.hash(id);
			int index = BinHash.splitHash(hash, capacity);
			readLock(index);
			try {
				// retry if a concurrent resize moved the slot
				if (index != BinHash.splitHash(hash, capacity)) {
					continue;
				}
				int[] entries = getEntries(index);
				if (entries != null) {
					for(int pp : entries) {
						ByteChunk entry = pageManager.get(pp);
						int entryId = getId(entry);
						if (entryId == id) {
							return getKeyData(entry);
						}
					}
				}
				return null;
			}
			finally {
				readUnlock(index);
			}
		}
	}
219 |
	/**
	 * Maps key to an id, incrementing the ref count if the mapping already
	 * exists, otherwise creating a new entry registered under both the key
	 * hash and the id hash.
	 * @return id assigned to key
	 */
	@Override
	public int map(ByteChunk key) {
		// Step I. Try increment ref counter on existing entry
		int hash = BinHash.hash(key);
		while(true) {
			int index = BinHash.splitHash(hash, capacity);
			writeLock(index);
			try {
				if (index != BinHash.splitHash(hash, capacity)) {
					// table has been resized, try again
					continue;
				}
				int id = incRefCount(index, key);
				if (id != UNMAPPED) {
					return id;
				}
				else {
					// no existing entry - fall through to creation
					break;
				}
			}
			finally {
				writeUnlock(index);
			}
		}
		// Step II. Create new entry and assign new ID.
		try {
			while(true) {
				int newId = allocateId();
				int idHash = BinHash.hash(newId);
				int hIndex = BinHash.splitHash(hash, capacity);
				int nIndex = BinHash.splitHash(idHash, capacity);
				// both slots must be locked: the entry is linked from both
				writeDoubleLock(hIndex, nIndex);
				try {
					if (hIndex != BinHash.splitHash(hash, capacity) || nIndex != BinHash.splitHash(idHash, capacity)) {
						// table has been resized, try again
						continue;
					}

					// recheck under the lock - another thread may have mapped the key
					int oldId = incRefCount(hIndex, key);
					if (oldId != UNMAPPED) {
						// somebody else have already created a mapping
						// NOTE(review): newId is abandoned here; presumably it is
						// reclaimed later by scanForGaps - confirm
						return oldId;
					}
					else {
						// adding new entry
						int npp = createEntry(key, newId, hash);
						addEntry(hIndex, npp);
						if (hIndex != nIndex) {
							addEntry(nIndex, npp);
						}
						size.addAndGet(2); // counting hash entries
						return newId;
					}
				}
				finally {
					writeDoubleUnlock(hIndex, nIndex);
				}
			}
		}
		finally {
			checkTableSize();
		}
	}
283 |
284 | // writeLock on index is assumed
285 | private int incRefCount(int index, ByteChunk key) {
286 | int[] entries = getEntries(index);
287 | if (entries != null) {
288 | for(int pp : entries) {
289 | ByteChunk entry = pageManager.get(pp);
290 | if (sameKey(entry, key)) {
291 | int refCount = getRefCount(entry);
292 | setRefCount(entry, refCount + 1);
293 | pageManager.update(pp, entry);
294 | // XXX
295 | if (key.toString().equals("[8ea5456f.c07d6f79.8bbbade2.30]")) {
296 | System.out.println(">>" + key + " refCount=" + pageManager.readInt(pp, POS_REF_COUNT));
297 | }
298 | return getId(entry);
299 | }
300 | }
301 | }
302 | return UNMAPPED;
303 | }
304 |
305 | @Override
306 | public void unmap(ByteChunk key) {
307 | // Step I. Try decrement ref counter on existing entry
308 | int hash = BinHash.hash(key);
309 | int id;
310 | while(true) {
311 | int index = BinHash.splitHash(hash, capacity);
312 | writeLock(index);
313 | try {
314 | if (index != BinHash.splitHash(hash, capacity)) {
315 | // table has been resized, try again
316 | continue;
317 | }
318 | id = decRefCount(index, key);
319 | if (id == UNMAPPED) {
320 | return;
321 | }
322 | else {
323 | break;
324 | }
325 | }
326 | finally {
327 | writeUnlock(index);
328 | }
329 | }
330 | // Step II. Remove entry from hashtable
331 | while(true) {
332 | int idHash = BinHash.hash(id);
333 | int hIndex = BinHash.splitHash(hash, capacity);
334 | int nIndex = BinHash.splitHash(idHash, capacity);
335 | writeDoubleLock(hIndex, nIndex);
336 | try {
337 | if (hIndex != BinHash.splitHash(hash, capacity) || nIndex != BinHash.splitHash(idHash, capacity)) {
338 | // table has been resized, try again
339 | continue;
340 | }
341 |
342 | int[] entries = getEntries(nIndex);
343 | if (entries != null) {
344 | for(int pp : entries) {
345 | ByteChunk entry = pageManager.get(pp);
346 | if (sameKey(entry, key)) {
347 | int refCount = getRefCount(entry);
348 | if (refCount == 1) {
349 | pageManager.release(pp);
350 | removeEntry(nIndex, pp);
351 | removeEntry(hIndex, pp);
352 | size.addAndGet(-2); // size of hashtable
353 | return;
354 | }
355 | else {
356 | setRefCount(entry, refCount - 1);
357 | pageManager.update(pp, entry);
358 | //XXX
359 | if (key.toString().equals("[8ea5456f.c07d6f79.8bbbade2.30]")) {
360 | System.out.println("<<" + key + " refCount=" + pageManager.readInt(pp, POS_REF_COUNT));
361 | }
362 | return;
363 | }
364 | }
365 | }
366 | }
367 | throw new IllegalArgumentException("No mapping found for key " + key);
368 | }
369 | finally {
370 | writeDoubleUnlock(hIndex, nIndex);
371 | }
372 | }
373 | }
374 |
375 | @Override
376 | public void unmap(int id) {
377 | // Step I. Try decrement ref counter on existing entry
378 | int hash = 0;
379 | int ihash = BinHash.hash(id);
380 | step1:
381 | while(true) {
382 | int index = BinHash.splitHash(ihash, capacity);
383 | writeLock(index);
384 | try {
385 | if (index != BinHash.splitHash(ihash, capacity)) {
386 | // table has been resized, try again
387 | continue;
388 | }
389 |
390 | int[] entries = getEntries(index);
391 | if (entries != null) {
392 | for(int pp : entries) {
393 | if (id == getId(pp)) {
394 | ByteChunk entry = pageManager.get(pp);
395 | int refCount = getRefCount(entry);
396 | if (refCount == 1) {
397 | hash = entry.intAt(POS_HASH);
398 | break step1;
399 | }
400 | setRefCount(entry, refCount - 1);
401 | pageManager.update(pp, entry);
402 | // XXX
403 | ByteChunk key = getKeyData(entry);
404 | if (key.toString().equals("[8ea5456f.c07d6f79.8bbbade2.30]")) {
405 | System.out.println("<<" + key + " refCount=" + pageManager.readInt(pp, POS_REF_COUNT));
406 | }
407 | return;
408 | }
409 | }
410 | }
411 | throw new IllegalArgumentException("No mapping found for id " + id);
412 | }
413 | finally {
414 | writeUnlock(index);
415 | }
416 | }
417 | // Step II. Remove entry from hashtable
418 | while(true) {
419 | int nIndex = BinHash.splitHash(ihash, capacity);
420 | int hIndex = BinHash.splitHash(hash, capacity);
421 | writeDoubleLock(hIndex, nIndex);
422 | try {
423 | if (hIndex != BinHash.splitHash(hash, capacity) || nIndex != BinHash.splitHash(ihash, capacity)) {
424 | // table has been resized, try again
425 | continue;
426 | }
427 |
428 | int[] entries = getEntries(nIndex);
429 | if (entries != null) {
430 | for(int pp : entries) {
431 | if (id == getId(pp)) {
432 | ByteChunk entry = pageManager.get(pp);
433 | int refCount = getRefCount(entry);
434 | if (refCount == 1) {
435 | pageManager.release(pp);
436 | removeEntry(nIndex, pp);
437 | removeEntry(hIndex, pp);
438 | size.addAndGet(-2); // size of hashtable
439 | return;
440 | }
441 | else {
442 | setRefCount(entry, refCount - 1);
443 | pageManager.update(pp, entry);
444 | // XXX
445 | ByteChunk key = getKeyData(entry);
446 | if (key.toString().equals("[8ea5456f.c07d6f79.8bbbade2.30]")) {
447 | System.out.println("<<" + key + " refCount=" + pageManager.readInt(pp, POS_REF_COUNT));
448 | }
449 | return;
450 | }
451 | }
452 | }
453 | }
454 | throw new IllegalArgumentException("No mapping found for id " + id);
455 | }
456 | finally {
457 | writeDoubleUnlock(hIndex, nIndex);
458 | }
459 | }
460 | }
461 |
462 | /**
463 | * @return UNMAPPED if ref counter > 0, pointer to chunk otherwise
464 | */
465 | // writeLock on index is assumed
466 | private int decRefCount(int index, ByteChunk key) {
467 | int[] entries = getEntries(index);
468 | if (entries != null) {
469 | for(int pp : entries) {
470 | ByteChunk entry = pageManager.get(pp);
471 | if (sameKey(entry, key)) {
472 | int refCount = getRefCount(entry);
473 | if (refCount == 1) {
474 | return getId(entry);
475 | }
476 | setRefCount(entry, refCount - 1);
477 | pageManager.update(pp, entry);
478 | if (key.toString().equals("[8ea5456f.c07d6f79.8bbbade2.30]")) {
479 | System.out.println("<<" + key + " refCount=" + pageManager.readInt(pp, POS_REF_COUNT));
480 | }
481 | return UNMAPPED;
482 | }
483 | }
484 | }
485 | throw new IllegalArgumentException("No mapping found for key " + key);
486 | }
487 |
488 | private int hashtableGet(int index) {
489 | int slot = index % segmentCount;
490 | int ix = index / segmentCount;
491 |
492 | int pp = masterHashtable[slot];
493 | int value = pageManager.readInt(pp, ix * 4);
494 | return value;
495 | }
496 |
497 | private void hashtableSet(int index, int value) {
498 | int slot = index % segmentCount;
499 | int ix = index / segmentCount;
500 |
501 | int pp = masterHashtable[slot];
502 | pageManager.writeInt(pp, ix * 4, value);
503 | }
504 |
	// lock is assumed
	/**
	 * Grows the table by one hash slot, resizing the owning segment page
	 * when its aligned size changes.
	 * @return the new capacity
	 */
	private int increamentCapacity() {
		// assumed newCap = capacity + 1

		// the new slot lands in segment (capacity % segmentCount)
		int slot = capacity % segmentCount;
		int oldSegSize = alignSegmentSize(capacity / segmentCount);
		int newSegSize = alignSegmentSize(1 + (capacity / segmentCount));
		if (oldSegSize != newSegSize) {
			resizeSegment(slot, newSegSize);
		}
		return ++capacity;
	}
517 |
518 | private int getSegmentSize(int n) {
519 | int size = (capacity / segmentCount + (n < (capacity % segmentCount) ? 1 : 0));
520 | return alignSegmentSize(size);
521 | }
522 |
523 | private int alignSegmentSize(int cap) {
524 | if (cap == 0) {
525 | return 0;
526 | }
527 | ++cap;
528 | int allignment = 0xF;
529 | if (cap > 256) {
530 | allignment = 0x3F;
531 | }
532 | else if (cap > 1024) {
533 | allignment = 0xFF;
534 | }
535 | else if (cap > 4096) {
536 | allignment = 0x3FF;
537 | }
538 | cap = (cap + allignment) & (~allignment);
539 | --cap; // reserve one slot for memory manager
540 | return cap;
541 | }
542 |
	/**
	 * Allocates a larger page for segment {@code slot}, copies the old slot
	 * data over and releases the old page.
	 */
	private void resizeSegment(int slot, int newSegSize) {
		int opp = masterHashtable[slot];
		int npp = pageManager.allocate(newSegSize * 4, PagedBinaryStoreManager.ALLOC_HASH_SEGMENT);
		if (opp != PagedBinaryStoreManager.EMPTY) {
			ByteChunk oldSeg = pageManager.get(opp);
			ByteChunk newSeg = pageManager.get(npp);
			newSeg.putBytes(oldSeg);
			// not required for in-heap backend
			pageManager.update(npp, newSeg);
			pageManager.release(opp);
		}
		masterHashtable[slot] = npp;
	}
556 |
	// lock assumed
	/**
	 * Reads the entry pointers of hash slot {@code index}.
	 * Slot encoding: 0 - empty; positive - pointer to a single entry;
	 * negative - negated pointer to a list chunk whose first int is a hash
	 * followed by the entry pointers.
	 * @return entry pointers (never null; empty array for an empty slot)
	 */
	private int[] getEntries(int index) {
		int pointer;
		pointer = hashtableGet(index);
		if (pointer == 0) {
			return NO_ENTRIES;
		}
		else if (pointer > 0) {
			return new int[]{pointer};
		}
		else {
			// multiple entries: slot holds a negated pointer to a list chunk
			pointer = -pointer;
			ByteChunk chunk = pageManager.get(pointer);
			int[] entries = new int[chunk.lenght() / 4 - 1];
			for(int i = 0; i != entries.length; ++i) {
				entries[i] = chunk.intAt(4 + i * 4);
			}
			return entries;
		}
	}
577 |
	// write lock for index assumed
	/**
	 * Stores the entry pointer list for hash slot {@code index}, using the
	 * encoding described at {@link #getEntries(int)}. Any previously
	 * allocated list chunk for the slot is released first.
	 */
	private void setEntries(int index, int[] entries) {
		int pointer;
		pointer = hashtableGet(index);
		// negative slot value means the slot owns a list chunk - free it
		if (pointer != PagedBinaryStoreManager.EMPTY && pointer < 0) {
			pointer = -pointer;
			pageManager.release(pointer);
		}
		if (entries == null || entries.length == 0) {
			hashtableSet(index, PagedBinaryStoreManager.EMPTY);
		}
		else if (entries.length == 1) {
			// single entry is stored inline as a positive pointer
			hashtableSet(index, entries[0]);
		}
		else {
			// multiple entries: allocate a list chunk [hash, p0, p1, ...]
			ByteChunk first = pageManager.get(entries[0]);
			int hash = first.intAt(0);
			int npp = pageManager.allocate(4 + 4 * entries.length, PagedBinaryStoreManager.ALLOC_NEW_LIST);
			ByteChunk list = pageManager.get(npp);
			// freshly allocated memory is expected to be zeroed
			try {
				list.assertEmpty();
			}
			catch(AssertionError e) {
				System.out.println("Problem pointer is " + pageManager.page(npp) + ":" + pageManager.offset(npp));
				throw e;
			}
			list.putInt(0, hash);
			for(int i = 0; i != entries.length; ++i) {
				list.putInt(4 + 4 * i, entries[i]);
			}
			// not required for in-heap backend
			pageManager.update(npp, list);
			hashtableSet(index, -npp);
		}
	}
613 |
614 | // write lock for index assumed
615 | private void addEntry(int index, int pp) {
616 | int[] entries = getEntries(index);
617 | if (entries == null) {
618 | setEntries(index, new int[]{pp});
619 | }
620 | else {
621 | int[] nentries = new int[entries.length + 1];
622 | for(int i = 0; i != entries.length; ++i) {
623 | nentries[i] = entries[i];
624 | }
625 | nentries[entries.length] = pp;
626 | setEntries(index, nentries);
627 | }
628 | }
629 |
630 | // write lock for index assumed
631 | private void removeEntry(int index, int pp) {
632 | int[] entries = getEntries(index);
633 | if (entries == null) {
634 | setEntries(index, new int[]{pp});
635 | }
636 | if (entries.length == 1) {
637 | if (entries[0] != pp) {
638 | // it is ok, just ignore
639 | // throw new AssertionError("No such pointer in hash slot. Slot=" + index + ", pointer=" + pp);
640 | }
641 | else {
642 | setEntries(index, null);
643 | }
644 | }
645 | else {
646 | int[] nentries = new int[entries.length];
647 | int n = 0;
648 | for(int i = 0; i != entries.length; ++i) {
649 | if (entries[i] != pp) {
650 | nentries[n++] = entries[i];
651 | }
652 | }
653 | if (n != entries.length) {
654 | nentries = Arrays.copyOf(nentries, n);
655 | setEntries(index, nentries);
656 | }
657 | }
658 | }
659 |
660 | private int createEntry(ByteChunk key, int id, int hash) {
661 | int size = POS_KEY_DATA + key.lenght();
662 | int npp = pageManager.allocate(size, ALLOC_NEW_ENTRY);
663 | ByteChunk chunk = pageManager.get(npp);
664 | try {
665 | chunk.assertEmpty();
666 | }
667 | catch(AssertionError e) {
668 | System.out.println("Problem pointer is " + pageManager.page(npp) + ":" + pageManager.offset(npp));
669 | throw e;
670 | }
671 |
672 | chunk.putInt(POS_HASH, hash);
673 | chunk.putInt(POS_KEY_SIZE, key.lenght());
674 | chunk.putInt(POS_ID, id);
675 | chunk.putInt(POS_REF_COUNT, 1);
676 | chunk.putBytes(POS_KEY_DATA, key);
677 |
678 | // no need for in-heap storage
679 | pageManager.update(npp, chunk);
680 |
681 | return npp;
682 | }
683 |
684 | private ByteChunk getKeyData(ByteChunk entry) {
685 | int size = entry.intAt(POS_KEY_SIZE);
686 | return entry.subChunk(POS_KEY_DATA, size);
687 | }
688 |
689 | private boolean sameKey(ByteChunk entry, ByteChunk key) {
690 | int keySize = entry.intAt(POS_KEY_SIZE);
691 | if (keySize == key.lenght()) {
692 | for (int i = 0; i != keySize; ++i) {
693 | if (entry.at(POS_KEY_DATA + i) != key.at(i)) {
694 | return false;
695 | }
696 | }
697 | return true;
698 | }
699 | else {
700 | return false;
701 | }
702 | }
703 |
704 | private int getId(ByteChunk entry) {
705 | return entry.intAt(POS_ID);
706 | }
707 |
708 | private int getId(int pp) {
709 | return pageManager.readInt(pp, POS_ID);
710 | }
711 |
712 | private int getRefCount(ByteChunk entry) {
713 | return entry.intAt(POS_REF_COUNT);
714 | }
715 |
716 | private void setRefCount(ByteChunk entry, int refCount) {
717 | entry.putInt(POS_REF_COUNT, refCount);
718 | }
719 |
720 | @Override
721 | public int size() {
722 | return size.get() / 2;
723 | }
724 |
725 | @Override
726 | public void clear() {
727 | clear(true);
728 | }
729 |
730 | @Override
731 | public void destroy() {
732 | clear(false);
733 | }
734 |
	// TODO check clear method
	/**
	 * Releases every entry chunk and collision list, then optionally
	 * re-initializes the table to its minimal capacity.
	 *
	 * @param reinit true - rebuild an empty table (clear),
	 *               false - leave the table dismantled (destroy)
	 */
	void clear(boolean reinit) {
		// lock everything
		for(int i = 0; i != segmentCount; ++i) {
			segmentWriteLock(i);
		}
		int[] empty = new int[0];
		for(int i = 0; i != capacity; ++i) {
			int[] entries = getEntries(i);
			if (entries != null) {
				for(int pp : entries) {
					if (pp != PagedBinaryStoreManager.EMPTY) {
						ByteChunk entry = pageManager.get(pp);
						int id = getId(entry);
						int nIndex = BinHash.splitHash(BinHash.hash(id), capacity);
						// entries appear under both key-hash and id-hash slots
						// (see checkHashConsistency); release only from the
						// id-hash slot so each chunk is freed exactly once
						if (i == nIndex) {
							pageManager.release(pp);
							// there may be two pp in same entries array, so we have
							// to zero them for avoiding double deallocating of memory chunk
							for(int x = 0; x != entries.length; ++x) {
								if (entries[x] == pp) {
									entries[x] = PagedBinaryStoreManager.EMPTY;
								}
							}
						}
					}
				}
				setEntries(i, empty);
			}
		}
		capacity = 0;
		size.set(0);

		// release the backing chunks of the master hashtable segments
		for(int i = 0; i != segmentCount; ++i) {
			int pp = masterHashtable[i];
			if (pp != PagedBinaryStoreManager.EMPTY) {
				pageManager.release(pp);
				masterHashtable[i] = 0;
			}
		}

		if (reinit) {
			// grow back to the minimal usable capacity (one slot per segment)
			while(capacity < segmentCount) {
				increamentCapacity();
			}
		}

		// unlock, unlock order does not matter
		for(int i = 0; i != segmentCount; ++i) {
			segmentWriteUnlock(i);
		}
	}
787 |
788 | public int getTableCapacity() {
789 | return capacity;
790 | }
791 |
792 | public double getTargetLoadFactor() {
793 | return targetLoadFactor;
794 | }
795 |
796 | public int getTableGapNumber() {
797 | int n = 0;
798 | for(int i = 0; i < capacity; ++i) {
799 | readLock(i);
800 | try {
801 | if (hashtableGet(i) == 0) {
802 | ++n;
803 | }
804 | }
805 | finally{
806 | readUnlock(i);
807 | }
808 | }
809 | return n;
810 | }
811 |
812 | public void recycleHashes(int[] evacuationHashes, int hashCount) {
813 | int evacuated = 0;
814 | for (int i = 0; i != hashCount; ++i) {
815 | int hash = evacuationHashes[i];
816 | evacuated += recycleHash(hash);
817 | }
818 | System.out.println("Evacuated " + evacuated + " bytes");
819 | recycleHashtable();
820 | }
821 |
	// TODO slow pace recycling
	/**
	 * Scans the master hashtable and relocates any segment whose backing
	 * chunk sits on a page marked for evacuation. Uses double-checked
	 * locking per segment.
	 */
	void recycleHashtable() {
		for(int i = 0; i != segmentCount; ++i) {
			int pp = masterHashtable[i];
			if (needRecycle(pp)) {
				segmentWriteLock(i);
				try {
					// re-read under the lock; another thread may have moved it
					pp = masterHashtable[i];
					if (needRecycle(pp)) {
						int segSize = getSegmentSize(i);
//						System.out.println("Recycling hash segment " + pageManager.page(pp) + ":" + pageManager.offset(pp));
						// NOTE(review): resizing to the same size presumably
						// reallocates the segment off the doomed page - confirm
						resizeSegment(i, segSize);
					}
				}
				finally {
					segmentWriteUnlock(i);
				}
			}
		}
	}
842 |
	/**
	 * Evacuates all entries with the given key-hash from pages marked for
	 * recycling: each affected entry chunk is copied to a fresh location and
	 * the pointers in both hash slots referring to it are patched.
	 *
	 * @return number of bytes evacuated
	 */
	int recycleHash(int hash) {
		int evacuated = 0;
		int[] recycleSet = null;
		int recycleCount = 0;
		// phase 1: under a read lock, collect ids of entries that need moving
		while(true) {
			int cap = capacity;
			if (cap == 0) {
				// table is blank
				return evacuated;
			}
			int index = BinHash.splitHash(hash, cap);
			readLock(index);
			try {
				if (BinHash.splitHash(hash, capacity) != index) {
					// capacity has been updated
					// need to recalculate index
					continue;
				}

				int[] entries = getEntries(index);

				if (entries == null || entries.length == 0) {
					return 0;
				}
				else {
					for(int i = 0; i != entries.length; ++i) {
						int pp = entries[i];
						// NOTE(review): hIndex/nIndex below are computed but never used
						int hIndex = BinHash.splitHash(pageManager.readInt(pp, POS_HASH), capacity);
						int nIndex = BinHash.splitHash(BinHash.hash(pageManager.readInt(pp, POS_ID)), capacity);
						if (needRecycle(pp) && pageManager.readInt(pp, POS_HASH) == hash) {
							if (recycleSet == null) {
								// at most (entries.length - i) further matches are possible
								recycleSet = new int[entries.length - i];
							}
							recycleSet[recycleCount++] = getId(pp);
						}
					}
				}

				// the collision list chunk itself may sit on a doomed page;
				// rewriting the same entries reallocates it
				if (recycleCount == 0) {
					int hx = hashtableGet(index);
					if (hx < 0) {
						hx = -hx;
						if (needRecycle(hx)) {
							// NOTE(review): setEntries is documented as requiring a
							// write lock but runs here under a read lock - verify
							setEntries(index, entries);
							evacuated += 4 + 4 * entries.length;
						}
					}
				}
			}
			finally {
				readUnlock(index);
			}
			break;
		}

		// phase 2: move each collected entry under a double write lock
		recycle_loop:
		for(int i = 0; i != recycleCount; ++i) {
			int id = recycleSet[i];

			while(true) {
				int cap = capacity;
				if (cap == 0) {
					// table is blank
					return evacuated;
				}
				int idHash = BinHash.hash(id);
				int hIndex = BinHash.splitHash(hash, cap);
				int nIndex = BinHash.splitHash(idHash, cap);
				writeDoubleLock(hIndex, nIndex);
				try {
					if (hIndex != BinHash.splitHash(hash, capacity) || nIndex != BinHash.splitHash(idHash, capacity)) {
						// table has been resized, try again
						continue;
					}

					int[] hentries = getEntries(hIndex);
					int[] nentries = getEntries(nIndex);

					// locate the entry by id inside its id-hash slot
					int pp = PagedBinaryStoreManager.EMPTY;
					for(int j = 0; j != nentries.length; ++j) {
						if (getId(nentries[j]) == id) {
							pp = nentries[j];
							break;
						}
					}

					if (needRecycle(pp)) {
						ByteChunk chunk = pageManager.get(pp);
						int cid = chunk.intAt(POS_ID);
						int chash = chunk.intAt(POS_HASH);
						if (id != cid || chash != hash) {
							// ignoring
							continue recycle_loop;
						}
						// XXX leftover debug trap for one specific key
						if (getKeyData(chunk).toString().equals("[8ea5456f.c07d6f79.8bbbade2.30]")) {
							new String();
						}
						if (chunk.intAt(POS_HASH) != hash) {
							// actually it is possible due to race condition
							// such case should be ignored
							throw new AssertionError();
						}
						// copy the entry to a fresh chunk, release the old one
						int np = pageManager.allocate(chunk.lenght(), ALLOC_RELOCATE_VALUE);
						ByteChunk newchunk = pageManager.get(np);
						newchunk.putBytes(chunk);
						pageManager.update(np, newchunk);
						pageManager.release(pp);

						// patch the relocated pointer in both slots' lists
						for(int j = 0; j != hentries.length; ++j) {
							if (hentries[j] == pp) {
								hentries[j] = np;
							}
						}
						for(int j = 0; j != nentries.length; ++j) {
							if (nentries[j] == pp) {
								nentries[j] = np;
							}
						}

						evacuated += chunk.lenght();
						setEntries(hIndex, hentries);
						setEntries(nIndex, nentries);
					}

					// should check if collision list requires recycling
					int hx = hashtableGet(hIndex);
					if (hx < 0) {
						hx = -hx;
						if (needRecycle(hx)) {
							setEntries(hIndex, hentries);
							evacuated += 4 + 4 * hentries.length;
						}
					}
					int nx = hashtableGet(nIndex);
					if (nx < 0) {
						nx = -nx;
						if (needRecycle(nx)) {
							setEntries(nIndex, nentries);
							evacuated += 4 + 4 * nentries.length;
						}
					}
					break;
				}
				finally {
					writeDoubleUnlock(hIndex, nIndex);
				}
			}
		}
		return evacuated;
	}
994 |
995 | private boolean needRecycle(int pointer) {
996 | return pointer != PagedBinaryStoreManager.EMPTY && pageManager.isMarkedForRecycle(pointer);
997 | }
998 |
999 | private void checkTableSize() {
1000 | float loadFactor = ((float)size.get()) / capacity;
1001 | if (loadFactor > targetLoadFactor) {
1002 | // grow by 1.5
1003 | if (capacity % 2 == 0) {
1004 | growTable(2);
1005 | }
1006 | else {
1007 | growTable(1);
1008 | }
1009 | }
1010 | }
1011 |
	/**
	 * Grows the table by {@code n} slots, linear-hashing style: each step
	 * locks the slot pair (nSplit, nLast), bumps capacity, and redistributes
	 * the entries of slot nSplit between nSplit and the new slot nLast.
	 */
	private void growTable(int n) {
		for(int i = 0; i != n; ++i) {
			int nRound = Integer.highestOneBit(capacity);
			int nLast = capacity;
			// slot to split is the capacity with its top bit cleared
			int nSplit = (nLast) & ~nRound;
			writeDoubleLock(nSplit, nLast);
//			checkHashConsistency();
			try {
				if (capacity == nLast) {
					// writeLock(nLast) ensures what we are holding lock for capacity
					increamentCapacity(); // capacity increased to capacity + 1

					int[] entries = getEntries(nSplit);
					if (entries != null) {
						// sorting groups duplicate pointers so they can be skipped below
						Arrays.sort(entries);
						int ppp = PagedBinaryStoreManager.EMPTY;

						// el1 - entries staying in nSplit, el2 - entries moving to nLast
						int n1 = 0;
						int[] el1 = new int[entries.length];
						int n2 = 0;
						int[] el2 = new int[entries.length];

						for(int pp: entries) {
							// avoid processing of duplicated pointers
							if (ppp == pp) {
								continue;
							}
							ppp = pp;

							ByteChunk chunk = pageManager.get(pp);
							int hash = chunk.intAt(POS_HASH);
							int id = chunk.intAt(POS_ID);
							int ihash = BinHash.hash(id);

							// an entry may be listed via its key hash, its id hash, or both
							boolean copied = false;
							int hIndex = BinHash.splitHash(hash, nLast); // old index
							int hhIndex = BinHash.splitHash(hash, nLast + 1);
							if (hIndex == nSplit) {
								if (hhIndex == nSplit) {
									el1[n1++] = pp;
								}
								else if (hhIndex == nLast) {
									el2[n2++] = pp;
								}
								else {
									throw new AssertionError("New index of hash " + Integer.toHexString(hash) + " is " + hhIndex + ", expected values eigther " + nSplit + " or " + nLast);
								}
								copied = true;
							}

							int nIndex = BinHash.splitHash(ihash, nLast);
							int nnIndex = BinHash.splitHash(ihash, nLast + 1);
							if (nIndex == nSplit) {
								// the extra hhIndex checks avoid adding the same pointer
								// twice when the key hash already placed it in that list
								if (nnIndex == nSplit && hhIndex != nSplit) {
									el1[n1++] = pp;
								}
								else if (nnIndex == nLast && hhIndex != nLast) {
									el2[n2++] = pp;
								}
								else if (nnIndex != nSplit && nnIndex != nLast){
									throw new AssertionError("New index of hash " + Integer.toHexString(hash) + " is " + nnIndex + ", expected values eigther " + nSplit + " or " + nLast);
								}
								copied = true;
							}

							if (!copied) {
								throw new AssertionError("Entry does not belong to hash index");
							}
						}
						el1 = Arrays.copyOf(el1, n1);
						el2 = Arrays.copyOf(el2, n2);

						setEntries(nSplit, el1);
						setEntries(nLast, el2);
					}
				}
			}
			finally {
//				checkHashConsistency();
				writeDoubleUnlock(nSplit, nLast);
			}
		}
	}
1095 |
1096 | @SuppressWarnings("unused") // for testing
1097 | private void checkHashConsistency() {
1098 | for(int i = 0; i != capacity; ++i) {
1099 | int[] entries = getEntries(i);
1100 | if (entries != null) {
1101 | for(int pp : entries) {
1102 | ByteChunk entry = pageManager.get(pp);
1103 | int hash = entry.intAt(POS_HASH);
1104 | int ihash = BinHash.hash(entry.intAt(POS_ID));
1105 | if (BinHash.splitHash(hash, capacity) != i && BinHash.splitHash(ihash, capacity) != i) {
1106 | throw new AssertionError();
1107 | }
1108 | }
1109 | }
1110 | }
1111 | }
1112 |
1113 | // private int hashIndex(ByteChunk key, int capacity) {
1114 | // int hash = BinHash.hash(key);
1115 | // return PagedBinaryStoreManager.splitHash(hash, capacity);
1116 | // }
1117 |
1118 | private AtomicIntegerArray createLocktable(int size) {
1119 | AtomicIntegerArray table = new AtomicIntegerArray(size / 4); // 8 bits per lock
1120 | return table;
1121 | }
1122 |
1123 | private void readLock(int index) {
1124 | int seg = index % segmentCount;
1125 | segmentReadLock(seg);
1126 | }
1127 |
1128 | private void readUnlock(int index) {
1129 | int seg = index % segmentCount;
1130 | segmentReadUnlock(seg);
1131 | }
1132 |
	/**
	 * Acquires a shared (read) lock on a lock-table byte by spinning with CAS.
	 * Positive byte values count active readers; negative values mean a
	 * writer holds or is acquiring the lock (see segmentWriteLock).
	 */
	private void segmentReadLock(int index) {
		int n = 0;
		while(true) {
			byte c = byte_get(locktable, index);
			// cap of 126 keeps the reader count inside byte range
			if (c >= 0 && c < 126) {
				byte u = (byte) (c + 1) ;
				if (byte_compareAndSet(locktable, index, c, u)) {
					return;
				}
			}
			++n;
			// back off every 10 failed attempts
			if (n % 10 == 0) {
				Thread.yield();
			}
		}
	}
1149 |
	/**
	 * Releases a shared lock. With a positive count the reader simply
	 * decrements. A negative count means a writer negated the reader count
	 * (segmentWriteLock sets -c-1), so departing readers increment toward -1,
	 * which is the state the writer is waiting for.
	 */
	private void segmentReadUnlock(int index) {
		int n = 0;
		while(true) {
			byte c = byte_get(locktable, index);
			if (c > 0) {
				// plain reader release
				byte u = (byte) (c - 1) ;
				if (byte_compareAndSet(locktable, index, c, u)) {
					return;
				}
			}
			else if (c < 0) {
				// writer pending: move the negative counter toward -1
				byte u = (byte) (c + 1);
				if (byte_compareAndSet(locktable, index, c, u)) {
					return;
				}
			}
			else {
				// c == 0: unlock without a matching read lock
				throw new IllegalStateException("Invalid lock state");
			}
			++n;
			if (n % 10 == 0) {
				Thread.yield();
			}
		}
	}
1175 |
1176 |
1177 | private void writeLock(int index) {
1178 | int segment = index % segmentCount;
1179 | segmentWriteLock(segment);
1180 | }
1181 |
1182 | private void writeUnlock(int index) {
1183 | int segment = index % segmentCount;
1184 | segmentWriteUnlock(segment);
1185 | }
1186 |
1187 | private void writeDoubleLock(int index1, int index2) {
1188 | int seg1 = index1 % segmentCount;
1189 | int seg2 = index2 % segmentCount;
1190 |
1191 | if (seg1 > seg2) {
1192 | int t = seg1;
1193 | seg1 = seg2;
1194 | seg2 = t;
1195 | }
1196 |
1197 | segmentWriteLock(seg1);
1198 | if (seg1 != seg2) {
1199 | segmentWriteLock(seg2);
1200 | }
1201 | }
1202 |
1203 | private void writeDoubleUnlock(int index1, int index2) {
1204 | int seg1 = index1 % segmentCount;
1205 | int seg2 = index2 % segmentCount;
1206 |
1207 | if (seg1 > seg2) {
1208 | int t = seg1;
1209 | seg1 = seg2;
1210 | seg2 = t;
1211 | }
1212 |
1213 | if (seg1 != seg2) {
1214 | segmentWriteUnlock(seg2);
1215 | }
1216 | segmentWriteUnlock(seg1);
1217 | }
1218 |
	/**
	 * Acquires an exclusive (write) lock. Phase 1: flip the byte negative -
	 * from 0 straight to -1 (lock held), or from k active readers to -k-1,
	 * which blocks new readers. Phase 2: spin until the departing readers
	 * drain the counter to exactly -1 (see segmentReadUnlock).
	 */
	private void segmentWriteLock(int index) {
		int n = 0;
		while(true) {
			byte c = byte_get(locktable, index);
			if (c == 0) {
				// uncontended: take the lock outright
				byte u = (byte) -1;
				if (byte_compareAndSet(locktable, index, c, u)) {
					return;
				}
			}
			else if (c < 0) {
				// another writer is pending
			}
			else if (c > 0){
				// readers active: negate the count so no new readers can enter
				byte u = (byte) (-c - 1);
				if (byte_compareAndSet(locktable, index, c, u)) {
					break;
				}
			}
			++n;
			if (n % 10 == 0) {
				Thread.yield();
			}
		}
		// waiting read locks to get released
		while(true) {
			byte c = byte_get(locktable, index);
			if (c == -1) {
				return;
			}

			++n;
			if (n % 10 == 0) {
				Thread.yield();
			}
		}
	}
1256 |
1257 | private void segmentWriteUnlock(int index) {
1258 | int n = 0;
1259 | while(true) {
1260 | byte c = byte_get(locktable, index);
1261 | if (c == -1) {
1262 | byte u = (byte) 0;
1263 | if (byte_compareAndSet(locktable, index, c, u)) {
1264 | return;
1265 | }
1266 | }
1267 | else {
1268 | throw new IllegalStateException("Broken lock");
1269 | }
1270 | ++n;
1271 | if (n % 10 == 0) {
1272 | Thread.yield();
1273 | }
1274 | }
1275 | }
1276 |
1277 | private byte byte_get(AtomicIntegerArray table, int index) {
1278 | int x = index / 4;
1279 | int xx = index % 4;
1280 | int word = table.get(x);
1281 | return getByte(word, xx);
1282 | }
1283 |
1284 | private boolean byte_compareAndSet(AtomicIntegerArray table, int index, byte expected, byte newValue) {
1285 | int x = index / 4;
1286 | int xx = index % 4;
1287 |
1288 | while(true) {
1289 | int word = table.get(x);
1290 | byte val = getByte(word, xx);
1291 | if (val == expected) {
1292 | int newWord = setByte(word, xx, newValue);
1293 | if (table.compareAndSet(x, word, newWord)) {
1294 | return true;
1295 | }
1296 | else {
1297 | continue;
1298 | }
1299 | }
1300 | else {
1301 | return false;
1302 | }
1303 | }
1304 | }
1305 |
1306 | private byte getByte(int word, int i) {
1307 | switch(i) {
1308 | case 0:
1309 | return (byte) (0xFF & word);
1310 | case 1:
1311 | return (byte) (0xFF & (word >> 8));
1312 | case 2:
1313 | return (byte) (0xFF & (word >> 16));
1314 | case 3:
1315 | return (byte) (0xFF & (word >> 24));
1316 | default:
1317 | throw new IllegalArgumentException("4 bytes per int");
1318 | }
1319 | }
1320 |
1321 | private int setByte(int word,int i, byte value) {
1322 | switch(i) {
1323 | case 0:
1324 | word &= 0xFFFFFF00;
1325 | word |= 0xFF & (int)value;
1326 | return word;
1327 | case 1:
1328 | word &= 0xFFFF00FF;
1329 | word |= (0xFF & (int)value) << 8;
1330 | return word;
1331 | case 2:
1332 | word &= 0xFF00FFFF;
1333 | word |= (0xFF & (int)value) << 16;
1334 | return word;
1335 | case 3:
1336 | word &= 0x00FFFFFF;
1337 | word |= (0xFF & (int)value) << 24;
1338 | return word;
1339 | default:
1340 | throw new IllegalArgumentException("4 bytes per int");
1341 | }
1342 | }
1343 |
1344 | public void _debug_dump() {
1345 | for(int i = 0; i != capacity; ++i) {
1346 | int[] entries = getEntries(i);
1347 | if (entries != null && entries.length > 0) {
1348 | System.out.print(i + "\t -> " + (entries == null ? "[]" : Arrays.toString(entries)));
1349 | for(int pp : entries) {
1350 | ByteChunk chunk = pageManager.get(pp);
1351 | int hash = chunk.intAt(POS_HASH);
1352 | int id = chunk.intAt(POS_ID);
1353 | int refCount = chunk.intAt(POS_REF_COUNT);
1354 | ByteChunk key = getKeyData(chunk);
1355 | System.out.print(" #" + hash + " " + key + " id=" + id + " refCount=" + refCount);
1356 | }
1357 | System.out.println();
1358 | }
1359 | }
1360 | }
1361 | }
--------------------------------------------------------------------------------
/jmemstore/src/main/java/org/gridkit/offheap/jmemstore/InHeapBinaryStoreManager2.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2011 Alexey Ragozin
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | *
16 | */
17 | package org.gridkit.offheap.jmemstore;
18 |
19 | /**
20 | * @author Alexey Ragozin (alexey.ragozin@gmail.com)
21 | */
22 | public class InHeapBinaryStoreManager2 extends PagedBinaryStoreManager {
23 |
24 | public InHeapBinaryStoreManager2(String name, int pageSize, long totalSizeLimit) {
25 | super(name, new InHeapMemoryStoreBackend(pageSize, (int) (totalSizeLimit / pageSize), 2));
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/jmemstore/src/main/java/org/gridkit/offheap/jmemstore/InHeapMemoryStoreBackend.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2011 Alexey Ragozin
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | *
16 | */
17 | package org.gridkit.offheap.jmemstore;
18 |
19 | import java.util.concurrent.TimeUnit;
20 | import java.util.concurrent.atomic.AtomicInteger;
21 | import java.util.concurrent.atomic.AtomicIntegerArray;
22 | import java.util.concurrent.atomic.AtomicLong;
23 | import java.util.concurrent.atomic.AtomicReferenceArray;
24 | import java.util.concurrent.locks.LockSupport;
25 | import java.util.concurrent.locks.ReentrantLock;
26 |
27 | /**
28 | * @author Alexey Ragozin (alexey.ragozin@gmail.com)
29 | */
30 | class InHeapMemoryStoreBackend implements MemoryStoreBackend {
31 |
32 | private final static int PAGE_HEADER = 32; // leaves 0 and 1 pointers as special values
33 | private final static int ALIGNMENT = 4; // 16 bytes
34 |
35 | private final int pageSize;
36 | private final int pageUsageLimit;
37 | private final int offsetMask;
38 | private final int pageShift;
39 |
40 | private AtomicLong memUsed = new AtomicLong();
41 | private long memUsageLimit;
42 |
43 | private ReentrantLock[] allocationLock;
44 | private AtomicInteger pagesInUse = new AtomicInteger();
45 | private AtomicInteger[] top;
46 | private AtomicReferenceArray pages;
47 | private AtomicIntegerArray pageUtilizations;
48 | private AtomicInteger evacuationQueueLength = new AtomicInteger();
49 | private int evacuationQueueLimit;
50 | private long[] pageTimestamps;
51 | private volatile boolean[] pagesForCleanUp;
52 |
53 | private ReentrantLock evacuationLock = new ReentrantLock();
54 | private int evacuationPage;
55 | private int evacuationPointer;
56 | private AtomicInteger pageReleaseCounter = new AtomicInteger();
57 |
58 | private float scavengeGcThreshold = 0.8f;
59 | private float minGcThreshold = 0.4f;
60 | private float gcThreshold = minGcThreshold;
61 |
62 | private volatile int fence;
63 |
	/**
	 * @param pageSize page size in bytes; must be a power of 2, at most 1Gb
	 * @param pageUsageLimit maximum number of pages (pageSize * limit <= 32Gb)
	 * @param allocNumber number of independent allocation streams
	 */
	public InHeapMemoryStoreBackend(int pageSize, int pageUsageLimit, int allocNumber) {
		this.pageSize = pageSize;
		if (pageSize != Integer.highestOneBit(pageSize) || pageSize > 1 << 30) {
			throw new IllegalArgumentException("Invalid page size " + pageSize + ", valid page size should be power of 2 and no more than 1Gb");
		}
		// pointer layout: [page bits][offset bits]; offsets counted in ALIGNMENT units
		this.offsetMask = (pageSize - 1) >> ALIGNMENT;
		this.pageShift = Integer.bitCount(offsetMask);
		if (1l * pageSize * pageUsageLimit > 32l << 30) {
			throw new IllegalArgumentException("Single manager cannot handle more than 32Gb of memory");
		}

		this.pageUsageLimit = pageUsageLimit;
		this.memUsageLimit = ((long)pageSize) * pageUsageLimit;

		this.memUsed.set(0);
		this.pagesInUse.set(0);

		this.pages = new AtomicReferenceArray(pageUsageLimit);
		this.pageUtilizations = new AtomicIntegerArray(pageUsageLimit);
		this.pageTimestamps = new long[pageUsageLimit];
		this.pagesForCleanUp = new boolean[pageUsageLimit];

//		evacuationQueueLimit = pageUsageLimit / 16;
//		evacuationQueueLimit = evacuationQueueLimit < 2 ? 2 : evacuationQueueLimit;
		evacuationQueueLimit = pageUsageLimit;

		// allocate first page
		allocationLock = new ReentrantLock[allocNumber];
		top = new AtomicInteger[allocNumber];
		for(int i = 0; i!= allocNumber; ++i) {
			allocationLock[i] = new ReentrantLock();
			top[i] = new AtomicInteger();
			// each stream starts just past the header of its own fresh page
			top[i].set(pointer(newPage(i), PAGE_HEADER));
		}

//		dumpStatistics();
	}
101 |
102 | public int page(int pointer) {
103 | int page = (0x7FFFFFFF & pointer) >> pageShift;
104 | return page;
105 | }
106 |
107 | public int offset(int pointer) {
108 | int offs = (offsetMask & pointer) << ALIGNMENT;
109 | // if (offs >= pageSize) {
110 | // // debug;
111 | // new String();
112 | // }
113 | return offs;
114 | }
115 |
116 | int size(int sv) {
117 | return 0x7FFFFFFF & sv;
118 | }
119 |
120 | boolean erased(int sv) {
121 | return (0x80000000 & sv) != 0;
122 | }
123 |
124 | int pointer(int page, int offset) {
125 | int pointer = offsetMask & (offset >> ALIGNMENT);
126 | // if (pointer == 0 && offset != 0) {
127 | // throw new AssertionError();
128 | // }
129 | pointer |= (0x7FFFFFFF & (page << pageShift));
130 | // if (page != page(pointer)) {
131 | // new String();
132 | // }
133 | // if (offset != offset(pointer)) {
134 | // new String();
135 | // }
136 | return pointer;
137 | }
138 |
139 | /* (non-Javadoc)
140 | * @see org.gridkit.coherence.offheap.storage.memlog.MemoryStoreBackend#get(int)
141 | */
142 | public ByteChunk get(int pointer) {
143 | validate(pointer);
144 | int page = page(pointer);
145 | int offs = offset(pointer);
146 | if (offs < PAGE_HEADER) {
147 | throw new IllegalArgumentException("Invalid pointer " + Long.toHexString(pointer));
148 | }
149 |
150 | ByteChunk chunk = pages.get(page);
151 | if (chunk == null) {
152 | throw new IllegalArgumentException("Broken pointer " + Long.toHexString(pointer) + " page " + Integer.toHexString(page) + " is not allocated");
153 | }
154 | int len = size(chunk.intAt(offs));
155 | if (offs + 4 + len > pageSize) {
156 | new String();
157 | }
158 | return chunk.subChunk(offs + 4, len - 4);
159 | }
160 |
	@Override
	public void update(int pointer, ByteChunk bytes) {
		// TODO
		// no need to update
		// Intentionally a no-op for the in-heap backend: chunks handed out by
		// get() are presumably live views over the page buffer, so mutations
		// are already visible. NOTE(review): confirm against ByteChunk.subChunk.
	}
166 |
167 | public int readInt(int pointer, int offset) {
168 | validate(pointer);
169 | int page = page(pointer);
170 | int offs = offset(pointer);
171 | if (offs < PAGE_HEADER) {
172 | throw new IllegalArgumentException("Invalid pointer " + Long.toHexString(pointer));
173 | }
174 |
175 | ByteChunk chunk = pages.get(page);
176 | if (chunk == null) {
177 | throw new IllegalArgumentException("Broken pointer " + Long.toHexString(pointer) + " page " + Integer.toHexString(page) + " is not allocated");
178 | }
179 | int len = size(chunk.intAt(offs));
180 | if (offs + 4 + len > pageSize) {
181 | new String();
182 | }
183 | if (offset + 4 > len) {
184 | throw new IndexOutOfBoundsException("Requested offset " + offset + ", chunk lenght " + len);
185 | }
186 |
187 | return chunk.intAt(offs + 4 + offset);
188 | }
189 |
190 | public void writeInt(int pointer, int offset, int value) {
191 | validate(pointer);
192 | int page = page(pointer);
193 | int offs = offset(pointer);
194 | if (offs < PAGE_HEADER) {
195 | throw new IllegalArgumentException("Invalid pointer " + Long.toHexString(pointer));
196 | }
197 |
198 | ByteChunk chunk = pages.get(page);
199 | if (chunk == null) {
200 | throw new IllegalArgumentException("Broken pointer " + Long.toHexString(pointer) + " page " + Integer.toHexString(page) + " is not allocated");
201 | }
202 | int len = size(chunk.intAt(offs));
203 | if (offs + 4 + len > pageSize) {
204 | new String();
205 | }
206 | if (offset + 4 > len) {
207 | throw new IndexOutOfBoundsException("Requested offset " + offset + ", chunk lenght " + len);
208 | }
209 |
210 | chunk.putInt(offs + 4 + offset, value);
211 | }
212 |
	/* (non-Javadoc)
	 * @see org.gridkit.coherence.offheap.storage.memlog.MemoryStoreBackend#allocate(int, int)
	 */
	/**
	 * Allocates {@code size} bytes from allocation stream {@code allocNo}
	 * using a lock-free bump pointer; takes the per-stream lock only to roll
	 * over to a fresh page. Chunk layout: [4-byte size header][payload],
	 * 16-byte aligned.
	 */
	public int allocate(int size, int allocNo) {
		if (size > pageSize >> 2) {
			// TODO allocate large objects in heap
			throw new IllegalArgumentException("Size is too large");
		}
		while(true) {
			int len = size;
			len += 4;
			int alen = align(len);

			int pp;
			while(true) {
				pp = top[allocNo].get();
				int offs;
				offs = offset(pp);
				// this is a dirty way to avoid perfect page fit edge case
				if (offs + alen + 1> pageSize) {
					allocationLock[allocNo].lock();
					try {
						// re-check under the lock; another thread may have rolled the page
						pp = top[allocNo].get();
						offs = offset(pp);
						if (offs + alen + 1> pageSize) {
							int page = page(pp);
							int newPage = newPage(page + 1);
							System.out.println("Page allocated " + newPage);
							top[allocNo].set(pointer(newPage,PAGE_HEADER));
							// the abandoned page may already be empty - release it
							if (pageUtilizations.get(page) == 0) {
								ByteChunk oldPage = pages.getAndSet(page, null);
								if (oldPage != null) {
									pagesInUse.decrementAndGet();
									pageReleaseCounter.incrementAndGet();
								}
							}
						}
						else {
							continue;
						}
					}
					finally {
						allocationLock[allocNo].unlock();
					}
				}
				else {
					// claim [offs, offs + alen) by advancing the bump pointer
					int npp = pointer(page(pp), offs + alen);
					if (top[allocNo].compareAndSet(pp, npp)) {
						break;
					}
				}
			}

			int page = page(pp);
			int offs = offset(pp);
			ByteChunk pageBuf = pages.get(page);
//			TODO debug
//			if (pageBuf == null) {
//				new String();
//			}
			// freshly claimed memory must still be zeroed; a non-zero size
			// header indicates corruption or a double allocation
			if (pageBuf.intAt(offs) != 0) {
				int xx = top[allocNo].get();
				System.err.println("Dirty memory allocated!!!");
				System.err.println("Allocation pointer " + page(xx) + ":" + offset(xx) + " stream " + allocNo);
				dumpPage(page, pageBuf);
				throw new AssertionError("Allocation have failed (" + size + " requested). Dirty memory: " + page + ":" + offs);
			}
			pageBuf.putInt(offs, len);
			pageUtilizations.addAndGet(page, len);
			memUsed.addAndGet(len);
			// NOTE(review): volatile bump, presumably acting as a store fence - confirm
			fence += 2;
			validate(pp);
			return pp;
		}
	}
288 |
289 | private void dumpPage(int page, ByteChunk pageBuf) {
290 | System.err.println("Page dump, page " + page);
291 | int offs = PAGE_HEADER;
292 | while(offs < pageSize) {
293 | int size = pageBuf.intAt(offs);
294 | if (size == 0) {
295 | System.err.println("End of page 0x" + Integer.toHexString(offs) + "(" + offs + ")");
296 | break;
297 | }
298 | System.err.println("Chunk 0x" + Integer.toHexString(offs) + "(" + offs + ")" + ", size=" + size(size) + ", erased=" + erased(size));
299 | size = align(size(size));
300 | ByteChunk chunk = pageBuf.subChunk(offs, size);
301 | System.err.println(chunk.toString());
302 | offs += size;
303 | }
304 | }
305 |
306 | private int align(int len) {
307 | // TODO unhardcode alignment
308 | return (len + 0xF) & (0xFFFFFFF0);
309 | }
310 |
	/**
	 * Allocates a fresh zeroed page and registers it in the first free slot
	 * at or after {@code start} (wrapping around). When every slot is taken,
	 * parks 100ms at a time until the scavenger frees one.
	 *
	 * @return index of the newly registered page
	 */
	private int newPage(int start) {
		ByteChunk chunk = new ByteChunk(new byte[pageSize]);
		pagesInUse.incrementAndGet();
		while(true) {
			for(int i = 0; i != pageUsageLimit; ++i) {
				int page = (start + i) % pageUsageLimit;
				if (pages.get(page) == null) {
					// NOTE(review): flag cleared before the CAS; if the CAS
					// fails this touches a slot another thread just claimed - verify
					pagesForCleanUp[page] = false;
					if (pages.compareAndSet(page, null, chunk)) {
						pageTimestamps[page] = System.nanoTime();
						return page;
					}
					else {
						continue;
					}
				}
			}
			// hit memory limit, should give scavenger some time to
			// recover pages
			System.out.println("Out of pages");
			LockSupport.parkNanos(TimeUnit.MILLISECONDS.toNanos(100));
		}
	}
334 |
335 | /* (non-Javadoc)
336 | * @see org.gridkit.coherence.offheap.storage.memlog.MemoryStoreBackend#release(int)
337 | */
	/**
	 * Releases the chunk at {@code pointer}: subtracts its recorded size from
	 * page/global accounting, marks its header as erased, and if the page is
	 * now empty tries to unpublish the page and reset evacuation state.
	 *
	 * @throws IllegalArgumentException on a malformed pointer
	 */
	public void release(int pointer) {
		int page = page(pointer);
		int offs = offset(pointer);
		if (offs < PAGE_HEADER) {
			throw new IllegalArgumentException("Invalid pointer " + Long.toHexString(pointer));
		}
		if (page >= pages.length()) {
			// TODO allocate large objects in heap
			throw new IllegalArgumentException("Invalid pointer " + Integer.toHexString(pointer));
		}
		
		
		ByteChunk pageBuf = pages.get(page);
		// len is the full chunk size as stored in its 4 byte header
		int len = pageBuf.intAt(offs);
		int newSize = pageUtilizations.addAndGet(page, -len);
		memUsed.addAndGet(-len);
		
		// if the page is not yet queued for clean up, see whether this release
		// pushed it below the GC threshold
		if (!pagesForCleanUp[page]) {
			checkPageUsage(page);
		}
		if (newSize == 0) {
			System.out.println("Page " + page + " has no more data");
			
			// the page must not be an active allocation target of any stream
			int allocNo = -1;
			for(int j = 0; j != top.length; ++j) {
				if (page(top[j].get()) == page) {
					allocNo = j;
					break;
				}
			}
			// TODO is race condition possible ?
			if (pageUtilizations.get(page) == 0 && allocNo == -1) {
				// unpublish page; CAS guards against a concurrent re-allocation of the slot
				if (pages.compareAndSet(page, pageBuf, null)) {
					pagesInUse.decrementAndGet();
					pageReleaseCounter.incrementAndGet();
					if (pagesForCleanUp[page]) {
						evacuationQueueLength.decrementAndGet();
					}
				}
			}
			
			// drop evacuation cursor if it was parked on this (now empty) page
			evacuationLock.lock();
			try {
				if (evacuationPage == page) {
					evacuationPage = -1;
					evacuationPointer = 0;
				}
			}
			finally {
				evacuationLock.unlock();
			}
		}
		
		// mark chunk as deleted
		pageBuf.putInt(offs, 0x80000000 | len);
	}
394 |
	/**
	 * Queues {@code page} for evacuation if its utilization fell below the GC
	 * threshold, the page is not an active allocation target, and the
	 * evacuation queue has capacity. Queue length is claimed via CAS so the
	 * limit is not overshot by concurrent callers.
	 */
	private void checkPageUsage(int page) {
		// skip pages currently used as a bump-allocation target by any stream
		int allocNo = -1;
		for(int j = 0; j != top.length; ++j) {
			if (page(top[j].get()) == page) {
				allocNo = j;
				break;
			}
		}
		if (allocNo == -1) {
			int usage = pageUtilizations.get(page);
			if (usage < (gcThreshold * pageSize)) {
				int ql = evacuationQueueLength.get();
				if (pages.get(page) != null && ql < evacuationQueueLimit) {
					// CAS may fail under contention; next release() will retry
					if (evacuationQueueLength.compareAndSet(ql, ql + 1)) {
						pagesForCleanUp[page] = true;
					}
				}
			}
		}
	}
415 |
416 | /* (non-Javadoc)
417 | * @see org.gridkit.coherence.offheap.storage.memlog.MemoryStoreBackend#shouldEvacuate()
418 | */
	/**
	 * Tells the scavenger whether there is a page worth evacuating. If no
	 * evacuation page is currently selected, tries to choose one and primes
	 * the evacuation pointer at its first live chunk.
	 */
	public boolean shouldEvacuate() {
		evacuationLock.lock();
		try {
			if (evacuationPage >= 0) {
				return true;
			}
			else {
				evacuationPage = choosePageToEvacuate();
				if (evacuationPage != -1) {
					ByteChunk pageBuf = pages.get(evacuationPage);
					if (pageBuf != null) {
						// scan from the first chunk after the page header
						evacuationPointer = nextChunk(pageBuf, pointer(evacuationPage, PAGE_HEADER), true);
					}
				}
				return evacuationPage >= 0;
			}
		}
		finally {
			evacuationLock.unlock();
		}
	}
440 |
441 | /* (non-Javadoc)
442 | * @see org.gridkit.coherence.offheap.storage.memlog.MemoryStoreBackend#collectHashesForEvacuation(int[], int)
443 | */
	/**
	 * Collects up to {@code len} hashes of live chunks on the current
	 * evacuation page into {@code hashes}, advancing the evacuation pointer.
	 * A zero {@code len} means "fill the whole array".
	 *
	 * @return number of hashes actually collected (may be 0 if nothing to evacuate)
	 */
	public int collectHashesForEvacuation(int[] hashes, int len) {
		len = len == 0 ? hashes.length : len;
		evacuationLock.lock();
		try {
			
			int i;
			for (i = 0; i != len; ++i) {
				
				// no cursor: try to pick a new page and park the cursor at its first live chunk
				if (evacuationPointer == 0) {
					evacuationPage = choosePageToEvacuate();
					if (evacuationPage != -1) {
						ByteChunk pageBuf = pages.get(evacuationPage);
						if (pageBuf != null) {
							evacuationPointer = nextChunk(pageBuf, pointer(evacuationPage, PAGE_HEADER), true);
						}
					}
				}
				
				if (evacuationPointer == 0) {
					// nothing (more) to evacuate
					break;
				}
				else {
					int page = page(evacuationPointer);
					int offset = offset(evacuationPointer);
					ByteChunk pageBuf = pages.get(page);
					if (pageBuf == null) {
						// page got released concurrently; reset cursor
						evacuationPointer = 0;
						break;
					}
					// hash is stored in the first 4 payload bytes after the chunk header
					int hash = pageBuf.intAt(offset + 4);
					evacuationPointer = nextChunk(pageBuf, evacuationPointer, false);
					hashes[i] = hash;
//					if (evacuationPointer == 0) {
//						break;
//					}
				}
			}
			return i;
		}
		finally {
			evacuationLock.unlock();
		}
	}
487 |
488 | private int choosePageToEvacuate() {
489 | int page = -1;
490 | int minUsed = pageSize;
491 | for (int i = 0; i != pages.length(); ++i) {
492 | if (pageUtilizations.get(i) == 0) {
493 | continue;
494 | }
495 | if (evacuationPage == i) {
496 | continue;
497 | }
498 | boolean tp = false;
499 | for (int j = 0; j != top.length; ++j) {
500 | if (page(top[j].get()) == i) {
501 | tp = true;
502 | break;
503 | }
504 | }
505 | if (!tp) {
506 | int usage = pageUtilizations.get(i);
507 | if (minUsed > usage) {
508 | minUsed = usage;
509 | page = i;
510 | }
511 | }
512 | }
513 |
514 | if (minUsed < scavengeGcThreshold * pageSize) {
515 | System.out.println("Next evacuation page is " + page + " utilization " + ((float)minUsed) / pageSize);
516 | evacuationPage = page;
517 | return page;
518 | }
519 |
520 | return -1;
521 | }
522 |
523 | private int nextChunk(ByteChunk pageBuf, int pointer, boolean inclusive) {
524 | validate(pointer);
525 |
526 | int page = page(pointer);
527 | int offs = offset(pointer);
528 |
529 | pageBuf = pageBuf != null ? pageBuf : pages.get(page);
530 |
531 | if (inclusive && !erased(pageBuf.intAt(offs))) {
532 | return pointer;
533 | }
534 |
535 | int len = align(size(pageBuf.intAt(offs)));
536 | offs += len;
537 |
538 | while(offs < pageSize) {
539 | int sv = pageBuf.intAt(offs);
540 | if (sv == 0) {
541 | break;
542 | }
543 | else {
544 | if (erased(sv)) {
545 | offs += align(size(sv));
546 | continue;
547 | }
548 | else {
549 | return pointer(page, offs);
550 | }
551 | }
552 | }
553 | return 0;
554 | }
555 |
556 | /* (non-Javadoc)
557 | * @see org.gridkit.coherence.offheap.storage.memlog.MemoryStoreBackend#isMarkedForRecycle(int)
558 | */
559 | public boolean isMarkedForRecycle(int pp) {
560 | int page = page(pp);
561 | // if ((page < 0) || (page > pagesForCleanUp.length)) {
562 | // // for debug
563 | // new String();
564 | // }
565 | return page == evacuationPage || pagesForCleanUp[page];
566 | }
567 |
568 | /* (non-Javadoc)
569 | * @see org.gridkit.coherence.offheap.storage.memlog.MemoryStoreBackend#getMemUsage()
570 | */
	/**
	 * Current total of live chunk bytes as tracked by allocate()/release().
	 */
	public long getMemUsage() {
		return memUsed.get();
	}
574 |
575 | // for debug only
	/**
	 * Debug-only pointer consistency check: walks chunk headers from the page
	 * start and verifies {@code pp} lands exactly on a chunk boundary.
	 * Currently DISABLED via the constant {@code if (true) return;} guard —
	 * flip the guard to re-enable during debugging.
	 */
	void validate(int pp) {
		if (true) {
			return;
		}
		else {
			int page = page(pp);
			int offs = offset(pp);
			
			if (offs < PAGE_HEADER) {
				throw new AssertionError();
			}
			
//			if (page == 0) {
//				return;
//			}
			
			if (page <0 || page > pages.length()) {
				throw new AssertionError();
			}
			
			ByteChunk buf = pages.get(page);
			if (buf == null) {
				throw new IllegalArgumentException("Pointer " + Integer.toHexString(pp) + " points to non existent page");
			}
			int roll = PAGE_HEADER;
			int oldRoll = 0;
			while(true) {
				// forcing memory fence
				synchronized(buf) {
					if (roll == offs) {
						int size = buf.intAt(roll);
						if (align(size) + offs > pageSize) {
							throw new AssertionError();
						}
						return;
					}
					if (roll >offs) {
						throw new AssertionError();
					}
					int size = 0;
					// retry loop: a concurrent allocator may not have published the header yet
					for (int i = 0; i != 50; ++i) {
						synchronized (buf) {
							size = size(buf.intAt(roll));
							if (size != 0) {
								break;
							}
							Thread.yield();
						}
					}
					if (size == 0) {
						throw new AssertionError();
					}
					oldRoll = roll;
					roll += align(size);
					if (roll >offs) {
						throw new AssertionError();
					}
				}
			}
		}
	}
637 |
638 | /* (non-Javadoc)
639 | * @see org.gridkit.coherence.offheap.storage.memlog.MemoryStoreBackend#dumpStatistics()
640 | */
	/**
	 * Prints allocator statistics (page counts, utilization per page) to
	 * stdout and resets the page release counter.
	 */
	public void dumpStatistics() {
		
		// bytes still unallocated past each allocation stream's bump pointer
		long unallocated = 0;
		for(int j = 0; j != top.length; ++j) {
			int tp = top[j].get();
			unallocated += pageSize - offset(tp);
		}
		
		StringBuilder buf = new StringBuilder();
		buf.append("Pages allocated: ").append(pagesInUse.get()).append('/').append(pageUsageLimit).append(" (").append(pageSize).append(" per page)").append('\n');
		buf.append("Pages freed since last report: ").append(pageReleaseCounter.get()).append('\n');
		buf.append("Memory used: ").append(memUsed.get()).append('/').append(((long)pageUsageLimit) * pageSize).append('\n');
		buf.append("Page utilization: ").append(String.format("%f", ((double)memUsed.get() + unallocated) / (((double)pagesInUse.get()) * pageSize))).append('\n');
		
		for(int i = 0; i != pageUtilizations.length(); ++i) {
			// us = usable span of page i; shrinks to the bump pointer when the
			// page is a current allocation target
			int us = pageSize;
			for(int j = 0; j != top.length; ++j) {
				int tp = top[j].get();
				if (i == page(tp)) {
					us = offset(tp);
					break;
				}
			}
			
			int pu = pageUtilizations.get(i);
			if (pu > 0) {
				buf.append(i).append(" -> ").append(String.format("%f", ((double)pu) / (((double)us))));
				if (us < pageSize) {
					buf.append('*'); // marks an active allocation target
				}
				if (i == evacuationPage) {
					buf.append('!'); // marks the page under evacuation
				}
				buf.append('\n');
			}
		}
		buf.append('\n');
		
		pageReleaseCounter.set(0);
		System.out.println(buf.toString());
	}
682 | }
683 |
--------------------------------------------------------------------------------
/jmemstore/src/main/java/org/gridkit/offheap/jmemstore/MemoryConsumer.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2011 Alexey Ragozin
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | *
16 | */
17 | package org.gridkit.offheap.jmemstore;
18 |
19 | /**
20 | * @author Alexey Ragozin (alexey.ragozin@gmail.com)
21 | */
interface MemoryConsumer {

	/**
	 * Evacuation callback: the first {@code hashCount} entries of
	 * {@code evacuationHashes} identify chunks scheduled for relocation.
	 * NOTE(review): exact relocation contract inferred from
	 * collectHashesForEvacuation() — confirm against the store manager.
	 */
	public void recycleHashes(int[] evacuationHashes, int hashCount);

	/** Releases resources owned by this consumer. */
	public void destroy();

}
29 |
--------------------------------------------------------------------------------
/jmemstore/src/main/java/org/gridkit/offheap/jmemstore/MemoryStoreBackend.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2011 Alexey Ragozin
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | *
16 | */
17 | package org.gridkit.offheap.jmemstore;
18 |
19 |
20 | /**
21 | * @author Alexey Ragozin (alexey.ragozin@gmail.com)
22 | */
/**
 * Low level chunk storage: allocates, reads, updates and releases binary
 * chunks addressed by int pointers, and supports page evacuation for GC.
 */
interface MemoryStoreBackend {

	/** Returns the payload of the chunk at {@code pointer} (without its size header). */
	public abstract ByteChunk get(int pointer);

	/** Overwrites the payload of the chunk at {@code pointer}; sizes must match exactly. */
	public abstract void update(int pointer, ByteChunk bytes);

	/** Allocates a chunk of {@code size} bytes in allocation stream {@code allocNo}; returns its pointer. */
	public abstract int allocate(int size, int allocNo);

	/** Releases the chunk at {@code pointer}, returning its space to the page. */
	public abstract void release(int pointer);

	/**
	 * Fills {@code hashes} (up to {@code len} entries, 0 meaning whole array)
	 * with hashes of chunks pending evacuation; returns the count collected.
	 */
	public abstract int collectHashesForEvacuation(int[] hashes, int len);

	/** True if the page holding {@code pp} is marked for evacuation/clean up. */
	public abstract boolean isMarkedForRecycle(int pp);

	/** Total bytes of live chunk data currently allocated. */
	public abstract long getMemUsage();

	/** Prints backend statistics to stdout. */
	public abstract void dumpStatistics();

	// for diagnostic reasons
	public abstract int page(int npp);

	// for diagnostic reasons
	public abstract int offset(int npp);

	/** Reads an int at byte {@code offset} within the chunk's payload. */
	public int readInt(int pointer, int offset);

	/** Writes an int at byte {@code offset} within the chunk's payload. */
	public void writeInt(int pointer, int offset, int value);

}
--------------------------------------------------------------------------------
/jmemstore/src/main/java/org/gridkit/offheap/jmemstore/OffHeapBinaryStoreManager2.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2011 Alexey Ragozin
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | *
16 | */
17 | package org.gridkit.offheap.jmemstore;
18 |
19 | /**
20 | * @author Alexey Ragozin (alexey.ragozin@gmail.com)
21 | */
22 | public class OffHeapBinaryStoreManager2 extends PagedBinaryStoreManager {
23 |
24 | public OffHeapBinaryStoreManager2(String name, int pageSize, long totalSizeLimit) {
25 | super(name, new OffHeapMemoryStoreBackend(pageSize, (int) (totalSizeLimit / pageSize), 2));
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/jmemstore/src/main/java/org/gridkit/offheap/jmemstore/OffHeapMemoryStoreBackend.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2011 Alexey Ragozin
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | *
16 | */
17 | package org.gridkit.offheap.jmemstore;
18 |
19 | import java.nio.ByteBuffer;
20 | import java.util.BitSet;
21 | import java.util.concurrent.Semaphore;
22 | import java.util.concurrent.TimeUnit;
23 | import java.util.concurrent.atomic.AtomicInteger;
24 | import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
25 | import java.util.concurrent.atomic.AtomicLong;
26 | import java.util.concurrent.atomic.AtomicReferenceArray;
27 | import java.util.concurrent.locks.LockSupport;
28 | import java.util.concurrent.locks.ReentrantLock;
29 |
30 | /**
31 | * @author Alexey Ragozin (alexey.ragozin@gmail.com)
32 | */
33 | class OffHeapMemoryStoreBackend implements MemoryStoreBackend {
34 |
35 | final static int PAGE_HEADER = 32; // leaves 0 and 1 pointers as special values
36 | private final static int ALIGNMENT = 4; // 16 bytes, IMPORTANT if allignment constant is changing align() method should be updated
37 |
38 | private final static int DIRECT_BUFFER_SIZE = 64 << 20; // 64MiB
39 | // private final static int DIRECT_BUFFER_SIZE = 64 << 10; // 64KiB, for testing only
40 |
41 | final int pageSize;
42 | private final int pageUsageLimit;
43 | private final int offsetMask;
44 | private final int pageShift;
45 |
46 | private AtomicLong memUsed = new AtomicLong();
47 | private long memUsageLimit;
48 |
49 | private AtomicInteger pagesInUse = new AtomicInteger();
50 | private Allocator[] allocators;
51 | AtomicReferenceArray pages;
52 |
53 | private ReentrantLock evacuationLock = new ReentrantLock();
54 | private int evacuationPage;
55 | private int evacuationPointer;
56 | private AtomicInteger pageReleaseCounter = new AtomicInteger();
57 |
58 | private float scavengeGcThreshold = 0.8f;
59 | private float minGcThreshold = 0.2f;
60 | private float gcThreshold = minGcThreshold;
61 |
62 | private volatile int fence;
63 |
64 | private OffHeapPageAllocator pageAllocator;
65 |
66 | public OffHeapMemoryStoreBackend(int pageSize, int pageUsageLimit, int allocNumber) {
67 | this.pageSize = pageSize;
68 | if (pageSize != Integer.highestOneBit(pageSize) || pageSize > 1 << 30) {
69 | throw new IllegalArgumentException("Invalid page size " + pageSize + ", valid page size should be power of 2 and no more than 1Gb");
70 | }
71 | this.offsetMask = (pageSize - 1) >> ALIGNMENT;
72 | this.pageShift = Integer.bitCount(offsetMask);
73 | if (1l * pageSize * pageUsageLimit > 32l << 30) {
74 | throw new IllegalArgumentException("Single manager cannot handle more than 32Gb of memory");
75 | }
76 |
77 | this.pageUsageLimit = pageUsageLimit;
78 | this.memUsageLimit = ((long)pageSize) * pageUsageLimit;
79 |
80 | this.pageAllocator = new OffHeapPageAllocator(pageSize, pageUsageLimit);
81 |
82 | this.memUsed.set(0);
83 | this.pagesInUse.set(0);
84 |
85 | this.pages = new AtomicReferenceArray(pageUsageLimit);
86 |
87 | // allocate first page
88 | allocators = new Allocator[allocNumber];
89 | for(int i = 0; i!= allocNumber; ++i) {
90 | allocators[i] = new Allocator("#" + i);
91 | }
92 |
93 | // dumpStatistics();
94 | }
95 |
96 | public int page(int pointer) {
97 | int page = (0x7FFFFFFF & pointer) >> pageShift;
98 | return page;
99 | }
100 |
101 | public int offset(int pointer) {
102 | int offs = (offsetMask & pointer) << ALIGNMENT;
103 | // if (offs >= pageSize) {
104 | // // debug;
105 | // new String();
106 | // }
107 | return offs;
108 | }
109 |
	// low 31 bits of a chunk header hold the chunk size; the sign bit is the "erased" flag
	int size(int sv) {
		return 0x7FFFFFFF & sv;
	}
113 |
	// sign bit of a chunk header marks the chunk as deleted (see release())
	boolean erased(int sv) {
		return (0x80000000 & sv) != 0;
	}
117 |
118 | int pointer(int page, int offset) {
119 | int pointer = offsetMask & (offset >> ALIGNMENT);
120 | // if (pointer == 0 && offset != 0) {
121 | // throw new AssertionError();
122 | // }
123 | pointer |= (0x7FFFFFFF & (page << pageShift));
124 | // if (page != page(pointer)) {
125 | // new String();
126 | // }
127 | // if (offset != offset(pointer)) {
128 | // new String();
129 | // }
130 | return pointer;
131 | }
132 |
133 | /* (non-Javadoc)
134 | * @see org.gridkit.coherence.offheap.storage.memlog.MemoryStoreBackend#get(int)
135 | */
136 | public ByteChunk get(int pointer) {
137 | validate(pointer);
138 | int page = page(pointer);
139 | int offs = offset(pointer);
140 | if (offs < PAGE_HEADER) {
141 | throw new IllegalArgumentException("Invalid pointer " + Long.toHexString(pointer));
142 | }
143 |
144 | Page chunk = pages.get(page);
145 | if (chunk == null) {
146 | throw new IllegalArgumentException("Broken pointer " + Long.toHexString(pointer) + " page " + Integer.toHexString(page) + " is not allocated");
147 | }
148 | int len = size(chunk.intAt(offs));
149 | if (offs + 4 + len > pageSize) {
150 | new String();
151 | }
152 | return chunk.subChunk(offs + 4, len - 4);
153 | }
154 |
155 | public int readInt(int pointer, int offset) {
156 | validate(pointer);
157 | int page = page(pointer);
158 | int offs = offset(pointer);
159 | if (offs < PAGE_HEADER) {
160 | throw new IllegalArgumentException("Invalid pointer " + page + ":" + offset + "[" + Long.toHexString(pointer) + "]");
161 | }
162 |
163 | Page chunk = pages.get(page);
164 | if (chunk == null) {
165 | throw new IllegalArgumentException("Broken pointer " + Long.toHexString(pointer) + " page " + page + " is not allocated");
166 | }
167 | int len = size(chunk.intAt(offs));
168 | if (offs + 4 + len > pageSize) {
169 | new String();
170 | }
171 | // 4 bytes of len is reserved for chunk size
172 | if (offset + 4 > len - 4) {
173 | throw new IndexOutOfBoundsException("Requested offset " + offset + ", chunk lenght " + (len - 4));
174 | }
175 |
176 | return chunk.intAt(offs + 4 + offset);
177 | }
178 |
179 | public void writeInt(int pointer, int offset, int value) {
180 | validate(pointer);
181 | int page = page(pointer);
182 | int offs = offset(pointer);
183 | if (offs < PAGE_HEADER) {
184 | throw new IllegalArgumentException("Invalid pointer " + Long.toHexString(pointer));
185 | }
186 |
187 | Page chunk = pages.get(page);
188 | if (chunk == null) {
189 | throw new IllegalArgumentException("Broken pointer " + Long.toHexString(pointer) + " page " + Integer.toHexString(page) + " is not allocated");
190 | }
191 | int len = size(chunk.intAt(offs));
192 | if (offs + 4 + len > pageSize) {
193 | new String();
194 | }
195 | // 4 bytes of len is reserved for chunk size
196 | if (offset + 4 > len - 4) {
197 | throw new IndexOutOfBoundsException("Requested offset " + offset + ", chunk lenght " + len);
198 | }
199 |
200 | chunk.putInt(offs + 4 + offset, value);
201 | }
202 |
203 | @Override
204 | public void update(int pointer, ByteChunk bytes) {
205 | validate(pointer);
206 | int page = page(pointer);
207 | int offs = offset(pointer);
208 | if (offs < PAGE_HEADER) {
209 | throw new IllegalArgumentException("Invalid pointer " + Long.toHexString(pointer));
210 | }
211 |
212 | Page pageBuf = pages.get(page);
213 | if (pageBuf == null) {
214 | throw new IllegalArgumentException("Broken pointer " + Long.toHexString(pointer) + " page " + Integer.toHexString(page) + " is not allocated");
215 | }
216 | int len = size(pageBuf.intAt(offs));
217 | if (offs + 4 + len > pageSize) {
218 | new String();
219 | }
220 |
221 | if (bytes.lenght() != (len - 4)) {
222 | throw new IllegalArgumentException("Slot size does match buffer size. Slot:" + (len - 4) + ", buffer:" + bytes.lenght());
223 | }
224 |
225 | pageBuf.putBytes(offs + 4, bytes);
226 |
227 | // TODO debug
228 | // System.err.println("Memory updated " + page + ":" + offs);
229 | // System.err.println("Written: " + bytes);
230 | // dumpPage(pageBuf);
231 | }
232 |
233 | /* (non-Javadoc)
234 | * @see org.gridkit.coherence.offheap.storage.memlog.MemoryStoreBackend#allocate(int, int)
235 | */
	/**
	 * Allocates a chunk of {@code size} bytes from allocation stream
	 * {@code allocNo}; each stream has its own bump allocator to reduce
	 * contention between writer threads.
	 */
	public int allocate(int size, int allocNo) {
		Allocator allocator = allocators[allocNo];
		return allocator.allocate(size);
	}
240 |
241 | private static int align(int len) {
242 | // TODO unhardcode alignment
243 | return (len + 0xF) & (0xFFFFFFF0);
244 | }
245 |
246 | /* (non-Javadoc)
247 | * @see org.gridkit.coherence.offheap.storage.memlog.MemoryStoreBackend#release(int)
248 | */
249 | public void release(int pointer) {
250 | int page = page(pointer);
251 | int offs = offset(pointer);
252 | if (offs < PAGE_HEADER) {
253 | throw new IllegalArgumentException("Invalid pointer " + Long.toHexString(pointer));
254 | }
255 | if (page >= pages.length()) {
256 | // TODO allocate large objects in heap
257 | throw new IllegalArgumentException("Invalid pointer " + Integer.toHexString(pointer));
258 | }
259 |
260 |
261 | Page pageBuf = pages.get(page);
262 | int len = pageBuf.intAt(offs);
263 | pageBuf.updateMemUsage(-len);
264 | memUsed.addAndGet(-len);
265 |
266 | if (!pageBuf.isMarkedForEvacuation() && !pageBuf.isForAllocation()) {
267 | checkPageUsage(page);
268 | }
269 |
270 | if (pageBuf.getMemUsage() == 0 && !pageBuf.isForAllocation()) {
271 | System.out.println("Page " + page + " has no more data");
272 |
273 | releasePage(pageBuf);
274 | }
275 |
276 | if (pageBuf != null) {
277 | // mark chunk as deleted
278 | pageBuf.putInt(offs, 0x80000000 | len);
279 | }
280 | }
281 |
282 | private void checkPageUsage(int page) {
283 | Page pageBuf = pages.get(page);
284 | if (pageBuf != null) {
285 | if ( pageBuf.getMemUsage() < (gcThreshold * pageSize)) {
286 | pageBuf.markForEvacuation();
287 | }
288 | }
289 | }
290 |
291 | /* (non-Javadoc)
292 | * @see org.gridkit.coherence.offheap.storage.memlog.MemoryStoreBackend#collectHashesForEvacuation(int[], int)
293 | */
	/**
	 * Collects up to {@code len} hashes of live chunks on the current
	 * evacuation page into {@code hashes}, advancing the evacuation pointer.
	 * A zero {@code len} means "fill the whole array".
	 *
	 * @return number of hashes actually collected (may be 0 if nothing to evacuate)
	 */
	public int collectHashesForEvacuation(int[] hashes, int len) {
		len = len == 0 ? hashes.length : len;
		evacuationLock.lock();
		try {
			
			int i;
			for (i = 0; i != len; ++i) {
				
				// no cursor: try to pick a new page and park the cursor at its first live chunk
				if (evacuationPointer == 0) {
					evacuationPage = choosePageToEvacuate();
					if (evacuationPage != -1) {
						Page pageBuf = pages.get(evacuationPage);
						if (pageBuf != null) {
							evacuationPointer = nextChunk(pageBuf, pointer(evacuationPage, PAGE_HEADER), true);
						}
					}
				}
				
				if (evacuationPointer == 0) {
					// nothing (more) to evacuate
					break;
				}
				else {
					int page = page(evacuationPointer);
					int offset = offset(evacuationPointer);
					Page pageBuf = pages.get(page);
					if (pageBuf == null) {
						// page got released concurrently; reset cursor
						evacuationPointer = 0;
						break;
					}
					// hash is stored in the first 4 payload bytes after the chunk header
					int hash = pageBuf.intAt(offset + 4);
					evacuationPointer = nextChunk(pageBuf, evacuationPointer, false);
					hashes[i] = hash;
//					if (evacuationPointer == 0) {
//						break;
//					}
				}
			}
			return i;
		}
		finally {
			evacuationLock.unlock();
		}
	}
337 |
	/**
	 * Scans the page table for the least utilized page that is non empty, not
	 * an active allocation target, and not already the evacuation page. If its
	 * utilization is below the scavenge threshold, selects and marks it for
	 * evacuation and returns its index; otherwise returns -1.
	 */
	private int choosePageToEvacuate() {
		int page = -1;
		int minUsed = pageSize;
		for (int i = 0; i != pages.length(); ++i) {
			Page pageBuf = pages.get(i);
			if (pageBuf == null || pageBuf.getMemUsage() == 0 || pageBuf.isForAllocation()) {
				continue;
			}
			if (evacuationPage == i) {
				continue;
			}
			int usage = pageBuf.getMemUsage();
			if (minUsed > usage) {
				minUsed = usage;
				page = i;
			}
		}
		
		// minUsed < pageSize implies at least one candidate was found (page != -1)
		if (minUsed < scavengeGcThreshold * pageSize) {
			System.out.println("Next evacuation page is " + page + " utilization " + ((float)minUsed) / pageSize);
			evacuationPage = page;
			Page pageBuf = pages.get(page);
			if (pageBuf != null) {
				pageBuf.markForEvacuation();
			}
			return page;
		}
		
		return -1;
	}
368 |
369 | private int nextChunk(Page pageBuf, int pointer, boolean inclusive) {
370 | validate(pointer);
371 |
372 | int page = page(pointer);
373 | int offs = offset(pointer);
374 |
375 | pageBuf = pageBuf != null ? pageBuf : pages.get(page);
376 |
377 | if (inclusive && !erased(pageBuf.intAt(offs))) {
378 | return pointer;
379 | }
380 |
381 | int len = align(size(pageBuf.intAt(offs)));
382 | offs += len;
383 |
384 | while(offs < pageSize) {
385 | int sv = pageBuf.intAt(offs);
386 | if (sv == 0) {
387 | break;
388 | }
389 | else {
390 | if (erased(sv)) {
391 | offs += align(size(sv));
392 | continue;
393 | }
394 | else {
395 | return pointer(page, offs);
396 | }
397 | }
398 | }
399 | return 0;
400 | }
401 |
402 | /* (non-Javadoc)
403 | * @see org.gridkit.coherence.offheap.storage.memlog.MemoryStoreBackend#isMarkedForRecycle(int)
404 | */
405 | public boolean isMarkedForRecycle(int pp) {
406 | int page = page(pp);
407 | Page pageBuf = pages.get(page);
408 | // if ((page < 0) || (page > pagesForCleanUp.length)) {
409 | // // for debug
410 | // new String();
411 | // }
412 | // pp may not be valid pointer at the moment of call
413 | return pageBuf != null && pageBuf.isMarkedForEvacuation();
414 | }
415 |
416 | /* (non-Javadoc)
417 | * @see org.gridkit.coherence.offheap.storage.memlog.MemoryStoreBackend#getMemUsage()
418 | */
	/**
	 * Current total of live chunk bytes as tracked by allocate()/release().
	 */
	public long getMemUsage() {
		return memUsed.get();
	}
422 |
423 | // for debug only
	/**
	 * Debug-only pointer consistency check: walks chunk headers from the page
	 * start and verifies {@code pp} lands exactly on a chunk boundary.
	 * Currently DISABLED via the constant {@code if (true) return;} guard —
	 * flip the guard to re-enable during debugging.
	 */
	void validate(int pp) {
		if (true) {
			return;
		}
		else {
			int page = page(pp);
			int offs = offset(pp);
			
			if (offs < PAGE_HEADER) {
				throw new AssertionError();
			}
			
//			if (page == 0) {
//				return;
//			}
			
			if (page <0 || page > pages.length()) {
				throw new AssertionError();
			}
			
			Page buf = pages.get(page);
			if (buf == null) {
				throw new IllegalArgumentException("Pointer " + Integer.toHexString(pp) + " points to non existent page");
			}
			int roll = PAGE_HEADER;
			int oldRoll = 0;
			while(true) {
				// forcing memory fence
				synchronized(buf) {
					if (roll == offs) {
						int size = buf.intAt(roll);
						if (align(size) + offs > pageSize) {
							throw new AssertionError();
						}
						return;
					}
					if (roll >offs) {
						throw new AssertionError();
					}
					int size = 0;
					// retry loop: a concurrent allocator may not have published the header yet
					for (int i = 0; i != 50; ++i) {
						synchronized (buf) {
							size = size(buf.intAt(roll));
							if (size != 0) {
								break;
							}
							Thread.yield();
						}
					}
					if (size == 0) {
						throw new AssertionError();
					}
					oldRoll = roll;
					roll += align(size);
					if (roll >offs) {
						throw new AssertionError();
					}
				}
			}
		}
	}
485 |
486 |
487 |
488 |
489 |
490 | /* (non-Javadoc)
491 | * @see org.gridkit.coherence.offheap.storage.memlog.MemoryStoreBackend#dumpStatistics()
492 | */
	/**
	 * Prints allocator statistics (page counts, utilization per page) to
	 * stdout and resets the page release counter.
	 */
	public void dumpStatistics() {
		
		// bytes still unallocated past each allocation stream's bump pointer
		long unallocated = 0;
		for(int j = 0; j != allocators.length; ++j) {
			int tp = allocators[j].bumpPointer;
			unallocated += pageSize - offset(tp);
		}
		
		StringBuilder buf = new StringBuilder();
		buf.append("Pages allocated: ").append(pagesInUse.get()).append('/').append(pageUsageLimit).append(" (").append(pageSize).append(" per page)").append('\n');
		buf.append("Pages freed since last report: ").append(pageReleaseCounter.get()).append('\n');
		buf.append("Memory used: ").append(memUsed.get()).append('/').append(((long)pageUsageLimit) * pageSize).append('\n');
		buf.append("Page utilization: ").append(String.format("%f", ((double)memUsed.get() + unallocated) / (((double)pagesInUse.get()) * pageSize))).append('\n');
		
		for(int i = 0; i != pages.length(); ++i) {
			Page pageBuf = pages.get(i);
			if (pageBuf == null) {
				continue;
			}
			// us = usable span of page i; shrinks to the bump pointer when the
			// page is a current allocation target
			int us = pageSize;
			for(int j = 0; j != allocators.length; ++j) {
				int tp = allocators[j].bumpPointer;
				if (i == page(tp)) {
					us = offset(tp);
					break;
				}
			}
			
			int pu = pageBuf.memUsed;
			if (pu > 0) {
				buf.append(i).append(" -> ").append(String.format("%f", ((double)pu) / (((double)us))));
				if (us < pageSize) {
					buf.append('*'); // marks an active allocation target
				}
				if (i == evacuationPage) {
					buf.append('!'); // marks the page under evacuation
				}
				buf.append('\n');
			}
		}
		buf.append('\n');
		
		pageReleaseCounter.set(0);
		System.out.println(buf.toString());
	}
538 |
539 |
	/**
	 * Carves fixed size pages out of large direct (off-heap) ByteBuffers.
	 * A semaphore bounds the number of live pages; a BitSet tracks which
	 * page slots are in use. Direct buffers are allocated lazily, one
	 * 64MiB buffer at a time, and sliced into pages.
	 */
	class OffHeapPageAllocator {
		
		// lazily allocated direct buffers; each holds pagesPerBuffer pages
		final ByteBuffer[] buffers;
		final int pageSize;
		final int pageCount;
		final int pagesPerBuffer;
		// bit set per page slot: set = slot occupied
		final BitSet pages;
		// bounds the number of concurrently allocated pages (fair)
		final Semaphore allocPermits;
		final int directPageSize;
		
		public OffHeapPageAllocator(int pageSize, int pageCount) {
			this.directPageSize = DIRECT_BUFFER_SIZE > pageSize ? DIRECT_BUFFER_SIZE : pageSize;
			this.pageSize = pageSize;
			this.pageCount = pageCount;
			pagesPerBuffer = directPageSize / pageSize;
			if (directPageSize % pageSize != 0) {
				throw new AssertionError("Page size should be power of 2! (pageSize: " + pageSize + ")");
			}
			
			buffers = new ByteBuffer[(pageCount + pagesPerBuffer - 1) / pagesPerBuffer];
			pages = new BitSet(pageCount);
			allocPermits = new Semaphore(pageCount, true);
		}
		
		/** Allocates a page, blocking until one is available. */
		public Page allocate() {
			allocPermits.acquireUninterruptibly();
			return allocBuffer();
		}
		
		/** Allocates a page, or returns null immediately if the page budget is exhausted. */
		public Page tryAllocate() {
			if (allocPermits.tryAcquire()) {
				return allocBuffer();
			}
			else {
				return null;
			}
		}
		
		// caller must hold an alloc permit; synchronized to protect BitSet and buffers[]
		private synchronized Page allocBuffer() {
			int bufferId = pages.nextClearBit(0);
			if (bufferId >= pageCount) {
				// should be unreachable while permits are accounted correctly
				throw new IllegalArgumentException("No more buffers");
			}
			
			pages.set(bufferId);
			
			ByteBuffer master = buffers[bufferId / pagesPerBuffer];
			if (master == null) {
				master = buffers[bufferId / pagesPerBuffer] = ByteBuffer.allocateDirect(directPageSize);
			}
			
			int offset = pageSize * (bufferId % pagesPerBuffer);
			// working under critical section, not need to make a defensive copy of master buffer
			ByteBuffer bb = master;
			try {
				bb.position(0);
				bb.limit(bb.capacity());
				bb.position(offset);
				bb.limit(offset + pageSize);
				bb = bb.slice();
			}
			catch(IllegalArgumentException e) {
				e.printStackTrace();
				throw e;
			}
			
			// zeroing buffer
			for (int i = 0; i != pageSize; i += 8) {
				bb.putLong(i, 0);
			}
			
			return new Page(this, bb, bufferId);
		}
		
		/** Returns page slot {@code bufferId} to the pool and releases its permit. */
		public synchronized void release(int bufferId) {
			if (!pages.get(bufferId)) {
				// for Debug
				new String();
			}
			pages.clear(bufferId);
			allocPermits.release();
		}
	}
623 |
	// Transitions 'page' toward release through the Page status state machine
	// (STORAGE -> FOR_EVACUATION -> RELEASED) and returns it to the page allocator.
	// Safe to call concurrently: CAS-based status transitions ensure the physical
	// release happens exactly once.
	private void releasePage(Page page) {
		// If the STORAGE -> FOR_EVACUATION CAS fails, the page was already picked for
		// evacuation; make sure the evacuation cursor is not left pointing at a page
		// that is going away.
		if (!page.markForEvacuation()) {
			evacuationLock.lock();
			try {
				if (evacuationPage == page.getPageNo()) {
					// reset evacuation state so the scavenger picks a fresh page
					evacuationPage = -1;
					evacuationPointer = 0;
				}
			}
			finally {
				evacuationLock.unlock();
			}
		}
		// Only the thread winning this CAS performs the physical release.
		if (page.markForRelease()) {
			pages.compareAndSet(page.getPageNo(), page, null);
			pagesInUse.decrementAndGet();
			pageReleaseCounter.incrementAndGet();
			page.release();
		}
	}
644 |
645 | private static AtomicIntegerFieldUpdater ALLOC_BUMP_POINTER = AtomicIntegerFieldUpdater.newUpdater(Allocator.class, "bumpPointer");
646 |
	// Lock-free bump-pointer allocator over the shared page table.
	// Fast path: CAS-advance 'bumpPointer' within the current page.
	// Slow path (page full): take 'lock', open a fresh page and retire the old one.
	class Allocator {

		// diagnostic name of this allocation stream (used by toString)
		String name;
		// guards the slow path: switching to a new page
		ReentrantLock lock;
		// packed pointer (page:offset) of the next free byte; advanced via CAS
		volatile int bumpPointer;

		public Allocator(String name) {
			this.name = name;
			lock = new ReentrantLock();
			// start with a fresh page, skipping the page header
			bumpPointer = pointer(newPage(0), PAGE_HEADER);
		}

		// Allocates 'size' payload bytes (plus a 4-byte length header) and returns
		// the packed pointer to the chunk. Throws IllegalArgumentException when the
		// request exceeds a quarter of the page size.
		public int allocate(int size) {
			if (size > pageSize >> 2) {
				// TODO allocate large objects in heap
				throw new IllegalArgumentException("Size is too large");
			}
			while(true) {
				int len = size;
				len += 4; // account for the 4-byte chunk length header
				int alen = align(len);

				int pp;
				while(true) {
					pp = bumpPointer;
					int offs;
					offs = offset(pp);
					// this is a dirty way to avoid perfect page fit edge case
					if (offs + alen + 1> pageSize) {
						// slow path: current page cannot fit the chunk, open a new one
						lock.lock();
						try {
							// re-read under lock; another thread may have switched pages already
							pp = bumpPointer;
							offs = offset(pp);
							if (offs + alen + 1> pageSize) {
								int page = page(pp);
								int newPage = newPage(page + 1);
								System.out.println("Page allocated " + newPage);
								bumpPointer = pointer(newPage,PAGE_HEADER);
								// retire the filled page; release it right away if it is already empty
								Page oldPage = pages.get(page);
								oldPage.markForStorage();
								if (oldPage.getMemUsage() == 0) {
									releasePage(oldPage);
								}
							}
							else {
								// lost the race: page was switched by another thread, retry fast path
								continue;
							}
						}
						finally {
							lock.unlock();
						}
					}
					else {
						Page pageBuf = pages.get(page(pp));
						if (pageBuf == null) {
							// page vanished under us (concurrent release), retry
							continue;
						}

						// have to speculatively increment memory usage counter to prevent concurrent release of page
						pageBuf.updateMemUsage(len);
						int npp = pointer(page(pp), offs + alen);
						if (ALLOC_BUMP_POINTER.compareAndSet(this, pp, npp)) {
							break;
						}
						else {
							// CAS lost: undo the speculative usage bump and release the page
							// if it became empty and is no longer the allocation target
							pageBuf.updateMemUsage(-len);
							if (!pageBuf.isForAllocation() && pageBuf.getMemUsage() == 0) {
								releasePage(pageBuf);
							}
						}
					}
				}

				int page = page(pp);
				int offs = offset(pp);
				Page pageBuf = pages.get(page);
				// sanity check: freshly allocated memory must be zeroed
				if (pageBuf.intAt(offs) != 0) {
					int xx = bumpPointer;
					System.err.println("Dirty memory allocated!!!");
					System.err.println("Allocation pointer " + page(xx) + ":" + offset(xx) + " stream " + this);
					dumpPage(pageBuf);
					throw new AssertionError("Allocation have failed (" + size + " requested). Dirty memory: " + page + ":" + offs);
				}
				// stamp the chunk length header and account the allocation
				pageBuf.putInt(offs, len);
				memUsed.addAndGet(len);
				fence += 2;
				validate(pp);
				return pp;
			}
		}

		// guarded by 'lock' field
		// Allocates a fresh physical page and publishes it into the first free slot
		// of the page table at or after 'start'; parks briefly when the table is full
		// to let the scavenger recover pages.
		private int newPage(int start) {
			// code looks little awkward with off-heap allocator
			// refactoring required
			Page pageBuf = pageAllocator.allocate();
			pagesInUse.incrementAndGet();
			while(true) {
				int nextSlot = page(bumpPointer);
				for(int i = 0; i != pageUsageLimit; ++i) {
					int page = (nextSlot + i) % pageUsageLimit;
					if (pages.get(page) == null) {
						if (pages.compareAndSet(page, null, pageBuf)) {
							pageBuf.setPageNo(page);
							pageBuf.markForAllocation();
							return page;
						}
						else {
							// slot was taken concurrently, try the next one
							continue;
						}
					}
				}
				// hit memory limit, should give scavenger some time to
				// recover pages
				System.out.println("Out of pages");
				LockSupport.parkNanos(TimeUnit.MILLISECONDS.toNanos(100));
			}
		}

		@Override
		public String toString() {
			return name;
		}
	}
775 |
776 |
777 | static class Page {
778 |
779 | private static int STATUS_NEW = 0;
780 | private static int STATUS_ALLOCATING = 1;
781 | private static int STATUS_STORAGE = 2;
782 | private static int STATUS_FOR_EVACUATION = 3;
783 | private static int STATUS_RELEASED = 4;
784 |
785 | private static AtomicIntegerFieldUpdater MEM_USED = AtomicIntegerFieldUpdater.newUpdater(Page.class, "memUsed");
786 | private static AtomicIntegerFieldUpdater STATUS = AtomicIntegerFieldUpdater.newUpdater(Page.class, "status");
787 |
788 | private final OffHeapPageAllocator allocator;
789 | private final ByteBuffer bytes;
790 | private final int bufferId;
791 |
792 | private int pageNo = -1;
793 | // if true page is being used for allocating new chunks and should not be scavenged
794 | private volatile int status;
795 | private volatile int memUsed;
796 |
797 | public Page(OffHeapPageAllocator allocator, ByteBuffer buffer, int bufferId) {
798 | this.allocator = allocator;
799 | this.bytes = buffer;
800 | this.bufferId = bufferId;
801 | }
802 |
803 | public int getPageNo() {
804 | return pageNo;
805 | }
806 |
807 | public void setPageNo(int pageNo) {
808 | this.pageNo = pageNo;
809 | }
810 |
811 | public boolean isForAllocation() {
812 | return status == STATUS_ALLOCATING;
813 | }
814 |
815 | public void markForAllocation() {
816 | STATUS.compareAndSet(this, STATUS_NEW, STATUS_ALLOCATING);
817 | }
818 |
819 | public void markForStorage() {
820 | STATUS.compareAndSet(this, STATUS_ALLOCATING, STATUS_STORAGE);
821 | }
822 |
823 | public boolean isMarkedForEvacuation() {
824 | return status == STATUS_FOR_EVACUATION;
825 | }
826 |
827 | public boolean markForEvacuation() {
828 | return STATUS.compareAndSet(this, STATUS_STORAGE, STATUS_FOR_EVACUATION);
829 | }
830 |
831 | public boolean markForRelease() {
832 | return STATUS.compareAndSet(this, STATUS_FOR_EVACUATION, STATUS_RELEASED);
833 | }
834 |
835 | public int getMemUsage() {
836 | return memUsed;
837 | }
838 |
839 | public void updateMemUsage(int delta) {
840 | MEM_USED.getAndAdd(this, delta);
841 | }
842 |
843 | public int intAt(int offs) {
844 | return bytes.getInt(offs);
845 | }
846 |
847 | public void putInt(int offs, int value) {
848 | bytes.putInt(offs, value);
849 | }
850 |
851 | public ByteChunk subChunk(int offs, int len) {
852 | byte[] chunk = new byte[len];
853 | ByteBuffer bb = bytes.duplicate();
854 | bb.position(bb.position() + offs);
855 | bb.get(chunk);
856 | return new ByteChunk(chunk);
857 | }
858 |
859 | public void putBytes(int offs, ByteChunk chunk) {
860 | ByteBuffer bb = bytes.duplicate();
861 | bb.position(bb.position() + offs);
862 | bb.put(chunk.array(), chunk.offset(), chunk.lenght());
863 | }
864 |
865 | public void release() {
866 | allocator.release(bufferId);
867 | }
868 | }
869 |
870 | public void dumpPage(Page page) {
871 | System.err.println("Page dump, page " + page.getPageNo());
872 | int offs = PAGE_HEADER;
873 | while(offs < pageSize) {
874 | int size = page.intAt(offs);
875 | if (size == 0) {
876 | System.err.println("End of page 0x" + Integer.toHexString(offs) + "(" + offs + ")");
877 | break;
878 | }
879 | System.err.println("Chunk 0x" + Integer.toHexString(offs) + "(" + offs + ")" + ", size=" + size(size) + ", erased=" + erased(size));
880 | size = align(size(size));
881 | ByteChunk chunk = page.subChunk(offs, size);
882 | System.err.println(chunk.toString());
883 | offs += size;
884 | }
885 | }
886 | }
887 |
--------------------------------------------------------------------------------
/jmemstore/src/main/java/org/gridkit/offheap/jmemstore/PagedBinaryStoreManager.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2011 Alexey Ragozin
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | *
16 | */
17 | package org.gridkit.offheap.jmemstore;
18 |
19 | import java.lang.Thread.State;
20 | import java.util.ArrayList;
21 | import java.util.List;
22 | import java.util.concurrent.TimeUnit;
23 | import java.util.concurrent.locks.LockSupport;
24 |
25 | /**
26 | * @author Alexey Ragozin (alexey.ragozin@gmail.com)
27 | */
28 | public class PagedBinaryStoreManager implements BinaryStoreManager {
29 |
30 | static int EMPTY = 0;
31 |
32 | static int HASH_POS = 0;
33 | static int KEY_SIZE_POS = 4;
34 | static int VALUE_SIZE_POS = 8;
35 | static int DATA_POS = 12;
36 |
37 | static int ALLOC_NEW_VALUE = 0;
38 | static int ALLOC_NEW_LIST = 0;
39 | static int ALLOC_RELOCATE_VALUE = 0;
40 | static int ALLOC_HASH_SEGMENT = 1;
41 |
42 | private static long MEM_DIAG_REPORT_PERIOD = TimeUnit.SECONDS.toNanos(10);
43 |
44 | private final String name;
45 | private List tables = new ArrayList();
46 | private MemoryStoreBackend pageManager;
47 | private Thread maintenanceDaemon;
48 |
49 | public PagedBinaryStoreManager(String name, MemoryStoreBackend pageManager) {
50 | this.name = name;
51 | this.pageManager = pageManager;
52 | this.maintenanceDaemon = createMaintenanceThread();
53 | }
54 |
55 | private Thread createMaintenanceThread() {
56 | Thread thread = new Thread(new Runnable() {
57 | @Override
58 | public void run() {
59 | maintenanceCycle();
60 | }
61 | });
62 | thread.setName("PagedMemoryBinaryStore-" + name + "-ServiceThread");
63 | thread.setDaemon(true);
64 | return thread;
65 | }
66 |
67 | @Override
68 | public synchronized BinaryKeyValueStore createKeyValueStore() {
69 | BinaryHashTable hash = new BinaryHashTable(pageManager, 512);
70 | tables.add(hash);
71 | if (maintenanceDaemon.getState() == State.NEW) {
72 | maintenanceDaemon.start();
73 | }
74 | return hash;
75 | }
76 |
77 | @Override
78 | public synchronized BinaryKeyValueStore createKeyValueStore(int segments) {
79 | BinaryHashTable hash = new BinaryHashTable(pageManager, segments);
80 | tables.add(hash);
81 | if (maintenanceDaemon.getState() == State.NEW) {
82 | maintenanceDaemon.start();
83 | }
84 | return hash;
85 | }
86 |
87 | @Override
88 | public synchronized IdentityMapping createIdentityMapping() {
89 | IdentityMappingHashTable hash = new IdentityMappingHashTable(pageManager, 512);
90 | tables.add(hash);
91 | if (maintenanceDaemon.getState() == State.NEW) {
92 | maintenanceDaemon.start();
93 | }
94 | return hash;
95 | }
96 |
97 | @Override
98 | public synchronized void destroy(MemoryConsumer store) {
99 | // TODO check owner
100 | int n = tables.indexOf(store);
101 | tables.remove(n);
102 | store.destroy();
103 | }
104 |
105 | @SuppressWarnings("deprecation")
106 | public synchronized void close() {
107 | List tables = new ArrayList(this.tables);
108 | for(MemoryConsumer table: tables) {
109 | destroy(table);
110 | }
111 | if (maintenanceDaemon.getState() != State.NEW) {
112 | // TODO graceful death
113 | maintenanceDaemon.stop();
114 | // try {
115 | // maintenanceDaemon.join();
116 | // } catch (InterruptedException e) {
117 | // // ignore
118 | // }
119 | }
120 | }
121 |
122 | private void maintenanceCycle() {
123 | int n = 0;
124 | int idle = 0;
125 | long diagTimestamp = System.nanoTime();
126 |
127 | int[] evacuationHashes = new int[1024];
128 |
129 | MemoryConsumer[] tableSet = new MemoryConsumer[0];
130 | while(true) {
131 |
132 | if (n % 500 == 0) {
133 | synchronized(this) {
134 | tableSet = tables.toArray(tableSet);
135 | }
136 | }
137 |
138 | if (diagTimestamp + MEM_DIAG_REPORT_PERIOD < System.nanoTime()) {
139 | pageManager.dumpStatistics();
140 | synchronized (this) {
141 | int x = 0;
142 | for(MemoryConsumer consumer : tables) {
143 | StringBuilder buf = new StringBuilder();
144 | // buf.append("Hashtable #" + x).append("\n");
145 | // buf.append("Size: ").append(table.size()).append("\n");
146 | // buf.append("Capacity: ").append(table.getTableCapacity()).append("\n");
147 | // buf.append("Load factor: ").append(String.format("%f", 1.0d * table.size() / table.getTableCapacity())).append("\n");
148 | // buf.append("Hash slots usage: ").append(String.format("%f", 1.0d - 1.0d * table.getTableGapNumber() / table.getTableCapacity())).append("\n");
149 |
150 | if (consumer instanceof BinaryHashTable) {
151 | BinaryHashTable table = (BinaryHashTable) consumer;
152 | buf.append("Hashtable #" + x);
153 | buf.append(" | ");
154 | buf.append("Size: ").append(table.size());
155 | buf.append(" | ");
156 | buf.append("Capacity: ").append(table.getTableCapacity());
157 | buf.append(" | ");
158 | buf.append("Load factor: ").append(String.format("%f", 1.0d * table.size() / table.getTableCapacity()));
159 | // buf.append(" | ");
160 | // buf.append("Hash slots usage: ").append(String.format("%f", 1.0d - 1.0d * table.getTableGapNumber() / table.getTableCapacity()));
161 | System.out.println(buf.toString());
162 | }
163 | ++x;
164 | }
165 | }
166 | diagTimestamp = System.nanoTime();
167 | }
168 |
169 | if (tableSet.length == 0) {
170 | ++idle;
171 | }
172 | else {
173 | int len = pageManager.collectHashesForEvacuation(evacuationHashes, 0);
174 | if (len == 0) {
175 | ++idle;
176 | }
177 | else {
178 | evacuateEntries(tableSet, evacuationHashes, len);
179 | Thread.yield();
180 | }
181 | }
182 |
183 | ++n;
184 |
185 | if (idle > 10) {
186 | LockSupport.parkNanos(TimeUnit.MILLISECONDS.toNanos(300));
187 | idle = 0;
188 | }
189 | }
190 | }
191 |
192 | private void evacuateEntries(MemoryConsumer[] tableSet, int[] evacuationHashes, int hashCount) {
193 | for(MemoryConsumer table: tableSet) {
194 | // table.tableLock.readLock().lock();
195 | try {
196 | table.recycleHashes(evacuationHashes, hashCount);
197 | }
198 | finally {
199 | // table.tableLock.readLock().unlock();
200 | }
201 | }
202 |
203 | }
204 |
205 | }
206 |
--------------------------------------------------------------------------------
/jmemstore/src/test/java/org/gridkit/offheap/jmemstore/BaseBinaryStoreManagerTest.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2011 Alexey Ragozin
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | *
16 | */
17 | package org.gridkit.offheap.jmemstore;
18 |
19 | import java.util.HashMap;
20 | import java.util.Map;
21 | import java.util.Random;
22 |
23 | import junit.framework.Assert;
24 |
25 | import org.gridkit.offheap.jmemstore.BinaryKeyValueStore;
26 | import org.gridkit.offheap.jmemstore.BinaryStoreManager;
27 | import org.gridkit.offheap.jmemstore.ByteChunk;
28 | import org.junit.Test;
29 |
30 | /**
31 | * @author Alexey Ragozin (alexey.ragozin@gmail.com)
32 | */
33 | public abstract class BaseBinaryStoreManagerTest {
34 |
35 | @Test
36 | public void test_basics() {
37 |
38 | BinaryStoreManager storeMan = createSmallPageManager();
39 |
40 | BinaryKeyValueStore store = storeMan.createKeyValueStore();
41 |
42 | {
43 | ByteChunk key = new ByteChunk("AAAA".getBytes());
44 | ByteChunk value = new ByteChunk("BBBB".getBytes());
45 |
46 | store.put(key, value);
47 |
48 | Assert.assertEquals(1, store.size());
49 |
50 | ByteChunk value2 = store.get(key);
51 |
52 | Assert.assertTrue(value.sameBytes(value2));
53 |
54 | ByteChunk value3 = new ByteChunk("CCCC".getBytes());
55 | store.put(key, value3);
56 |
57 | Assert.assertEquals(1, store.size());
58 |
59 | value2 = store.get(key);
60 | Assert.assertTrue(value3.sameBytes(value2));
61 |
62 | store.remove(key);
63 |
64 | Assert.assertEquals(0, store.size());
65 | Assert.assertTrue(store.get(key) == null);
66 | }
67 |
68 | {
69 | ByteChunk key1 = new ByteChunk(new byte[8]); // this way we can fabricate same hash codes
70 | ByteChunk key2 = new ByteChunk(new byte[9]); // this way we can fabricate same hash codes
71 | key1.putInt(0, -1);
72 | key1.putInt(4, 100);
73 | key2.putInt(0, -1);
74 | key2.set(4, (byte) 0x00);
75 | key2.putInt(5, 100);
76 |
77 | store.put(key1, key1);
78 | store.put(key2, key2);
79 | Assert.assertEquals(2, store.size());
80 |
81 | ByteChunk val;
82 | val = store.get(key1);
83 | Assert.assertTrue(key1.sameBytes(val));
84 |
85 | val = store.get(key2);
86 | Assert.assertTrue(key2.sameBytes(val));
87 |
88 | store.remove(key1);
89 | Assert.assertEquals(1, store.size());
90 | Assert.assertTrue(store.get(key1) == null);
91 |
92 | val = store.get(key2);
93 | Assert.assertTrue(key2.sameBytes(val));
94 |
95 | store.clear();
96 |
97 | Assert.assertEquals(0, store.size());
98 | Assert.assertNull(store.get(key1));
99 | Assert.assertNull(store.get(key2));
100 | }
101 |
102 | storeMan.close();
103 | }
104 |
105 | // protected BinaryStoreManager createSmallPageManager() {
106 | // PageLogManager pageManager = new PageLogManager(8 << 10, 16, 2);
107 | //
108 | // PagedMemoryBinaryStoreManager storeMan = new PagedMemoryBinaryStoreManager("test_basics", pageManager);
109 | // return storeMan;
110 | // }
111 |
112 | protected abstract BinaryStoreManager createSmallPageManager();
113 |
114 | @Test
115 | public void large_consistency_test_basics() {
116 |
117 | BinaryStoreManager storeMan = createLargeStoreManager();
118 |
119 | BinaryKeyValueStore store = storeMan.createKeyValueStore();
120 |
121 | Random rnd = new Random(1);
122 | Map refMap = new HashMap();
123 |
124 | int objNum = 10000;
125 | int holeNum = 2000;
126 |
127 | for(int n = 0; n != 100000; ++n) {
128 |
129 |
130 | if (n > 85000 && objNum > 0) {
131 | --objNum;
132 | ++holeNum;
133 | }
134 |
135 | if (n % 500 == 0) {
136 | compare(refMap, store, objNum, holeNum);
137 | }
138 |
139 |
140 | int size = refMap.size();
141 | if (size > objNum) {
142 | if (n > 85000 || ((size - objNum) >= rnd.nextInt(holeNum))) {
143 | while(true) {
144 | String key;
145 | if (size < (objNum + holeNum ) / 8) {
146 | key = refMap.keySet().iterator().next();
147 | }
148 | else{
149 | key = randomKey(rnd, objNum, holeNum);
150 | }
151 | boolean hit = refMap.remove(key) != null;
152 | // System.out.println("remove(" + key + ")");
153 | store.remove(toByteChunk(key));
154 | Assert.assertEquals(refMap.size(), store.size());
155 | if (hit) {
156 | break;
157 | }
158 | }
159 | continue;
160 | }
161 | }
162 |
163 | String key = randomKey(rnd, objNum, holeNum);
164 | String val = randomString(rnd.nextInt(10) + 20, rnd);
165 |
166 | if (refMap.containsKey(key)) {
167 | // System.out.println("insert(" + key + ", " + val + "), size=" + refMap.size());
168 | }
169 | else {
170 | // System.out.println("update(" + key + ", " + val + "), size=" + refMap.size());
171 | }
172 | if (key.equals("108071")) {
173 | new String();
174 | }
175 | if (refMap.containsKey("109497")) {
176 | ByteChunk bc;
177 | if ((bc = store.get(toByteChunk("109497"))) == null) {
178 | Assert.assertFalse(true);
179 | }
180 | store.put(toByteChunk("109497"), bc);
181 | }
182 | refMap.put(key, val);
183 | if (n == 26) {
184 | new String();
185 | }
186 | store.put(toByteChunk(key), toByteChunk(val));
187 | if (refMap.size() != store.size()) {
188 | Assert.assertEquals(refMap.size(), store.size());
189 | }
190 | ByteChunk bval = store.get(toByteChunk(key));
191 | Assert.assertTrue(toByteChunk(val).sameBytes(bval));
192 |
193 | if (refMap.containsKey("109497")) {
194 | if (store.get(toByteChunk("109497")) == null) {
195 | Assert.assertFalse(true);
196 | }
197 | }
198 |
199 | if (n == 90000) {
200 | // test clear correctness
201 | store.clear();
202 | refMap.clear();
203 | }
204 | }
205 |
206 | compare(refMap, store, objNum, holeNum);
207 |
208 | storeMan.close();
209 | }
210 |
211 | // protected BinaryStoreManager createLargeStoreManager() {
212 | // PageLogManager pageManager = new PageLogManager(64 << 10, 256, 2);
213 | //
214 | // PagedMemoryBinaryStoreManager storeMan = new PagedMemoryBinaryStoreManager("test_basics", pageManager);
215 | // return storeMan;
216 | // }
217 |
218 | abstract protected BinaryStoreManager createLargeStoreManager();
219 |
220 | private static void compare(Map ref, BinaryKeyValueStore store, int objNum, int holeNum) {
221 | for(int i = 0; i != objNum + holeNum; ++i) {
222 | String key = String.valueOf(100000l + i);
223 |
224 | String val = ref.get(key);
225 | ByteChunk bval = store.get(toByteChunk(key));
226 |
227 | if (val == null) {
228 | Assert.assertTrue(bval == null);
229 | }
230 | else {
231 | if (bval == null) {
232 | System.out.println("Mismatch: Missing key " + key);
233 | Assert.assertFalse(true);
234 | }
235 | Assert.assertTrue(bval.sameBytes(toByteChunk(val)));
236 | }
237 | }
238 | }
239 |
240 | private static ByteChunk toByteChunk(String val) {
241 | return new ByteChunk(val.getBytes());
242 | }
243 |
244 | private static String randomKey(Random rnd, int objNum, int holeNum) {
245 | long key = 100000 + rnd.nextInt(objNum + holeNum);
246 | return String.valueOf(key);
247 | }
248 |
249 | static char[] CHARS_BUFFER = new char[1024];
250 | public static String randomString(int len, Random rnd) {
251 | if (len > 1024 || len < 0) {
252 | throw new IllegalArgumentException("String length exceeds buffer size");
253 | }
254 | for(int i = 0; i != len; ++i) {
255 | CHARS_BUFFER[i] = (char)('A' + rnd.nextInt(23));
256 | }
257 | return new String(CHARS_BUFFER, 0, len);
258 | }
259 | }
260 |
--------------------------------------------------------------------------------
/jmemstore/src/test/java/org/gridkit/offheap/jmemstore/CRC32SpeedTest.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2011 Alexey Ragozin
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | *
16 | */
17 | package org.gridkit.offheap.jmemstore;
18 |
19 | import org.gridkit.offheap.jmemstore.BinHash;
20 | import org.gridkit.offheap.jmemstore.ByteChunk;
21 | import org.junit.Ignore;
22 |
23 | /**
24 | * @author Alexey Ragozin (alexey.ragozin@gmail.com)
25 | */
26 | @Ignore
27 | public class CRC32SpeedTest {
28 |
29 | public static void main(String[] args) {
30 | HashSpeedTest test = new HashSpeedTest() {
31 | @Override
32 | public int hash(byte[] data, int offs, int len) {
33 | return BinHash.hash(new ByteChunk(data, offs, len));
34 | }
35 | };
36 |
37 | test.init();
38 | test.start();
39 | }
40 |
41 | }
42 |
--------------------------------------------------------------------------------
/jmemstore/src/test/java/org/gridkit/offheap/jmemstore/HashSpeedTest.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2011 Alexey Ragozin
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | *
16 | */
17 | package org.gridkit.offheap.jmemstore;
18 |
19 | import java.util.Random;
20 |
21 | import org.junit.Ignore;
22 |
23 | /**
24 | * @author Alexey Ragozin (alexey.ragozin@gmail.com)
25 | */
26 | @Ignore
27 | public abstract class HashSpeedTest {
28 |
29 | private byte[][] dataset = new byte[64][];
30 | private int datasetSize = 0;
31 | private long start;
32 | private int n;
33 | private long bytesProcessed;
34 | private int hashsum;
35 |
36 |
37 | public void init() {
38 | Random rnd = new Random(0);
39 | for(int i = 0; i != dataset.length; ++i) {
40 | int len = rnd.nextInt(100000) + 10000;
41 | dataset[i] = new byte[len];
42 | rnd.nextBytes(dataset[i]);
43 | datasetSize += len;
44 | }
45 |
46 | for(int i = 0; i != 1000; ++i) {
47 | hash(dataset[i % dataset.length]);
48 | }
49 | }
50 |
51 | public void start() {
52 | n = 0;
53 | start = System.currentTimeMillis();
54 | bytesProcessed = 0;
55 | hashsum = 0;
56 | while(true) {
57 | iterate();
58 | }
59 | }
60 |
61 | private void iterate() {
62 | while(true) {
63 | hashsum += hash(dataset[n % dataset.length]);
64 | bytesProcessed += dataset[n % dataset.length].length;
65 | ++n;
66 | if (n % 100000 == 0) {
67 | long now = System.currentTimeMillis();
68 | System.out.println("Processed " + (bytesProcessed >> 20) + "mb in " + ((now - start) / 1000) + "sec");
69 | System.out.println("Speed: " + ((bytesProcessed * 1000 / (now - start)) >> 20) + "mb/s");
70 | start = System.currentTimeMillis();
71 | bytesProcessed = 0;
72 | break;
73 | }
74 | }
75 | }
76 |
77 | public int hash(byte[] data) {
78 | return hash(data, 0, data.length);
79 | }
80 |
81 | public abstract int hash(byte[] data, int offs, int len);
82 | }
83 |
--------------------------------------------------------------------------------
/jmemstore/src/test/java/org/gridkit/offheap/jmemstore/IdentityMappingHashTableTest.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2011 Alexey Ragozin
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | *
16 | */
17 | package org.gridkit.offheap.jmemstore;
18 |
19 | import org.gridkit.offheap.jmemstore.IdentityMapping;
20 | import org.gridkit.offheap.jmemstore.InHeapMemoryStoreBackend;
21 | import org.gridkit.offheap.jmemstore.MemoryConsumer;
22 | import org.gridkit.offheap.jmemstore.PagedBinaryStoreManager;
23 | import org.junit.Test;
24 |
25 | /**
26 | * @author Alexey Ragozin (alexey.ragozin@gmail.com)
27 | */
28 | public class IdentityMappingHashTableTest {
29 |
30 | static InHeapMemoryStoreBackend pageManager = new InHeapMemoryStoreBackend(64 << 10, 256, 2);
31 | static PagedBinaryStoreManager storeMan = new PagedBinaryStoreManager("IdentityMappingHashTableTest", pageManager);
32 |
33 | @Test
34 | public void complexTest() {
35 |
36 | IdentityMapping mapping = storeMan.createIdentityMapping();
37 |
38 | RandomIdentityMappingTester tester = new RandomIdentityMappingTester();
39 |
40 | tester.start(mapping);
41 | storeMan.destroy((MemoryConsumer) mapping);
42 |
43 | }
44 |
45 | @Test
46 | public void longTest() {
47 |
48 | IdentityMapping mapping = storeMan.createIdentityMapping();
49 |
50 | RandomIdentityMappingTester tester = new RandomIdentityMappingTester();
51 | tester.variety = 5000;
52 | tester.populationPhase = 10000;
53 | tester.mainPhase = 100000;
54 | tester.removalPhase = 20000;
55 |
56 | tester.start(mapping);
57 | storeMan.destroy((MemoryConsumer) mapping);
58 |
59 | }
60 | }
61 |
--------------------------------------------------------------------------------
/jmemstore/src/test/java/org/gridkit/offheap/jmemstore/MurmurSpeedTest.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2011 Alexey Ragozin
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | *
16 | */
17 | package org.gridkit.offheap.jmemstore;
18 |
19 | import org.gridkit.offheap.jmemstore.BinHash;
20 | import org.junit.Ignore;
21 |
22 | /**
23 | * @author Alexey Ragozin (alexey.ragozin@gmail.com)
24 | */
25 | @Ignore
26 | public class MurmurSpeedTest {
27 |
28 | public static void main(String[] args) {
29 | HashSpeedTest test = new HashSpeedTest() {
30 | @Override
31 | public int hash(byte[] data, int offs, int len) {
32 | return BinHash.murmur3_hash(data, offs, len, -1);
33 | }
34 | };
35 |
36 | test.init();
37 | test.start();
38 | }
39 |
40 | }
41 |
--------------------------------------------------------------------------------
/jmemstore/src/test/java/org/gridkit/offheap/jmemstore/PagedMemoryBinaryStoreManager2OffHeapTest.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2011 Alexey Ragozin
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | *
16 | */
17 | package org.gridkit.offheap.jmemstore;
18 |
19 | import org.gridkit.offheap.jmemstore.BinaryStoreManager;
20 | import org.gridkit.offheap.jmemstore.MemoryStoreBackend;
21 | import org.gridkit.offheap.jmemstore.OffHeapMemoryStoreBackend;
22 | import org.gridkit.offheap.jmemstore.PagedBinaryStoreManager;
23 | import org.junit.Ignore;
24 | import org.junit.Test;
25 |
26 | /**
27 | * @author Alexey Ragozin (alexey.ragozin@gmail.com)
28 | */
29 | public class PagedMemoryBinaryStoreManager2OffHeapTest extends BaseBinaryStoreManagerTest {
30 |
31 | @Test @Override
32 | public void test_basics() {
33 | // TODO Auto-generated method stub
34 | super.test_basics();
35 | }
36 |
37 | @Test @Override
38 | public void large_consistency_test_basics() {
39 | // TODO Auto-generated method stub
40 | super.large_consistency_test_basics();
41 | }
42 |
43 | @Ignore @Override
44 | protected BinaryStoreManager createLargeStoreManager() {
45 | // InHeapMemoryStoreBackend pageManager = new InHeapMemoryStoreBackend(64 << 10, 256, 2);
46 | MemoryStoreBackend pageManager = new OffHeapMemoryStoreBackend(64 << 10, 256, 2);
47 |
48 | PagedBinaryStoreManager storeMan = new PagedBinaryStoreManager("test_basics", pageManager);
49 | return storeMan;
50 | }
51 |
52 | @Ignore @Override
53 | protected BinaryStoreManager createSmallPageManager() {
54 | // InHeapMemoryStoreBackend pageManager = new InHeapMemoryStoreBackend(8 << 10, 16, 2);
55 | MemoryStoreBackend pageManager = new OffHeapMemoryStoreBackend(8 << 10, 16, 2);
56 |
57 | PagedBinaryStoreManager storeMan = new PagedBinaryStoreManager("test_basics", pageManager);
58 | return storeMan;
59 | }
60 |
61 |
62 |
63 | }
64 |
--------------------------------------------------------------------------------
/jmemstore/src/test/java/org/gridkit/offheap/jmemstore/PagedMemoryBinaryStoreManager2Test.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2011 Alexey Ragozin
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | *
16 | */
17 | package org.gridkit.offheap.jmemstore;
18 |
19 | import org.gridkit.offheap.jmemstore.BinaryStoreManager;
20 | import org.gridkit.offheap.jmemstore.InHeapMemoryStoreBackend;
21 | import org.gridkit.offheap.jmemstore.PagedBinaryStoreManager;
22 | import org.junit.Ignore;
23 | import org.junit.Test;
24 |
25 | /**
26 | * @author Alexey Ragozin (alexey.ragozin@gmail.com)
27 | */
28 | public class PagedMemoryBinaryStoreManager2Test extends BaseBinaryStoreManagerTest {
29 |
30 | @Test @Override
31 | public void test_basics() {
32 | // TODO Auto-generated method stub
33 | super.test_basics();
34 | }
35 |
36 | @Test @Override
37 | public void large_consistency_test_basics() {
38 | // TODO Auto-generated method stub
39 | super.large_consistency_test_basics();
40 | }
41 |
42 | @Ignore @Override
43 | protected BinaryStoreManager createLargeStoreManager() {
44 | InHeapMemoryStoreBackend pageManager = new InHeapMemoryStoreBackend(64 << 10, 256, 2);
45 |
46 | PagedBinaryStoreManager storeMan = new PagedBinaryStoreManager("test_basics", pageManager);
47 | return storeMan;
48 | }
49 |
50 | @Ignore @Override
51 | protected BinaryStoreManager createSmallPageManager() {
52 | InHeapMemoryStoreBackend pageManager = new InHeapMemoryStoreBackend(8 << 10, 16, 2);
53 |
54 | PagedBinaryStoreManager storeMan = new PagedBinaryStoreManager("test_basics", pageManager);
55 | return storeMan;
56 | }
57 |
58 |
59 |
60 | }
61 |
--------------------------------------------------------------------------------
/jmemstore/src/test/java/org/gridkit/offheap/jmemstore/RandomIdentityMappingTester.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2011 Alexey Ragozin
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | *
16 | */
17 | package org.gridkit.offheap.jmemstore;
18 |
19 | import java.util.Random;
20 |
21 | import org.gridkit.offheap.jmemstore.ByteChunk;
22 | import org.gridkit.offheap.jmemstore.IdentityMapping;
23 | import org.gridkit.offheap.jmemstore.IdentityMappingHashTable;
24 | import org.junit.Assert;
25 | import org.junit.Ignore;
26 |
27 | /**
28 | * @author Alexey Ragozin (alexey.ragozin@gmail.com)
29 | */
30 | @Ignore
31 | public class RandomIdentityMappingTester {
32 |
33 | public Random rnd = new Random(0);
34 | private Random rnd2 = new Random();
35 |
36 | public int variety = 1000;
37 | public int populationPhase = 10000;
38 | public int mainPhase = 50000;
39 | public int removalPhase = 20000;
40 |
41 |
42 | public void start(IdentityMapping mapping) {
43 |
44 | try {
45 |
46 | SimpleIdentityMapping refMap = new SimpleIdentityMapping();
47 |
48 | int n = 0;
49 | // population
50 | while(n < populationPhase) {
51 |
52 | if (probability(0.8)) {
53 | ByteChunk key = randomBytes(nextKey());
54 |
55 | if (key.toString().equals("[8ea5456f.c07d6f79.8bbbade2.30]")) {
56 | System.out.println("++" + key + " at " + refMap.cardinality(key));
57 | }
58 |
59 | int id = mapping.map(key);
60 |
61 | if (key.toString().equals("[8bea12ce.b9527aae.11377490.f1]")) {
62 | new String(); // ((IdentityMappingHashTable)mapping)._debug_dump();
63 | }
64 | refMap.map(key, id);
65 | }
66 | else {
67 | while(true) {
68 | ByteChunk key = randomBytes(nextKey());
69 | if (key.toString().equals("[8bea12ce.b9527aae.11377490.f1]")) {
70 | new String();
71 | }
72 | int id = refMap.getIdByChunk(key);
73 | if (id == IdentityMapping.UNMAPPED) {
74 | continue;
75 | }
76 |
77 | if (key.toString().equals("[8ea5456f.c07d6f79.8bbbade2.30]")) {
78 | System.out.println("--" + key + " at " + refMap.cardinality(key));
79 | }
80 |
81 | if (probability(0.5)) {
82 | mapping.unmap(id);
83 | refMap.unmap(id);
84 | }
85 | else {
86 | mapping.unmap(key);
87 | refMap.unmap(key);
88 | }
89 | break;
90 | }
91 | }
92 |
93 | ++n;
94 | if (n % 100 == 0) {
95 | compare(mapping, refMap);
96 | }
97 | Assert.assertEquals(refMap.size(), mapping.size());
98 | }
99 |
100 | n = 0;
101 | // population
102 | while(n < mainPhase) {
103 |
104 | if (probability(0.5)) {
105 | ByteChunk key = randomBytes(nextKey());
106 |
107 | if (key.toString().equals("[8ea5456f.c07d6f79.8bbbade2.30]")) {
108 | System.out.println("++" + key + " at " + refMap.cardinality(key));
109 | }
110 |
111 | int id = mapping.map(key);
112 | refMap.map(key, id);
113 | Assert.assertEquals(refMap.size(), mapping.size());
114 | }
115 | else {
116 | while(true) {
117 | ByteChunk key = randomBytes(nextKey());
118 | int id = refMap.getIdByChunk(key);
119 | if (id == IdentityMapping.UNMAPPED) {
120 | continue;
121 | }
122 |
123 | if (key.toString().equals("[8ea5456f.c07d6f79.8bbbade2.30]")) {
124 | System.out.println("--" + key + " at " + refMap.cardinality(id));
125 | }
126 | if (probability(0.5)) {
127 | mapping.unmap(id);
128 | refMap.unmap(id);
129 | if (refMap.size() != mapping.size()) {
130 | Assert.assertEquals(refMap.size(), mapping.size());
131 | }
132 | }
133 | else {
134 | mapping.unmap(key);
135 | refMap.unmap(key);
136 | Assert.assertEquals(refMap.size(), mapping.size());
137 | }
138 | break;
139 | }
140 | }
141 |
142 | ++n;
143 | if (n % 100 == 0) {
144 | compare(mapping, refMap);
145 | }
146 | Assert.assertEquals(refMap.size(), mapping.size());
147 | }
148 |
149 | n = 0;
150 | // removal phase
151 | while(n < removalPhase) {
152 |
153 | if (refMap.size() == 0) {
154 | break;
155 | }
156 |
157 | if (probability(0.2)) {
158 | ByteChunk key = randomBytes(nextKey());
159 | int id = mapping.map(key);
160 | refMap.map(key, id);
161 | }
162 | else {
163 | while(true) {
164 |
165 | ByteChunk key;
166 | int id;
167 | if (probability(0.8)) {
168 | key = randomBytes(nextKey());
169 | id = refMap.getIdByChunk(key);
170 | if (id == IdentityMapping.UNMAPPED) {
171 | continue;
172 | }
173 | }
174 | else {
175 | id = refMap.getValidId();
176 | key = refMap.getChunkById(id);
177 | }
178 |
179 | if (probability(0.5)) {
180 | mapping.unmap(id);
181 | refMap.unmap(id);
182 | }
183 | else {
184 | mapping.unmap(key);
185 | refMap.unmap(key);
186 | }
187 | break;
188 | }
189 | }
190 |
191 | ++n;
192 | if (n % 100 == 0) {
193 | compare(mapping, refMap);
194 | }
195 | Assert.assertEquals(refMap.size(), mapping.size());
196 | }
197 |
198 | compare(mapping, refMap);
199 | }
200 | catch(RuntimeException e) {
201 | ((IdentityMappingHashTable)mapping)._debug_dump();
202 | throw e;
203 | }
204 | catch(AssertionError e) {
205 | ((IdentityMappingHashTable)mapping)._debug_dump();
206 | throw e;
207 | }
208 | }
209 |
210 | public boolean probability(double x) {
211 | return rnd.nextDouble() < x;
212 | }
213 |
214 | public int nextKey() {
215 | return rnd.nextInt(variety);
216 | }
217 |
218 | public ByteChunk randomBytes(int n) {
219 | rnd2.setSeed(n);
220 | int len = 8 + rnd2.nextInt(8);
221 | byte[] chunk = new byte[len];
222 | rnd2.nextBytes(chunk);
223 | return new ByteChunk(chunk);
224 | }
225 |
226 | public void compare(IdentityMapping mapping, SimpleIdentityMapping refMapping) {
227 | for(int i = 0; i != variety; ++i) {
228 | ByteChunk key = randomBytes(i);
229 | int id = refMapping.getIdByChunk(key);
230 |
231 | Assert.assertEquals(id, mapping.getIdByChunk(key));
232 | if (id != IdentityMapping.UNMAPPED) {
233 | Assert.assertEquals(key.toString(), String.valueOf(mapping.getChunkById(id)));
234 | }
235 | }
236 | Assert.assertEquals(mapping.size(), refMapping.size());
237 | }
238 | }
239 |
--------------------------------------------------------------------------------
/jmemstore/src/test/java/org/gridkit/offheap/jmemstore/SimpleIdentityMapping.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2011 Alexey Ragozin
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | *
16 | */
17 | package org.gridkit.offheap.jmemstore;
18 |
19 | import java.util.Collection;
20 | import java.util.HashMap;
21 | import java.util.Map;
22 |
23 | import org.gridkit.offheap.jmemstore.ByteChunk;
24 | import org.gridkit.offheap.jmemstore.IdentityMapping;
25 | import org.junit.Assert;
26 | import org.junit.Ignore;
27 |
28 | /**
29 | * @author Alexey Ragozin (alexey.ragozin@gmail.com)
30 | */
31 | @Ignore
32 | public class SimpleIdentityMapping {
33 |
34 | private Map s2i = new HashMap();
35 | private Map i2s = new HashMap();
36 | private Map refCount = new HashMap();
37 |
38 | public ByteChunk getChunkById(int id) {
39 | return toChunk(i2s.get(id));
40 | }
41 |
42 | public int getIdByChunk(ByteChunk chunk) {
43 | Integer x = s2i.get(toString(chunk));
44 | return x == null ? IdentityMapping.UNMAPPED : x;
45 | }
46 |
47 | public void map(ByteChunk chunk, int id) {
48 | String s = toString(chunk);
49 | Integer ref = refCount.get(id);
50 | if (ref == null) {
51 | refCount.put(id, 1);
52 | }
53 | else {
54 | if (!(s.equals(i2s.get(id))) || id != s2i.get(s)) {
55 | Assert.assertEquals(s, i2s.get(id));
56 | Assert.assertEquals(Integer.valueOf(id), s2i.get(s));
57 | }
58 | refCount.put(id, ref + 1);
59 | }
60 |
61 | s2i.put(s, id);
62 | i2s.put(id, s);
63 |
64 | if (s2i.size() != i2s.size()) {
65 | Assert.assertFalse(true);
66 | }
67 | }
68 |
69 | public int size() {
70 | return s2i.size();
71 | }
72 |
73 | public void unmap(int id) {
74 | String s = i2s.get(id);
75 | Integer ref = refCount.get(id);
76 | if (ref.intValue() == 1) {
77 | refCount.remove(id);
78 | i2s.remove(id);
79 | s2i.remove(s);
80 | }
81 | else {
82 | refCount.put(id, ref - 1);
83 | }
84 |
85 | if (s2i.size() != i2s.size()) {
86 | Assert.assertFalse(true);
87 | }
88 | }
89 |
90 | public void unmap(ByteChunk chunk) {
91 | int id = s2i.get(toString(chunk));
92 | Integer ref = refCount.get(id);
93 | if (ref.intValue() == 1) {
94 | refCount.remove(id);
95 | i2s.remove(id);
96 | s2i.remove(toString(chunk));
97 | }
98 | else {
99 | refCount.put(id, ref - 1);
100 | }
101 |
102 | if (s2i.size() != i2s.size()) {
103 | Assert.assertFalse(true);
104 | }
105 | }
106 |
107 | public int cardinality(int id) {
108 | Integer x = refCount.get(id);
109 | return x == null ? 0 : x;
110 | }
111 |
112 | public int cardinality(ByteChunk chunk) {
113 | Integer x = refCount.get(getIdByChunk(chunk));
114 | return x == null ? 0 : x;
115 | }
116 |
117 | public int getValidId() {
118 | if (!i2s.isEmpty()) {
119 | return i2s.keySet().iterator().next();
120 | }
121 | else {
122 | return IdentityMapping.UNMAPPED;
123 | }
124 | }
125 |
126 | public Collection keySet() {
127 | return s2i.keySet();
128 | }
129 |
130 | private ByteChunk toChunk(String text) {
131 | if (text == null) {
132 | return null;
133 | }
134 | int len = text.length() / 2;
135 | byte[] bytes = new byte[len];
136 | for(int i = 0; i != len; ++i) {
137 | int val = Character.digit(text.charAt(2 * i), 16) << 4;
138 | val += Character.digit(text.charAt(2 * i + 1), 16);
139 | bytes[i] = (byte) val;
140 | }
141 | return new ByteChunk(bytes);
142 | }
143 |
144 | private static char[] HEX = {'0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f'};
145 |
146 | private String toString(ByteChunk chunk) {
147 | if (chunk == null) {
148 | return null;
149 | }
150 | StringBuffer buf = new StringBuffer();
151 | for(int i = 0; i != chunk.lenght(); ++i) {
152 | int val = chunk.at(i);
153 | buf.append(HEX[(val & 0xFF) >> 4]);
154 | buf.append(HEX[val & 0xF]);
155 | }
156 | return buf.toString();
157 | }
158 | }
159 |
--------------------------------------------------------------------------------
/jmemstore/src/test/java/org/gridkit/offheap/jmemstore/SplitHashTest.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2011 Alexey Ragozin
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | *
16 | */
17 | package org.gridkit.offheap.jmemstore;
18 |
19 | import org.gridkit.offheap.jmemstore.BinHash;
20 | import org.junit.Test;
21 |
22 | import junit.framework.Assert;
23 |
24 | /**
25 | * @author Alexey Ragozin (alexey.ragozin@gmail.com)
26 | */
27 | public class SplitHashTest {
28 |
29 | @Test
30 | public void test1() {
31 | Assert.assertEquals(511, BinHash.splitHash(-1, 512));
32 | }
33 |
34 | @Test
35 | public void test2() {
36 | Assert.assertEquals(510, BinHash.splitHash(-2, 512));
37 | }
38 |
39 | @Test
40 | public void test3() {
41 | Assert.assertEquals(0, BinHash.splitHash(0, 512));
42 | }
43 |
44 | @Test
45 | public void test4() {
46 | Assert.assertEquals(1, BinHash.splitHash(1, 512));
47 | }
48 |
49 | @Test
50 | public void test5() {
51 | Assert.assertEquals(511, BinHash.splitHash(511, 512));
52 | }
53 |
54 | @Test
55 | public void test6() {
56 | Assert.assertEquals(255, BinHash.splitHash(511, 511));
57 | }
58 |
59 | @Test
60 | public void test7() {
61 | Assert.assertEquals(256, BinHash.splitHash(256, 511));
62 | }
63 |
64 | @Test
65 | public void test8() {
66 | Assert.assertEquals(256, BinHash.splitHash(-256, 511));
67 | }
68 |
69 | @Test
70 | public void sizeAllignTest() {
71 | int segmentCount = 4;
72 | int capacity = 0;
73 | while(capacity < 100) {
74 | // assumed newCap = capacity + 1
75 |
76 | int slot = capacity % segmentCount;
77 | int oldSegSize = alignSegmentSize(capacity / segmentCount);
78 | int newSegSize = alignSegmentSize(1 + (capacity / segmentCount));
79 | if (oldSegSize != newSegSize) {
80 | System.out.println("Resize slot " + slot + ": " + oldSegSize + " -> " + newSegSize);
81 |
82 | }
83 | ++capacity;
84 | System.out.println("Slot " + slot + ", size=" + getSegmentSize(slot, capacity, segmentCount));
85 | }
86 | }
87 |
88 | private int getSegmentSize(int n, int capacity, int segmentCount) {
89 | int size = (capacity / segmentCount + (n < (capacity % segmentCount) ? 1 : 0));
90 | return alignSegmentSize(size);
91 | }
92 |
93 | private int alignSegmentSize(int cap) {
94 | if (cap == 0) {
95 | return 0;
96 | }
97 | ++cap;
98 | int allignment = 0xF;
99 | if (cap > 256) {
100 | allignment = 0x3F;
101 | }
102 | else if (cap > 1024) {
103 | allignment = 0xFF;
104 | }
105 | else if (cap > 4096) {
106 | allignment = 0x3FF;
107 | }
108 | cap = (cap + allignment) & (~allignment);
109 | --cap;
110 | return cap;
111 | }
112 |
113 | // @Test
114 | // public void capacity() {
115 | // int capacity = 305530;
116 | //
117 | // int nRound = Integer.highestOneBit(capacity);
118 | // int nSplit = (capacity) & ~nRound;
119 | // int nLast = capacity;
120 | //
121 | // System.out.println("nRound: " + nRound);
122 | // System.out.println("nSplit: " + nSplit);
123 | // System.out.println("nLast: " + nLast);
124 | //
125 | // }
126 | }
127 |
--------------------------------------------------------------------------------