├── src ├── Main.java ├── diskmanager │ ├── Page.java │ ├── DiskManager.java │ ├── DiskRequest.java │ ├── DiskFile.java │ ├── RandomAccessDiskFile.java │ └── BasicDiskManager.java ├── bufferpool │ ├── Replacer.java │ ├── Guard.java │ ├── ReadGuard.java │ ├── WriteGuard.java │ ├── Frame.java │ ├── LRU.java │ └── BufferPool.java ├── indexmanager │ ├── Index.java │ ├── IndexManager.java │ └── CollectionMan.java ├── types │ ├── Json.java │ ├── Types.java │ ├── Compare.java │ ├── Template.java │ ├── GenericByteReader.java │ ├── Array.java │ ├── CompareableArray.java │ ├── Compositekey.java │ └── Key.java ├── globals │ └── Globals.java ├── test │ ├── types │ │ └── ArrayTest.java │ ├── diskmanager │ │ └── DiskManagerTest.java │ ├── btree │ │ ├── IndexTest.java │ │ └── BtreeTest.java │ └── bufferpool │ │ ├── FrameTest.java │ │ ├── GuardTest.java │ │ ├── ReplacerTest.java │ │ └── BufferPoolTest.java ├── page │ ├── TreeNodeHeader.java │ ├── InternalNode.java │ └── LeafNode.java └── btree │ ├── BtreeHeader.java │ ├── Cursor.java │ └── Btree.java ├── storage └── README.md ├── .gitignore ├── notes.txt ├── scripts └── format.bash ├── .github └── workflows │ └── ci.yml ├── LICENSE ├── pom.xml └── README.md /src/Main.java: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /storage/README.md: -------------------------------------------------------------------------------- 1 | place holder -------------------------------------------------------------------------------- /src/diskmanager/Page.java: -------------------------------------------------------------------------------- 1 | package diskmanager; 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .vscode 2 | bin 3 | storage/*.btree 4 | target 5 | lib -------------------------------------------------------------------------------- /notes.txt: -------------------------------------------------------------------------------- 1 | to compile 2 | javac -d bin $(find src -name "*.java") // will compile all in src 3 | 4 | to run 5 | java -cp bin Main 6 | -------------------------------------------------------------------------------- /scripts/format.bash: -------------------------------------------------------------------------------- 1 | USERNAME=$(whoami) 2 | USERDIR="/home/$USERNAME" 3 | JavaFormater="$USERDIR/google-java-format-1.27.0-all-deps.jar" 4 | java -jar $JavaFormater -i src/*/*.java src/*/*/*.java -------------------------------------------------------------------------------- /src/bufferpool/Replacer.java: -------------------------------------------------------------------------------- 1 | package bufferpool; 2 | 3 | public interface Replacer { 4 | public void recordAccess(int frameId); 5 | 6 | public int evict(); 7 | 8 | public void setEvictable(int frameId, boolean evictable); 9 | 10 | public void deleteFrame(int frameId); 11 | } 12 | -------------------------------------------------------------------------------- /src/indexmanager/Index.java: -------------------------------------------------------------------------------- 1 | package indexmanager; 2 | 3 | import types.Compositekey; 4 | 5 | public interface Index { 6 | public Compositekey get(Compositekey key) throws Exception; 7 | 8 | public boolean insert(Compositekey key, Compositekey value) throws Exception; 9 | 10 | public boolean 
delete(Compositekey key) throws Exception; 11 | } 12 | -------------------------------------------------------------------------------- /src/types/Json.java: -------------------------------------------------------------------------------- 1 | package types; 2 | 3 | public class Json implements Comparable<Json> { 4 | private byte[] data; 5 | public int compareTo(Json rhs) { 6 | for(int i = 0; i < Types.JSON_SIZE; i++) { 7 | if(data[i] < rhs.data[i]) { 8 | return -1; 9 | } 10 | 11 | if(data[i] > rhs.data[i]) { 12 | return 1; 13 | } 14 | } 15 | return 0; 16 | } 17 | 18 | 19 | } 20 | -------------------------------------------------------------------------------- /src/globals/Globals.java: -------------------------------------------------------------------------------- 1 | package globals; 2 | 3 | public class Globals { 4 | public static final int PAGE_SIZE = 2 * 4096; // 8KB 5 | public static final int CLUSTER_PAGE_SIZE = 4 * 4096; // 16KB 6 | public static final int PRE_ALLOCATED_PAGES_COUNT = 1024; // 8MB at 8KB pages 7 | public static final long INVALID_PAGE_ID = -1; 8 | public static final int INVALID_Frame_ID = -1; 9 | public static final String STORAGE_DIR = "storage/"; 10 | } 11 | -------------------------------------------------------------------------------- /src/diskmanager/DiskManager.java: -------------------------------------------------------------------------------- 1 | package diskmanager; 2 | 3 | import java.io.Closeable; 4 | import java.io.IOException; 5 | 6 | public interface DiskManager extends Closeable { 7 | public void pushRequest(DiskRequest request) throws InterruptedException, NullPointerException; 8 | 9 | public long allocatePage(String fileName) throws IOException, NullPointerException; 10 | 11 | public int getFileCount(); 12 | 13 | public long getPageCount(String fileName); 14 | 15 | public void open(String fileName) throws IOException, NullPointerException; 16 | } 17 | -------------------------------------------------------------------------------- /src/diskmanager/DiskRequest.java: -------------------------------------------------------------------------------- 1 | package diskmanager; 2 | 3 | import java.util.concurrent.CompletableFuture; 4 | 5 | public class DiskRequest { 6 | String fileName; 7 | long pageID; 8 | byte[] data; 9 | boolean isWrite; 10 | CompletableFuture<Boolean> finish; 11 | 12 | public DiskRequest(String fileName, long pageID, byte[] data, boolean isWrite) { 13 | this.fileName = fileName; 14 | this.pageID = pageID; 15 | this.data = data; 16 | this.isWrite = isWrite; 17 | this.finish = new CompletableFuture<>(); 18 | } 19 | 20 | public CompletableFuture<Boolean> getFuture() { 21 | return finish; 22 | } 23 | 24 | public byte[] getData() { 25 | return data; 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /src/bufferpool/Guard.java: -------------------------------------------------------------------------------- 1 | package bufferpool; 2 | 3 | import java.nio.ByteBuffer; 4 | import java.util.concurrent.locks.Lock; 5 | 6 | /** Access to a frame's data, whether for reading or writing, goes through a Guard. */ 7 | public class Guard { 8 | 9 | protected int frameId; 10 | protected Frame frame; 11 | protected Replacer replacer; 12 | protected Lock bpmLatch; 13 | 14 | public Guard(int frameId, Frame frame, Replacer replacer, Lock bpmLatch) { 15 | this.frameId = frameId; 16 | this.frame = frame; 17 | this.replacer = replacer; 18 | this.bpmLatch = bpmLatch; 19 | } 20 | 21 | public ByteBuffer getData() { 22 | return
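// read-only view: a plain Guard/ReadGuard lets callers inspect the page but not mutate it; mutation goes through WriteGuard.getDataMut(), which also marks the frame dirty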
ByteBuffer.wrap(frame.getData()).asReadOnlyBuffer(); 23 | } 24 | 25 | public int getFrameId() { 26 | return frameId; 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /src/bufferpool/ReadGuard.java: -------------------------------------------------------------------------------- 1 | package bufferpool; 2 | 3 | import globals.Globals; 4 | import java.io.Closeable; 5 | import java.util.concurrent.locks.Lock; 6 | 7 | public class ReadGuard extends Guard implements Closeable { 8 | 9 | public ReadGuard(int frameId, Frame frame, Replacer replacer, Lock bpmLatch) { 10 | super(frameId, frame, replacer, bpmLatch); 11 | try { 12 | boolean locked = frame.lockRead(); 13 | if (!locked) { 14 | this.frameId = Globals.INVALID_Frame_ID; 15 | } 16 | } catch (Exception e) { 17 | this.frameId = Globals.INVALID_Frame_ID; 18 | } 19 | } 20 | 21 | public void close() { 22 | bpmLatch.lock(); 23 | int pinCount = frame.removePin(); 24 | if (pinCount == 0) { 25 | replacer.setEvictable(frameId, true); 26 | } 27 | bpmLatch.unlock(); 28 | frame.unlockRead(); 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /src/test/types/ArrayTest.java: -------------------------------------------------------------------------------- 1 | package test.types; 2 | 3 | import static org.junit.Assert.assertEquals; 4 | 5 | import javax.naming.directory.InvalidAttributesException; 6 | import org.junit.Test; 7 | import types.Array; 8 | import types.Compositekey; 9 | import types.Template; 10 | 11 | public class ArrayTest { 12 | 13 | @Test 14 | public void testBasic() throws InvalidAttributesException { 15 | byte[] buffer = new byte[4096]; 16 | Template type = new Template(Integer.class); 17 | int n = 4096 / 4; 18 | Array arr = new Array(type, buffer, 0, 0, n); 19 | for (int i = 0; i < n; i++) { 20 | Compositekey key = new Compositekey(type); 21 | key.set(0, i, Integer.class); 22 | arr.insert(i, key); 23 | } 24 | 25 | for (int i = 0; i < n; i++) { 26 | Compositekey key = arr.get(i); 27 | int val = key.getVal(0); 28 | assertEquals(i, val); 29 | } 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /src/bufferpool/WriteGuard.java: -------------------------------------------------------------------------------- 1 | package bufferpool; 2 | 3 | import globals.Globals; 4 | import java.util.concurrent.locks.Lock; 5 | 6 | public class WriteGuard extends Guard { 7 | public WriteGuard(int frameId, Frame frame, Replacer replacer, Lock bpmLatch) { 8 | super(frameId, frame, replacer, bpmLatch); 9 | try { 10 | boolean locked = frame.lockWrite(); 11 | if (!locked) { 12 | this.frameId = Globals.INVALID_Frame_ID; 13 | } 14 | } catch (Exception e) { 15 | this.frameId = Globals.INVALID_Frame_ID; 16 | } 17 | } 18 | 19 | public void close() { 20 | bpmLatch.lock(); 21 | int pinCount = frame.removePin(); 22 | if (pinCount == 0) { 23 | replacer.setEvictable(frameId, true); 24 | } 25 | bpmLatch.unlock(); 26 | frame.unlockWrite(); 27 | } 28 | 29 | public byte[] getDataMut() { 30 | frame.setDirty(true); 31 | return frame.getData(); 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: Java CI with Maven 2 | 3 | on: 4 | push: 5 | branches: 6 | - master # Trigger only on push to master branch (e.g., after merging a PR) 7 | pull_request: 8 | branches: 9 | - master 10 
| jobs: 11 | build: 12 | 13 | runs-on: ubuntu-latest 14 | 15 | steps: 16 | - name: Checkout code 17 | uses: actions/checkout@v3 18 | 19 | - name: Set up JDK 20 | uses: actions/setup-java@v3 21 | with: 22 | java-version: '21' # Or 11, 21, etc. 23 | distribution: 'temurin' # Recommended distribution 24 | 25 | - name: Cache Maven packages 26 | uses: actions/cache@v3 27 | with: 28 | path: ~/.m2 29 | key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }} 30 | restore-keys: | 31 | ${{ runner.os }}-maven- 32 | 33 | 34 | - name: Build and test with Maven 35 | run: mvn test -Dtest=BtreeBenchmark 36 | -------------------------------------------------------------------------------- /src/diskmanager/DiskFile.java: -------------------------------------------------------------------------------- 1 | package diskmanager; 2 | 3 | import java.io.Closeable; 4 | import java.io.IOException; 5 | 6 | public interface DiskFile extends Closeable { 7 | 8 | /** 9 | * allocate new page in the file 10 | * 11 | * @return the page id of the allocated page 12 | * @throws IOException 13 | */ 14 | public long allocatePage() throws IOException; 15 | 16 | /** 17 | * reads the content of the page into the destination 18 | * 19 | * @param pageID 20 | * @param data 21 | * @throws IOException 22 | */ 23 | public void readPage(long pageID, byte[] dst) throws IOException; 24 | 25 | /** 26 | * writes the content of the src buffer into disk into the page (page id) 27 | * 28 | * @param pageID 29 | * @param src 30 | * @throws IOException 31 | */ 32 | public void writePage(long pageID, byte[] src) throws IOException; 33 | 34 | /** 35 | * @return the page count of the file 36 | */ 37 | public long getPageCnt(); 38 | 39 | /** 40 | * @return the total file size in bytes 41 | */ 42 | public long getFileSize(); 43 | } 44 | -------------------------------------------------------------------------------- /src/types/Types.java: -------------------------------------------------------------------------------- 1 | package types; 2 | 3 | public class Types { 4 | 5 | public static final short INT_SIZE = 4; 6 | public static final short LONG_SIZE = 8; 7 | public static final short DOUBLE_SIZE = 8; 8 | public static final short SHORT_SIZE = 2; 9 | public static final short BYTE_SIZE = 1; 10 | public static final short FLOAT_SIZE = 4; 11 | public static final short JSON_SIZE = 2 * 1024; // 2KB 12 | // Add more types as needed 13 | 14 | public static short getSize(Class type) { 15 | if (type == Integer.class) { 16 | return INT_SIZE; 17 | } else if (type == Long.class) { 18 | return LONG_SIZE; 19 | } else if (type == Double.class) { 20 | return DOUBLE_SIZE; 21 | } else if (type == Short.class) { 22 | return SHORT_SIZE; 23 | } else if (type == Byte.class) { 24 | return BYTE_SIZE; 25 | } else if (type == Float.class) { 26 | return FLOAT_SIZE; 27 | } else if (type == Json.class) { 28 | return JSON_SIZE; 29 | } 30 | // Add more types as needed 31 | throw new IllegalArgumentException("Unsupported type: " + type.getName()); 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 Mohamed Fathy Mohamed Hassan 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, 
merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /src/types/Compare.java: -------------------------------------------------------------------------------- 1 | package types; 2 | 3 | // generic compare class to compare two objects of type T where T is primitive type 4 | public class Compare { 5 | public static int compare(T a, T b) { 6 | if (a instanceof Integer) { 7 | return Integer.compare((Integer) a, (Integer) b); 8 | } else if (a instanceof Long) { 9 | return Long.compare((Long) a, (Long) b); 10 | } else if (a instanceof Double) { 11 | return Double.compare((Double) a, (Double) b); 12 | } else if (a instanceof Short) { 13 | return Short.compare((Short) a, (Short) b); 14 | } else if (a instanceof Byte) { 15 | return Byte.compare((Byte) a, (Byte) b); 16 | } else if (a instanceof Float) { 17 | return Float.compare((Float) a, (Float) b); 18 | } else if (a instanceof String) { 19 | return ((String) a).compareTo((String) b); 20 | } else if (a instanceof Character) { 21 | return Character.compare((Character) a, (Character) b); 22 | } else if (a instanceof Boolean) { 23 | return Boolean.compare((Boolean) a, (Boolean) b); 24 | } 25 | throw new IllegalArgumentException("Unsupported type: " + a.getClass().getName()); 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /src/indexmanager/IndexManager.java: -------------------------------------------------------------------------------- 1 | package indexmanager; 2 | 3 | import java.io.Closeable; 4 | import java.io.IOException; 5 | import java.util.HashMap; 6 | import java.util.Map; 7 | 8 | 9 | /** 10 | * index naming would be in this format 11 | * "collectionName-fieldName" 12 | */ 13 | public class IndexManager implements Closeable{ 14 | private static Map collections; 15 | public IndexManager(){ 16 | collections = new HashMap(); 17 | } 18 | 19 | public void close() { 20 | for(CollectionMan man: collections.values()) { 21 | try { 22 | man.close(); 23 | } catch (IOException e) { 24 | e.printStackTrace(); 25 | } 26 | } 27 | } 28 | 29 | public boolean hasIndex(String collection, String indexName) { 30 | if(!collections.containsKey(collection)) { 31 | return false; 32 | } 33 | 34 | CollectionMan man = collections.get(collection); 35 | return man.hasIndex(indexName); 36 | } 37 | 38 | public String getIndexForField(String collection, String field) { 39 | if(!collections.containsKey(collection)) { 40 | return null; 41 | } 42 | CollectionMan man = collections.get(collection); 43 | return man.getIndexForField(field); 44 | } 45 | 46 | } -------------------------------------------------------------------------------- /src/page/TreeNodeHeader.java: 
-------------------------------------------------------------------------------- 1 | package page; 2 | 3 | import java.nio.ByteBuffer; 4 | 5 | /** 6 | * TreeNodeHeader class represents a node in a B+ tree. It is a generic class that can be used for 7 | * both internal and leaf nodes. The keys are used to navigate the tree, while the values are used 8 | * to store data. on disk layout: | 2bytes | 1byte | 8 bytes | keys... | values... | | KeysN | Type 9 | * | pageId | key1 | key2 | ... | keyN | value1 | value2 | ... | valueN | 10 | */ 11 | public class TreeNodeHeader { 12 | protected short keysN; 13 | protected long pageId; 14 | protected boolean isLeaf; 15 | protected ByteBuffer buffer; 16 | 17 | public TreeNodeHeader() {} 18 | 19 | public TreeNodeHeader(long pageId, boolean isLeaf) { 20 | keysN = 0; 21 | this.isLeaf = isLeaf; 22 | this.pageId = pageId; 23 | } 24 | 25 | public TreeNodeHeader(byte[] rowData) { 26 | buffer = ByteBuffer.wrap(rowData); 27 | this.keysN = buffer.getShort(); 28 | this.isLeaf = buffer.get() == 1; 29 | this.pageId = buffer.getLong(); 30 | } 31 | 32 | // getters and setters 33 | 34 | public short getKeysN() { 35 | return keysN; 36 | } 37 | 38 | public long getPageId() { 39 | return pageId; 40 | } 41 | 42 | public boolean isLeaf() { 43 | return isLeaf; 44 | } 45 | 46 | public void setKeysN(short keysN) { 47 | this.keysN = keysN; 48 | } 49 | 50 | public void setPageId(long pageId) { 51 | this.pageId = pageId; 52 | } 53 | 54 | public void setLeaf(boolean isLeaf) { 55 | this.isLeaf = isLeaf; 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /src/btree/BtreeHeader.java: -------------------------------------------------------------------------------- 1 | package btree; 2 | 3 | import globals.Globals; 4 | import java.nio.ByteBuffer; 5 | 6 | public class BtreeHeader { 7 | private long pageId; 8 | private long rootPageId; 9 | private short height; 10 | ByteBuffer buffer; 11 | 12 | public BtreeHeader() { 13 | this.pageId = -1; 14 | this.rootPageId = -1; 15 | this.height = 0; 16 | } 17 | 18 | public BtreeHeader(long pageId, long rootPageId, short height) { 19 | this.pageId = pageId; 20 | this.rootPageId = rootPageId; 21 | this.height = height; 22 | writeHeader(); 23 | } 24 | 25 | public BtreeHeader(byte[] rowData) { 26 | buffer = ByteBuffer.wrap(rowData); 27 | this.pageId = buffer.getLong(); 28 | this.rootPageId = buffer.getLong(); 29 | this.height = buffer.getShort(); 30 | } 31 | 32 | public BtreeHeader(ByteBuffer data) { 33 | buffer = data; 34 | this.pageId = buffer.getLong(); 35 | this.rootPageId = buffer.getLong(); 36 | this.height = buffer.getShort(); 37 | } 38 | 39 | public void writeHeader() { 40 | buffer.rewind(); 41 | buffer.putLong(pageId); 42 | buffer.putLong(rootPageId); 43 | buffer.putShort(height); 44 | } 45 | 46 | public long getPageId() { 47 | return pageId; 48 | } 49 | 50 | public void setPageId(long pageId) { 51 | this.pageId = pageId; 52 | writeHeader(); 53 | } 54 | 55 | public long getRootPageId() { 56 | return rootPageId; 57 | } 58 | 59 | public void setRootPageId(long rootPageId) { 60 | this.rootPageId = rootPageId; 61 | writeHeader(); 62 | } 63 | 64 | public short getHeight() { 65 | return height; 66 | } 67 | 68 | public void setHeight(short height) { 69 | this.height = height; 70 | writeHeader(); 71 | } 72 | 73 | public boolean isEmpty() { 74 | return pageId == Globals.INVALID_PAGE_ID; 75 | } 76 | } 77 | -------------------------------------------------------------------------------- 
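The TreeNodeHeader Javadoc above fixes the node header layout: a 2-byte key count, a 1-byte node type, an 8-byte page id, followed by the packed keys and values. As a quick illustration, the following is a minimal, self-contained sketch (not a file in this repository; all names are invented) that encodes and decodes that 11-byte header with ByteBuffer, using the same calls as TreeNodeHeader's byte[] constructor. BtreeHeader above follows the same pattern for the tree-level header (pageId, rootPageId, height).

import java.nio.ByteBuffer;

public class NodeHeaderSketch {
  public static void main(String[] args) {
    byte[] page = new byte[2 * 4096];        // Globals.PAGE_SIZE (8 KB)
    ByteBuffer out = ByteBuffer.wrap(page);
    out.putShort((short) 3);                 // keysN: number of keys stored in this node
    out.put((byte) 1);                       // node type: 1 = leaf, 0 = internal
    out.putLong(42L);                        // pageId of this node

    ByteBuffer in = ByteBuffer.wrap(page);   // decode order mirrors TreeNodeHeader(byte[])
    short keysN = in.getShort();
    boolean isLeaf = in.get() == 1;
    long pageId = in.getLong();
    System.out.println(keysN + " keys, leaf=" + isLeaf + ", page " + pageId);
    // the keys... and values... regions occupy the rest of the page after this header
  }
}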
/src/types/Template.java: -------------------------------------------------------------------------------- 1 | package types; 2 | 3 | public class Template { 4 | public Class>[] classes; 5 | public short byteSize; 6 | 7 | public Template(Class>... classes) { 8 | this.classes = classes; 9 | this.byteSize = byteSize(); 10 | } 11 | 12 | public short getByteSize() { 13 | return byteSize; 14 | } 15 | 16 | public Class> getClass(int index) { 17 | return classes[index]; 18 | } 19 | 20 | private short byteSize() { 21 | short size = 0; 22 | for (int i = 0; i < classes.length; i++) { 23 | size += typeByteSize(classes[i]); 24 | } 25 | return size; 26 | } 27 | 28 | public short typeByteSize(int index) { 29 | Class type = classes[index]; 30 | if (type == Integer.class) { 31 | return Integer.BYTES; 32 | } else if (type == Long.class) { 33 | return Long.BYTES; 34 | } else if (type == Double.class) { 35 | return Double.BYTES; 36 | } else if (type == Short.class) { 37 | return Short.BYTES; 38 | } else if (type == Byte.class) { 39 | return Byte.BYTES; 40 | } else if (type == Float.class) { 41 | return Float.BYTES; 42 | } else if (type == Json.class) { 43 | return Types.JSON_SIZE; 44 | } 45 | throw new IllegalArgumentException("Unsupported type: " + type.getName()); 46 | } 47 | 48 | public short typeByteSize(Class type) { 49 | if (type == Integer.class) { 50 | return Integer.BYTES; 51 | } else if (type == Long.class) { 52 | return Long.BYTES; 53 | } else if (type == Double.class) { 54 | return Double.BYTES; 55 | } else if (type == Short.class) { 56 | return Short.BYTES; 57 | } else if (type == Byte.class) { 58 | return Byte.BYTES; 59 | } else if (type == Float.class) { 60 | return Float.BYTES; 61 | } else if (type == Json.class) { 62 | return Types.JSON_SIZE; 63 | } 64 | throw new IllegalArgumentException("Unsupported type: " + type.getName()); 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /src/btree/Cursor.java: -------------------------------------------------------------------------------- 1 | package btree; 2 | 3 | import bufferpool.BufferPool; 4 | import bufferpool.ReadGuard; 5 | import globals.Globals; 6 | import javax.naming.directory.InvalidAttributesException; 7 | import page.LeafNode; 8 | import types.Compositekey; 9 | 10 | public class Cursor { 11 | private ReadGuard guard; 12 | private BufferPool bufferpool; 13 | private Btree btree; 14 | private LeafNode node; 15 | private int index; 16 | 17 | public Cursor(Btree btree, ReadGuard guard, LeafNode node) { 18 | this.btree = btree; 19 | this.bufferpool = btree.getBufferPool(); 20 | this.guard = guard; 21 | this.node = node; 22 | } 23 | 24 | public Pair get() throws InvalidAttributesException { 25 | Pair curr = 26 | new Pair(node.getKey(index), node.getValue(index)); 27 | return curr; 28 | } 29 | 30 | public void next() throws Exception { 31 | if (isEnd()) { 32 | throw new Exception("current curser is the end of the b+tree"); 33 | } 34 | index++; 35 | if (index < node.getKeysN()) { 36 | return; 37 | } 38 | // go to the next leaf 39 | long nextPageId = node.getNextLeafNode(); 40 | if (nextPageId == Globals.INVALID_PAGE_ID) { 41 | end(); 42 | return; 43 | } 44 | while (true) { 45 | ReadGuard nextGuard = bufferpool.getReadGuard(btree.getFileName(), nextPageId); 46 | if (nextGuard == null) { 47 | Thread.sleep(10); 48 | continue; 49 | } 50 | LeafNode nextNode = 51 | new LeafNode(btree.getKeyType(), btree.getValueType(), nextGuard.getData()); 52 | 53 | node = nextNode; 54 | guard.close(); 55 | guard = 
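// switch this cursor to the next leaf's guard; note the ordering above: the new leaf's ReadGuard is acquired and parsed before the old guard is closed, so the scan never stands on an unpinned page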
nextGuard; 56 | index = 0; 57 | break; 58 | } 59 | } 60 | 61 | public void end() { 62 | index = -1; 63 | } 64 | 65 | public boolean isEnd() { 66 | return index == -1; 67 | } 68 | 69 | public class Pair { 70 | public final U first; 71 | public final V second; 72 | 73 | public Pair(U first, V second) { 74 | this.first = first; 75 | this.second = second; 76 | } 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /src/indexmanager/CollectionMan.java: -------------------------------------------------------------------------------- 1 | package indexmanager; 2 | 3 | import java.io.Closeable; 4 | import java.io.FileInputStream; 5 | import java.io.FileNotFoundException; 6 | import java.io.FileOutputStream; 7 | import java.io.IOException; 8 | import java.io.ObjectInputStream; 9 | import java.io.ObjectOutputStream; 10 | import java.nio.file.Files; 11 | import java.nio.file.Path; 12 | import java.nio.file.Paths; 13 | import java.util.HashSet; 14 | import java.util.Set; 15 | 16 | import globals.*; 17 | 18 | public class CollectionMan implements Closeable { 19 | private final static String fileSuffix = ".man"; 20 | private static String fileName; 21 | private static String collection; 22 | private static ObjectInputStream reader; 23 | private static ObjectOutputStream writer; 24 | private static Set indexes; 25 | 26 | public CollectionMan(String collectionName) throws IOException, ClassNotFoundException{ 27 | collection = collectionName; 28 | fileName = collectionName + fileSuffix; 29 | initRW(); 30 | } 31 | 32 | public void initRW() throws IOException, ClassNotFoundException{ 33 | try { 34 | reader = new ObjectInputStream(new FileInputStream(Globals.STORAGE_DIR + fileName)); 35 | writer = new ObjectOutputStream(new FileOutputStream(Globals.STORAGE_DIR + fileName)); 36 | read(); 37 | } catch (FileNotFoundException e) { 38 | indexes = new HashSet(); 39 | Path filePath = Paths.get(Globals.STORAGE_DIR + fileName); 40 | Files.createFile(filePath); 41 | reader = new ObjectInputStream(new FileInputStream(Globals.STORAGE_DIR + fileName)); 42 | writer = new ObjectOutputStream(new FileOutputStream(Globals.STORAGE_DIR + fileName)); 43 | } 44 | } 45 | 46 | public void close() throws IOException { 47 | flush(); 48 | } 49 | 50 | private static void flush() throws IOException { 51 | writer.writeObject(indexes); 52 | } 53 | 54 | @SuppressWarnings("unchecked") 55 | private static void read() throws IOException, ClassNotFoundException { 56 | indexes = (Set) reader.readObject(); 57 | } 58 | 59 | public boolean hasIndex(String indexName) { 60 | return indexes.contains(indexName); 61 | } 62 | 63 | public String getIndexForField(String field) { 64 | String exepectedIndexName = collection + "-" + field; 65 | if (indexes.contains(exepectedIndexName)) { 66 | return exepectedIndexName; 67 | } 68 | return null; 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 5 | 4.0.0 6 | 7 | com.diskmanager 8 | diskmanager 9 | 1.0-SNAPSHOT 10 | 11 | 12 | 11 13 | 11 14 | UTF-8 15 | 16 | 17 | 18 | 19 | junit 20 | junit 21 | 4.13.2 22 | 23 | 24 | org.hamcrest 25 | hamcrest-core 26 | 1.3 27 | 28 | 29 | 30 | 31 | src 32 | src/test 33 | 34 | 35 | org.apache.maven.plugins 36 | maven-surefire-plugin 37 | 2.22.2 38 | 39 | 40 | 1 41 | false 42 | 43 | -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=5005 44 | 45 | 46 | 47 | 
org.apache.maven.plugins 48 | maven-compiler-plugin 49 | 3.8.1 50 | 51 | ${maven.compiler.source} 52 | ${maven.compiler.target} 53 | true 54 | true 55 | true 56 | 57 | 58 | 59 | 60 | -------------------------------------------------------------------------------- /src/bufferpool/Frame.java: -------------------------------------------------------------------------------- 1 | package bufferpool; 2 | 3 | import globals.Globals; 4 | import java.util.concurrent.TimeUnit; 5 | import java.util.concurrent.atomic.AtomicInteger; 6 | import java.util.concurrent.locks.ReadWriteLock; 7 | import java.util.concurrent.locks.ReentrantReadWriteLock; 8 | 9 | public class Frame { 10 | 11 | private int frameId; 12 | private long pageId; 13 | private String fileName; 14 | private boolean dirty; 15 | private byte[] data; 16 | private AtomicInteger pinCount; 17 | private ReadWriteLock latch; 18 | 19 | public Frame() {} 20 | 21 | public Frame(int frameId) { 22 | this.frameId = frameId; 23 | this.data = new byte[Globals.PAGE_SIZE]; 24 | pinCount = new AtomicInteger(); 25 | latch = new ReentrantReadWriteLock(true); 26 | } 27 | 28 | public Frame(int frameId, int pageId, String fileName) { 29 | this.frameId = frameId; 30 | this.pageId = pageId; 31 | this.fileName = fileName; 32 | this.data = new byte[Globals.PAGE_SIZE]; 33 | pinCount = new AtomicInteger(); 34 | latch = new ReentrantReadWriteLock(true); 35 | } 36 | 37 | public void newFrame(long pageId, String fileName) { 38 | dirty = false; 39 | this.pageId = pageId; 40 | this.fileName = fileName; 41 | pinCount = new AtomicInteger(); 42 | } 43 | 44 | public String getFileName() { 45 | return fileName; 46 | } 47 | 48 | public long getPageId() { 49 | return pageId; 50 | } 51 | 52 | public byte[] getData() { 53 | return data; 54 | } 55 | 56 | public boolean isDirty() { 57 | return dirty; 58 | } 59 | 60 | public int getFrameId() { 61 | return frameId; 62 | } 63 | 64 | public int addPin() { 65 | return pinCount.incrementAndGet(); 66 | } 67 | 68 | public int removePin() { 69 | return pinCount.decrementAndGet(); 70 | } 71 | 72 | public int getPinCount() { 73 | return pinCount.get(); 74 | } 75 | 76 | public void setDirty(boolean dirty) { 77 | this.dirty = dirty; 78 | } 79 | 80 | public boolean lockRead() { 81 | try { 82 | return latch.readLock().tryLock(10, TimeUnit.MILLISECONDS); 83 | } catch (Exception e) { 84 | return false; 85 | } 86 | } 87 | 88 | public boolean lockWrite() { 89 | try { 90 | return latch.writeLock().tryLock(50, TimeUnit.MILLISECONDS); 91 | } catch (Exception e) { 92 | return false; 93 | } 94 | } 95 | 96 | public void unlockRead() { 97 | latch.readLock().unlock(); 98 | } 99 | 100 | public void unlockWrite() { 101 | latch.writeLock().unlock(); 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /src/types/GenericByteReader.java: -------------------------------------------------------------------------------- 1 | package types; 2 | 3 | import java.nio.ByteBuffer; 4 | 5 | // Functional interface for converting bytes to a specific type T 6 | @FunctionalInterface 7 | interface ByteConverter { 8 | T convert(byte[] bytes) throws Exception; 9 | } 10 | 11 | public class GenericByteReader { 12 | 13 | // Generic method to read bytes from disk and convert to type T 14 | public static T convert(byte[] data, ByteConverter converter) throws Exception { 15 | return converter.convert(data); // Convert bytes to type T 16 | } 17 | 18 | // generic method to convert a specific type T to bytes 19 | public static byte[] 
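// serializes a boxed primitive into a fixed-width, big-endian byte array (ByteBuffer's default order), with widths matching Types.getSize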
convertToBytes(T value) { 20 | if (value instanceof Integer) { 21 | return ByteBuffer.allocate(4).putInt((Integer) value).array(); 22 | } else if (value instanceof Long) { 23 | return ByteBuffer.allocate(8).putLong((Long) value).array(); 24 | } else if (value instanceof Double) { 25 | return ByteBuffer.allocate(8).putDouble((Double) value).array(); 26 | } else if (value instanceof Short) { 27 | return ByteBuffer.allocate(2).putShort((Short) value).array(); 28 | } else if (value instanceof Byte) { 29 | return new byte[] {(Byte) value}; 30 | } else if (value instanceof Float) { 31 | return ByteBuffer.allocate(4).putFloat((Float) value).array(); 32 | } else { 33 | throw new IllegalArgumentException("Unsupported type: " + value.getClass().getName()); 34 | } 35 | } 36 | 37 | // Helper method to create converters for primitive wrapper types 38 | public static ByteConverter primitiveConverter(Class targetType) { 39 | return bytes -> { 40 | ByteBuffer buffer = ByteBuffer.wrap(bytes); 41 | if (targetType == Integer.class && bytes.length >= 4) { 42 | return targetType.cast(buffer.getInt()); 43 | } else if (targetType == Long.class && bytes.length >= 8) { 44 | return targetType.cast(buffer.getLong()); 45 | } else if (targetType == Double.class && bytes.length >= 8) { 46 | return targetType.cast(buffer.getDouble()); 47 | } else if (targetType == Short.class && bytes.length >= 2) { 48 | return targetType.cast(buffer.getShort()); 49 | } else if (targetType == Byte.class && bytes.length >= 1) { 50 | return targetType.cast(buffer.get()); 51 | } else if (targetType == Float.class && bytes.length >= 4) { 52 | return targetType.cast(buffer.getFloat()); 53 | } else { 54 | throw new IllegalArgumentException( 55 | "Unsupported type " 56 | + targetType.getName() 57 | + " or insufficient bytes (length: " 58 | + bytes.length 59 | + ")"); 60 | } 61 | }; 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /src/diskmanager/RandomAccessDiskFile.java: -------------------------------------------------------------------------------- 1 | package diskmanager; 2 | 3 | import globals.Globals; 4 | import java.io.FileNotFoundException; 5 | import java.io.IOException; 6 | import java.io.RandomAccessFile; 7 | import java.nio.ByteBuffer; 8 | import java.nio.channels.FileChannel; 9 | import java.nio.file.Files; 10 | import java.nio.file.Path; 11 | import java.nio.file.Paths; 12 | 13 | public class RandomAccessDiskFile implements DiskFile { 14 | 15 | private Path filePath; 16 | private RandomAccessFile file; // file length always will be a multiply of pageSize 17 | private FileChannel channel; 18 | private long pageSize; // page size in kb 19 | private long pageCnt; // number of pages in the file 20 | private long fileSize; 21 | 22 | public RandomAccessDiskFile(String filePath) throws IOException { 23 | this.filePath = Paths.get(filePath); 24 | try { // try to open file 25 | this.file = new RandomAccessFile(filePath, "rw"); 26 | this.channel = file.getChannel(); 27 | } catch (FileNotFoundException e) { // create the file if does not exist 28 | Files.createFile(this.filePath); 29 | this.file = new RandomAccessFile(filePath, "rw"); 30 | this.channel = file.getChannel(); 31 | } 32 | 33 | this.pageSize = Globals.PAGE_SIZE; // page size in bytes 34 | init(); 35 | } 36 | 37 | public void close() throws IOException { 38 | file.close(); 39 | } 40 | 41 | private void init() throws IOException { 42 | fileSize = file.length(); 43 | pageCnt = fileSize / pageSize; 44 | } 45 | 46 | public 
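// appends one zero-filled page at the current end of the file; synchronized so concurrent allocations advance pageCnt and fileSize consistently and each caller receives a distinct page id (the index of the page just added)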
synchronized long allocatePage() 47 | throws IOException { // allocate a page and returns it's id 48 | file.seek(fileSize); 49 | byte[] data = new byte[Globals.PAGE_SIZE]; 50 | file.write(data); 51 | pageCnt++; 52 | fileSize += pageSize; 53 | return pageCnt - 1; 54 | } 55 | 56 | public byte[] readPage(long pageID) throws IOException { 57 | if (pageID >= pageCnt) { 58 | throw new IOException("pageId: " + pageID); 59 | } 60 | 61 | byte[] buffer = new byte[(int) pageSize]; 62 | int read = channel.read(ByteBuffer.wrap(buffer), pageID * pageSize); 63 | if (read != pageSize) { 64 | throw new IOException(); 65 | } 66 | return buffer; 67 | } 68 | 69 | public void readPage(long pageID, byte[] dst) throws IOException { 70 | if (pageID >= pageCnt) { 71 | throw new IOException("pageId: " + pageID); 72 | } 73 | 74 | int read = channel.read(ByteBuffer.wrap(dst), pageID * pageSize); 75 | if (read != pageSize) { 76 | throw new IOException(); 77 | } 78 | } 79 | 80 | public void writePage(long pageID, byte[] src) throws IOException { 81 | int wrote = channel.write(ByteBuffer.wrap(src), pageID * pageSize); 82 | if (wrote != pageSize) { 83 | throw new IOException(); 84 | } 85 | } 86 | 87 | // geters 88 | public long getPageCnt() { 89 | return pageCnt; 90 | } 91 | 92 | public long getFileSize() { 93 | return fileSize; 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /src/types/Array.java: -------------------------------------------------------------------------------- 1 | package types; 2 | 3 | import java.nio.ByteBuffer; 4 | import javax.naming.directory.InvalidAttributesException; 5 | 6 | // static array 7 | public class Array { 8 | 9 | private int length; 10 | private final int capacity; 11 | private final int offsetBytes; 12 | private final ByteBuffer buf; 13 | private final Template template; 14 | 15 | public Array(Template template, byte[] data, int offsetBytes, int length, int capacity) { 16 | this.buf = ByteBuffer.wrap(data); 17 | this.offsetBytes = offsetBytes; 18 | this.length = length; 19 | this.capacity = capacity; 20 | this.template = template; 21 | } 22 | 23 | public Array(Template template, ByteBuffer data, int offsetBytes, int length, int capacity) { 24 | this.buf = data; 25 | this.offsetBytes = offsetBytes; 26 | this.length = length; 27 | this.capacity = capacity; 28 | this.template = template; 29 | } 30 | 31 | public int length() { 32 | return length; 33 | } 34 | 35 | /** Read element i (0 ≤ i < length) */ 36 | public Compositekey get(int i) throws InvalidAttributesException { 37 | checkIndex(i); 38 | buf.position(offsetBytes + i * template.getByteSize()); 39 | Compositekey keyUtil = new Compositekey(template); 40 | return keyUtil.read(buf); 41 | } 42 | 43 | /** Write element i (0 ≤ i < length) */ 44 | public void set(int i, Compositekey value) { 45 | checkIndex(i); 46 | buf.position(offsetBytes + i * template.getByteSize()); 47 | value.write(buf); 48 | } 49 | 50 | public void insert(int i, Compositekey value) throws InvalidAttributesException { 51 | if (length + 1 > capacity) throw new ArrayIndexOutOfBoundsException("Array is full"); 52 | checkIndex(i); 53 | length++; 54 | // shift elements [i, length) to the right 55 | for (int j = length - 1; j > i; j--) { 56 | set(j, get(j - 1)); 57 | } 58 | // insert new element 59 | set(i, value); 60 | } 61 | 62 | public void pushBack(Compositekey value) throws InvalidAttributesException { 63 | if (length + 1 > capacity) throw new ArrayIndexOutOfBoundsException("Array is full"); 64 | 65 | set(length, 
value); 66 | length++; 67 | } 68 | 69 | public void delete(int index) throws InvalidAttributesException { 70 | checkIndex(index); 71 | for (int i = index; i < length - 1; i++) { 72 | set(i, get(i + 1)); 73 | } 74 | length--; 75 | } 76 | 77 | private void checkIndexInbound(int i) { 78 | if (i < 0 || i >= length) throw new IndexOutOfBoundsException(i + "/" + length); 79 | } 80 | 81 | private void checkIndex(int i) { 82 | if (i < 0 || i >= capacity) 83 | throw new IndexOutOfBoundsException(i + "/c" + capacity + "/l" + length); 84 | } 85 | 86 | // getters and setters 87 | public ByteBuffer getBuf() { 88 | return buf; 89 | } 90 | 91 | public int getOffsetBytes() { 92 | return offsetBytes; 93 | } 94 | 95 | public int getLength() { 96 | return length; 97 | } 98 | 99 | public int getCapacity() { 100 | return capacity; 101 | } 102 | 103 | public void setLength(int length) { 104 | this.length = length; 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /src/types/CompareableArray.java: -------------------------------------------------------------------------------- 1 | package types; 2 | 3 | import java.nio.ByteBuffer; 4 | import javax.naming.directory.InvalidAttributesException; 5 | 6 | public class CompareableArray extends Array { 7 | public CompareableArray( 8 | Template template, byte[] data, int offsetBytes, int length, int capacity) { 9 | super(template, data, offsetBytes, length, capacity); 10 | } 11 | 12 | public CompareableArray( 13 | Template template, ByteBuffer data, int offsetBytes, int length, int capacity) { 14 | super(template, data, offsetBytes, length, capacity); 15 | } 16 | 17 | public int upperBound(Compositekey key, int low, int high) throws InvalidAttributesException { 18 | if (high < low) { 19 | return low; 20 | } 21 | int index = high; 22 | while (low <= high) { 23 | int mid = (low + high) / 2; 24 | Compositekey midKey = get(mid); 25 | if (midKey.compareTo(key) > 0) { 26 | index = mid; 27 | high = mid - 1; 28 | } else { 29 | low = mid + 1; 30 | } 31 | } 32 | Compositekey curr = get(index); 33 | if (curr.compareTo(key) <= 0) { 34 | index++; 35 | } 36 | return index; 37 | } 38 | 39 | public int lowerBound(Compositekey key, int low, int high) throws InvalidAttributesException { 40 | if (high < low) { 41 | return low; 42 | } 43 | int index = high; 44 | while (low <= high) { 45 | int mid = (low + high) / 2; 46 | Compositekey midKey = get(mid); 47 | if (midKey.compareTo(key) >= 0) { 48 | index = mid; 49 | high = mid - 1; 50 | } else { 51 | low = mid + 1; 52 | } 53 | } 54 | Compositekey curr = get(index); 55 | if (curr.compareTo(key) < 0) { 56 | index++; 57 | } 58 | return index; 59 | } 60 | 61 | public int upperBound(Compositekey key) throws InvalidAttributesException { 62 | return upperBound(key, 0, length() - 1); 63 | } 64 | 65 | public int upperBound(Compositekey key, int low) throws InvalidAttributesException { 66 | return upperBound(key, low, length() - 1); 67 | } 68 | 69 | public int lowerBound(Compositekey key) throws InvalidAttributesException { 70 | return lowerBound(key, 0, length() - 1); 71 | } 72 | 73 | public int lowerBound(Compositekey key, int low) throws InvalidAttributesException { 74 | return lowerBound(key, low, length() - 1); 75 | } 76 | 77 | public int binarySearch(Compositekey key, int low, int high) throws InvalidAttributesException { 78 | while (low <= high) { 79 | int mid = (low + high) / 2; 80 | Compositekey midKey = get(mid); 81 | if (midKey.compareTo(key) == 0) { 82 | return mid; // key found 83 | } 
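// midKey < key: a match, if present, can only be in the upper half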
else if (midKey.compareTo(key) < 0) { 84 | low = mid + 1; 85 | } else { 86 | high = mid - 1; 87 | } 88 | } 89 | return 0; // key not found 90 | } 91 | 92 | public int binarySearch(Compositekey key) throws InvalidAttributesException { 93 | return binarySearch(key, 0, length() - 1); 94 | } 95 | 96 | public int binarySearch(Compositekey key, int low) throws InvalidAttributesException { 97 | return binarySearch(key, low, length() - 1); 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /src/types/Compositekey.java: -------------------------------------------------------------------------------- 1 | package types; 2 | 3 | import java.nio.ByteBuffer; 4 | import javax.naming.directory.InvalidAttributesException; 5 | 6 | public class Compositekey extends Template implements Comparable { 7 | 8 | // List of Types 9 | private Key[] keys; 10 | 11 | @SuppressWarnings("unchecked") 12 | public Compositekey(Template classes, Key... keys) throws InvalidAttributesException { 13 | super(classes.classes); 14 | if (classes.classes.length != keys.length) { 15 | throw new InvalidAttributesException(); 16 | } 17 | this.keys = new Key[keys.length]; 18 | for (int i = 0; i < keys.length; i++) { 19 | this.keys[i] = new Key(getClass(i)); 20 | } 21 | } 22 | 23 | public Compositekey(Class>... classes) { 24 | super(classes); 25 | this.keys = new Key[classes.length]; 26 | for (int i = 0; i < keys.length; i++) { 27 | this.keys[i] = new Key(getClass(i)); 28 | } 29 | } 30 | 31 | public Compositekey(Template classes) { 32 | super(classes.classes); 33 | this.keys = new Key[this.classes.length]; 34 | for (int i = 0; i < keys.length; i++) { 35 | this.keys[i] = new Key(getClass(i)); 36 | } 37 | } 38 | 39 | public int compareTo(Compositekey rhs) { 40 | for (int i = 0; i < keys.length; i++) { 41 | Key key1 = get(i); 42 | Key key2 = rhs.get(i); 43 | int cmp = key1.compareTo(key2); 44 | if (cmp != 0) { 45 | return cmp; 46 | } 47 | } 48 | return 0; 49 | } 50 | 51 | public Compositekey read(ByteBuffer buf) throws InvalidAttributesException { 52 | for (int i = 0; i < keys.length; i++) { 53 | keys[i].readVal(buf); 54 | } 55 | return this; 56 | } 57 | 58 | public void write(ByteBuffer buf) { 59 | for (int i = 0; i < keys.length; i++) { 60 | keys[i].write(buf); 61 | } 62 | } 63 | 64 | public Key get(int index) { 65 | return keys[index]; 66 | } 67 | 68 | public void set(int index, byte[] val) { 69 | keys[index].set(val); 70 | } 71 | 72 | public void set(int index, T val, Class type) { 73 | ByteBuffer buf; 74 | if (type == Integer.class) { 75 | buf = ByteBuffer.wrap(new byte[Integer.BYTES]); 76 | buf.putInt((int) val); 77 | } else if (type == Long.class) { 78 | buf = ByteBuffer.wrap(new byte[Long.BYTES]); 79 | buf.putLong((long) val); 80 | } else if (type == Double.class) { 81 | buf = ByteBuffer.wrap(new byte[Double.BYTES]); 82 | buf.putDouble((double) val); 83 | } else if (type == Short.class) { 84 | buf = ByteBuffer.wrap(new byte[Short.BYTES]); 85 | buf.putShort((Short) val); 86 | } else if (type == Byte.class) { 87 | buf = ByteBuffer.wrap(new byte[Byte.BYTES]); 88 | buf.put((Byte) val); 89 | } else if (type == Float.class) { 90 | buf = ByteBuffer.wrap(new byte[Float.BYTES]); 91 | buf.putFloat((Float) val); 92 | } else { 93 | throw new IllegalArgumentException("Unsupported type: " + type.getName()); 94 | } 95 | keys[index].set(buf.array()); 96 | } 97 | 98 | @SuppressWarnings("unchecked") 99 | public T getVal(int index) { 100 | return (T) keys[index].getVal(); 101 | } 102 | } 103 | 
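Illustrative usage, not part of Compositekey.java (assumes the types package is on the classpath; InvalidAttributesException handling omitted): building a two-field key, comparing it, and round-tripping it through a ByteBuffer, the same calls that ArrayTest and IndexTest make.

Template type = new Template(Integer.class, Integer.class);
Compositekey a = new Compositekey(type);
a.set(0, 30, Integer.class);           // first field
a.set(1, 50_000, Integer.class);       // second field
Compositekey b = new Compositekey(type);
b.set(0, 30, Integer.class);
b.set(1, 60_000, Integer.class);
int cmp = a.compareTo(b);              // negative: equal on field 0, a is smaller on field 1

ByteBuffer buf = ByteBuffer.wrap(new byte[type.getByteSize()]);
a.write(buf);                          // serialize field by field
buf.rewind();
Compositekey back = new Compositekey(type).read(buf);
System.out.println(back.compareTo(a) == 0);   // true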
-------------------------------------------------------------------------------- /src/test/diskmanager/DiskManagerTest.java: -------------------------------------------------------------------------------- 1 | package test.diskmanager; 2 | 3 | import static org.junit.Assert.*; 4 | 5 | import diskmanager.BasicDiskManager; 6 | import diskmanager.DiskManager; 7 | import diskmanager.DiskRequest; 8 | import java.io.IOException; 9 | import java.util.Arrays; 10 | import org.junit.After; 11 | import org.junit.Before; 12 | import org.junit.Test; 13 | 14 | public class DiskManagerTest { 15 | private DiskManager diskManager; 16 | private static final String TEST_FILE = "test.db"; 17 | private static final int PAGE_SIZE = 4096; 18 | 19 | @Before 20 | public void setUp() throws Exception { 21 | diskManager = new BasicDiskManager(); 22 | } 23 | 24 | @After 25 | public void tearDown() throws Exception { 26 | if (diskManager.getFileCount() >= 0) { 27 | diskManager.close(); 28 | } 29 | // Clean up test file if it exists 30 | java.io.File testFile = new java.io.File("storage/" + TEST_FILE); 31 | if (testFile.exists()) { 32 | testFile.delete(); 33 | } 34 | } 35 | 36 | @Test 37 | public void testFileCreationAndCount() throws IOException { 38 | assertEquals(0, diskManager.getFileCount()); 39 | diskManager.open(TEST_FILE); 40 | assertEquals(1, diskManager.getFileCount()); 41 | 42 | // Opening same file should not increase count 43 | diskManager.open(TEST_FILE); 44 | assertEquals(1, diskManager.getFileCount()); 45 | } 46 | 47 | @Test 48 | public void testPageAllocation() throws IOException { 49 | long pageId = diskManager.allocatePage(TEST_FILE); 50 | assertEquals(0, pageId); // First page should be 0 51 | 52 | long secondPageId = diskManager.allocatePage(TEST_FILE); 53 | assertEquals(1, secondPageId); // Second page should be 1 54 | } 55 | 56 | @Test 57 | public void testReadWriteRequests() throws Exception { 58 | // Allocate a page first 59 | long pageId = diskManager.allocatePage(TEST_FILE); 60 | 61 | // Prepare test data 62 | byte[] writeData = new byte[PAGE_SIZE]; 63 | Arrays.fill(writeData, (byte) 42); 64 | 65 | // Write request 66 | DiskRequest writeRequest = new DiskRequest(TEST_FILE, pageId, writeData, true); 67 | diskManager.pushRequest(writeRequest); 68 | 69 | assertTrue(writeRequest.getFuture().get()); // Wait for write to complete 70 | 71 | // Read back the data 72 | byte[] readData = new byte[PAGE_SIZE]; 73 | DiskRequest readRequest = new DiskRequest(TEST_FILE, pageId, readData, false); 74 | diskManager.pushRequest(readRequest); 75 | 76 | assertTrue(readRequest.getFuture().get()); // Wait for read to complete 77 | assertArrayEquals(writeData, readData); 78 | } 79 | 80 | @Test 81 | public void testConcurrentRequests() throws Exception { 82 | int numRequests = 10; 83 | DiskRequest[] requests = new DiskRequest[numRequests]; 84 | long pageId = diskManager.allocatePage(TEST_FILE); 85 | 86 | // Launch multiple concurrent write requests 87 | for (int i = 0; i < numRequests; i++) { 88 | byte[] data = new byte[PAGE_SIZE]; 89 | Arrays.fill(data, (byte) i); 90 | DiskRequest request = new DiskRequest(TEST_FILE, pageId, data, true); 91 | requests[i] = request; 92 | diskManager.pushRequest(request); 93 | } 94 | 95 | // Wait for all requests to complete 96 | for (DiskRequest request : requests) { 97 | assertTrue(request.getFuture().get()); 98 | } 99 | } 100 | 101 | @Test(expected = NullPointerException.class) 102 | public void testOperationsAfterClose() throws IOException, NullPointerException { 103 | 
diskManager.close(); 104 | // This should throw NullPointerException 105 | diskManager.open(TEST_FILE); 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /src/bufferpool/LRU.java: -------------------------------------------------------------------------------- 1 | package bufferpool; 2 | 3 | import java.util.HashMap; 4 | import java.util.LinkedList; 5 | import java.util.List; 6 | import java.util.Map; 7 | import java.util.concurrent.locks.Lock; 8 | import java.util.concurrent.locks.ReentrantLock; 9 | 10 | /** implementation of the LRU-K replacer */ 11 | public class LRU implements Replacer { 12 | private int k; 13 | private long currentTime; 14 | private Map frames; 15 | private Lock latch; 16 | 17 | public LRU(int k) { 18 | this.k = k; 19 | currentTime = 0; 20 | frames = new HashMap(); 21 | latch = new ReentrantLock(); 22 | } 23 | 24 | /** 25 | * record access at the currentTime to the frame 26 | * 27 | * @param frameId 28 | */ 29 | public void recordAccess(int frameId) { 30 | latch.lock(); 31 | if (!frames.containsKey(frameId)) { 32 | frames.put(frameId, new FrameMeta(frameId)); 33 | } 34 | FrameMeta frame = frames.get(frameId); 35 | frame.recordAccess(currentTime, k); 36 | currentTime++; 37 | latch.unlock(); 38 | } 39 | 40 | /** 41 | * evict the frame with the largest k-distance in case if two frames tie with the INF we break the 42 | * tie with who has the latest access and evict the other also we skip any unevictable frames 43 | * 44 | * @return the frameid to be evicted if can not evict any returns -1 45 | */ 46 | public int evict() { 47 | latch.lock(); 48 | int victim = -1; 49 | long victimRecentAccess = -1; 50 | long max = -1; 51 | for (Map.Entry current : frames.entrySet()) { 52 | int frameId = current.getKey(); 53 | FrameMeta frame = current.getValue(); 54 | if (!frame.isEvictable()) { 55 | continue; 56 | } 57 | 58 | long kDistance = frame.kDistance(currentTime, k); 59 | if (kDistance > max) { 60 | victim = frameId; 61 | max = kDistance; 62 | victimRecentAccess = frame.recentAccess(); 63 | } else if (kDistance == Long.MAX_VALUE && max == kDistance) { 64 | long recentAccess = frame.recentAccess(); 65 | if (recentAccess < victimRecentAccess) { 66 | victim = frameId; 67 | victimRecentAccess = recentAccess; 68 | } 69 | } 70 | } 71 | 72 | if (victim == -1) { 73 | latch.unlock(); 74 | return -1; 75 | } 76 | 77 | frames.remove(victim); 78 | 79 | latch.unlock(); 80 | return victim; 81 | } 82 | 83 | /** 84 | * set a frame to a state [evictable -> true, unevictable -> false] 85 | * 86 | * @param frameId 87 | * @param evictable 88 | */ 89 | public void setEvictable(int frameId, boolean evictable) { 90 | latch.lock(); 91 | if (!frames.containsKey(frameId)) { 92 | latch.unlock(); 93 | return; 94 | } 95 | 96 | FrameMeta frame = frames.get(frameId); 97 | frame.setEvictable(evictable); 98 | latch.unlock(); 99 | } 100 | 101 | public void deleteFrame(int frameId) { 102 | latch.lock(); 103 | if (!frames.containsKey(frameId)) { 104 | latch.unlock(); 105 | return; 106 | } 107 | frames.remove(frameId); 108 | latch.unlock(); 109 | } 110 | 111 | private class FrameMeta { 112 | private boolean evictable; 113 | private List timeStamps; 114 | 115 | public FrameMeta(int frameId) { 116 | timeStamps = new LinkedList(); 117 | evictable = true; 118 | } 119 | 120 | public void recordAccess(long time, int k) { 121 | if (timeStamps.size() == k) { 122 | timeStamps.removeLast(); 123 | } 124 | timeStamps.addFirst(time); 125 | } 126 | 127 | public long 
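// backward k-distance used by the LRU-K policy: Long.MAX_VALUE (treated as infinite) while the frame has fewer than k recorded accesses, otherwise the age of its k-th most recent access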
kDistance(long current, int k) { 128 | if (timeStamps.size() < k) { 129 | return Long.MAX_VALUE; 130 | } 131 | 132 | return current - timeStamps.getLast(); 133 | } 134 | 135 | public long recentAccess() { 136 | return timeStamps.getFirst(); 137 | } 138 | 139 | public void setEvictable(boolean evictable) { 140 | this.evictable = evictable; 141 | } 142 | 143 | public boolean isEvictable() { 144 | return evictable; 145 | } 146 | } 147 | } 148 | -------------------------------------------------------------------------------- /src/test/btree/IndexTest.java: -------------------------------------------------------------------------------- 1 | package test.btree; 2 | 3 | import static org.junit.Assert.assertEquals; 4 | import static org.junit.Assert.fail; 5 | 6 | import btree.Btree; 7 | import bufferpool.BufferPool; 8 | import diskmanager.BasicDiskManager; 9 | import diskmanager.DiskManager; 10 | import globals.Globals; 11 | import java.util.ArrayList; 12 | import java.util.Collections; 13 | import java.util.List; 14 | import org.junit.After; 15 | import org.junit.Before; 16 | import org.junit.Test; 17 | import types.Compositekey; 18 | import types.Template; 19 | 20 | public class IndexTest { 21 | 22 | private Btree btree; 23 | private BufferPool bufferPool; 24 | private DiskManager diskManager; 25 | private static final int MAX_PAGES = 4000; // Example max pages 26 | private static final int K = 30; 27 | private static final String btreeFilePath = "test.btree"; // Example file path for the B-tree 28 | Template keyType; 29 | Template valueType; 30 | 31 | @Before 32 | public void setUp() { 33 | CleanUp(); 34 | // Initialize the DiskManager 35 | diskManager = new BasicDiskManager(); 36 | // Initialize the buffer pool with a size of 10 pages 37 | bufferPool = new BufferPool(MAX_PAGES, K, diskManager); 38 | keyType = new Template(Integer.class, Integer.class); 39 | valueType = new Template(Integer.class); 40 | // Initialize the B-tree 41 | btree = new Btree(keyType, valueType, btreeFilePath, Globals.INVALID_PAGE_ID, bufferPool); 42 | } 43 | 44 | @After 45 | public void CleanUp() { 46 | // monitor.interrupt(); 47 | // delete the test file 48 | java.io.File file = new java.io.File("storage/" + btreeFilePath); 49 | if (file.exists()) { 50 | if (!file.delete()) { 51 | fail("Failed to delete test file: " + btreeFilePath); 52 | } 53 | } 54 | } 55 | 56 | private Compositekey makeCompositekey(int age, int salary, Template type) { 57 | Compositekey key = new Compositekey(type); 58 | key.set(0, age, Integer.class); 59 | key.set(1, salary, Integer.class); 60 | return key; 61 | } 62 | 63 | private Compositekey makeCompositekey(int val, Template type) { 64 | Compositekey key = new Compositekey(type); 65 | key.set(0, val, Integer.class); 66 | return key; 67 | } 68 | 69 | @Test 70 | public void testCompositeKey() throws Exception { 71 | double startTime = (double) System.currentTimeMillis(); 72 | int keysnumber = 1_000_000; 73 | for (int i = 0; i < keysnumber; i++) { 74 | try { 75 | btree.insert(makeCompositekey(i, keysnumber - i, keyType), makeCompositekey(i, valueType)); 76 | } catch (Exception e) { 77 | e.printStackTrace(); 78 | fail(); 79 | } 80 | } 81 | 82 | for (int i = 0; i < keysnumber; i++) { 83 | try { 84 | Compositekey val = btree.get(makeCompositekey(i, keysnumber - i, keyType)); 85 | assertEquals(0, val.compareTo(makeCompositekey(i, valueType))); 86 | } catch (Exception e) { 87 | e.printStackTrace(); 88 | fail(); 89 | } 90 | } 91 | double endTime = System.currentTimeMillis(); 92 | double fTime = 
(endTime - startTime) / (double) 1000; 93 | System.out.println("test testCompositeKey done in: " + fTime + "s"); 94 | } 95 | 96 | @Test 97 | public void testConcurrencyRand() throws Exception { 98 | int itrs = 1; 99 | int keysnumber = 1_000_000; 100 | for (int itr = 1; itr <= itrs; itr++) { 101 | setUp(); 102 | double startTime = (double) System.currentTimeMillis(); 103 | 104 | int writersCnt = 100; 105 | int readersCnt = 100; 106 | List threads = new ArrayList<>(); 107 | int op = 10000; 108 | for (int i = 0; i < writersCnt; i++) { 109 | final int end = op * i; 110 | Thread writer = 111 | new Thread( 112 | () -> { 113 | for (int key = end - op; key < end; key++) { 114 | try { 115 | btree.insert( 116 | makeCompositekey(key, keysnumber - key, keyType), 117 | makeCompositekey(key, valueType)); 118 | } catch (Exception e) { 119 | e.printStackTrace(); 120 | fail(); 121 | } 122 | } 123 | }); 124 | threads.add(writer); 125 | } 126 | 127 | Collections.shuffle(threads); 128 | for (Thread thread : threads) { 129 | thread.start(); 130 | } 131 | 132 | for (Thread thread : threads) { 133 | thread.join(); 134 | } 135 | threads = new ArrayList<>(); 136 | 137 | for (int i = 0; i < readersCnt; i++) { 138 | final int end = op * i; 139 | Thread reader = 140 | new Thread( 141 | () -> { 142 | for (int key = end - op; key < end; key++) { 143 | try { 144 | Compositekey val = 145 | btree.get(makeCompositekey(key, keysnumber - key, keyType)); 146 | if (val.compareTo(makeCompositekey(key, valueType)) != 0) { 147 | System.out.println("found :" + val.getVal(0)); 148 | System.out.println("expected :" + key); 149 | } 150 | assertEquals(0, val.compareTo(makeCompositekey(key, valueType))); 151 | } catch (Exception e) { 152 | System.out.println("thread " + end / op + ": expected ->" + key); 153 | e.printStackTrace(); 154 | fail(); 155 | } 156 | } 157 | }); 158 | threads.add(reader); 159 | } 160 | 161 | Collections.shuffle(threads); 162 | for (Thread thread : threads) { 163 | thread.start(); 164 | } 165 | 166 | for (Thread thread : threads) { 167 | thread.join(); 168 | } 169 | 170 | double endTime = System.currentTimeMillis(); 171 | endTime = System.currentTimeMillis(); 172 | double fTime = (endTime - startTime) / (double) 1000; 173 | System.out.println("test testConcurrency done itr " + itr + " : " + fTime + "s"); 174 | } 175 | } 176 | } 177 | -------------------------------------------------------------------------------- /src/test/bufferpool/FrameTest.java: -------------------------------------------------------------------------------- 1 | package test.bufferpool; 2 | 3 | import static org.junit.Assert.*; 4 | 5 | import bufferpool.Frame; 6 | import java.util.concurrent.CountDownLatch; 7 | import java.util.concurrent.CyclicBarrier; 8 | import java.util.concurrent.atomic.AtomicBoolean; 9 | import java.util.concurrent.atomic.AtomicInteger; 10 | import org.junit.Before; 11 | import org.junit.Test; 12 | 13 | public class FrameTest { 14 | private Frame frame; 15 | private static final int FRAME_ID = 1; 16 | private static final int PAGE_ID = 2; 17 | private static final String FILE_NAME = "test.db"; 18 | private static final int PAGE_SIZE = 4096; 19 | 20 | @Before 21 | public void setUp() { 22 | frame = new Frame(FRAME_ID, PAGE_ID, FILE_NAME); 23 | } 24 | 25 | @Test 26 | public void testConstructor() { 27 | assertEquals(FRAME_ID, frame.getFrameId()); 28 | assertEquals(PAGE_ID, frame.getPageId()); 29 | assertEquals(FILE_NAME, frame.getFileName()); 30 | assertFalse(frame.isDirty()); 31 | assertEquals(0, frame.getPinCount()); 
32 | assertEquals(PAGE_SIZE, frame.getData().length); 33 | } 34 | 35 | @Test 36 | public void testNewFrame() { 37 | long newPageId = 3; 38 | String newFileName = "new.db"; 39 | frame.newFrame(newPageId, newFileName); 40 | 41 | assertEquals(newPageId, frame.getPageId()); 42 | assertEquals(newFileName, frame.getFileName()); 43 | assertEquals(0, frame.getPinCount()); 44 | } 45 | 46 | @Test 47 | public void testPinCount() { 48 | assertEquals(0, frame.getPinCount()); 49 | assertEquals(1, frame.addPin()); 50 | assertEquals(2, frame.addPin()); 51 | assertEquals(1, frame.removePin()); 52 | assertEquals(0, frame.removePin()); 53 | } 54 | 55 | @Test 56 | public void testDirtyFlag() { 57 | assertFalse(frame.isDirty()); 58 | frame.setDirty(true); 59 | assertTrue(frame.isDirty()); 60 | frame.setDirty(false); 61 | assertFalse(frame.isDirty()); 62 | } 63 | 64 | @Test 65 | public void testDataAccess() { 66 | byte[] data = frame.getData(); 67 | assertEquals(PAGE_SIZE, data.length); 68 | 69 | // Modify data 70 | data[0] = 42; 71 | assertArrayEquals(data, frame.getData()); 72 | } 73 | 74 | @Test 75 | public void testConcurrentPinCount() throws InterruptedException { 76 | int numThreads = 10; 77 | int pinsPerThread = 100; 78 | CountDownLatch startLatch = new CountDownLatch(1); 79 | CountDownLatch doneLatch = new CountDownLatch(numThreads); 80 | 81 | Thread[] threads = new Thread[numThreads]; 82 | for (int i = 0; i < numThreads; i++) { 83 | threads[i] = 84 | new Thread( 85 | () -> { 86 | try { 87 | startLatch.await(); 88 | for (int j = 0; j < pinsPerThread; j++) { 89 | frame.addPin(); 90 | frame.removePin(); 91 | } 92 | } catch (InterruptedException e) { 93 | Thread.currentThread().interrupt(); 94 | } finally { 95 | doneLatch.countDown(); 96 | } 97 | }); 98 | threads[i].start(); 99 | } 100 | 101 | startLatch.countDown(); 102 | doneLatch.await(); 103 | 104 | assertEquals(0, frame.getPinCount()); 105 | } 106 | 107 | @Test 108 | public void testConcurrentReadLocks() throws InterruptedException { 109 | int numReaders = 5; 110 | CyclicBarrier barrier = new CyclicBarrier(numReaders); 111 | CountDownLatch doneLatch = new CountDownLatch(numReaders); 112 | AtomicInteger activeReaders = new AtomicInteger(0); 113 | AtomicBoolean error = new AtomicBoolean(false); 114 | 115 | Thread[] readers = new Thread[numReaders]; 116 | for (int i = 0; i < numReaders; i++) { 117 | readers[i] = 118 | new Thread( 119 | () -> { 120 | try { 121 | barrier.await(); 122 | frame.lockRead(); 123 | int currentReaders = activeReaders.incrementAndGet(); 124 | // Multiple readers should be able to read simultaneously 125 | if (currentReaders < 1) { 126 | error.set(true); 127 | } 128 | Thread.sleep(10); 129 | activeReaders.decrementAndGet(); 130 | frame.unlockRead(); 131 | } catch (Exception e) { 132 | error.set(true); 133 | } finally { 134 | doneLatch.countDown(); 135 | } 136 | }); 137 | readers[i].start(); 138 | } 139 | 140 | doneLatch.await(); 141 | assertFalse("Concurrent read lock error occurred", error.get()); 142 | } 143 | 144 | @Test 145 | public void testReadWriteLockExclusion() throws InterruptedException { 146 | CountDownLatch writerStarted = new CountDownLatch(1); 147 | CountDownLatch readerDone = new CountDownLatch(1); 148 | AtomicBoolean error = new AtomicBoolean(false); 149 | 150 | // Start writer thread 151 | Thread writer = 152 | new Thread( 153 | () -> { 154 | try { 155 | frame.lockWrite(); 156 | writerStarted.countDown(); 157 | try { 158 | Thread.sleep(100); 159 | } catch (InterruptedException e) { 160 | 
Thread.currentThread().interrupt(); 161 | } finally { 162 | frame.unlockWrite(); 163 | } 164 | } catch (Exception e) { 165 | fail(); 166 | } 167 | }); 168 | 169 | // Start reader thread 170 | Thread reader = 171 | new Thread( 172 | () -> { 173 | try { 174 | writerStarted.await(); 175 | frame.lockRead(); 176 | frame.unlockRead(); 177 | } catch (InterruptedException e) { 178 | Thread.currentThread().interrupt(); 179 | } finally { 180 | readerDone.countDown(); 181 | } 182 | }); 183 | 184 | writer.start(); 185 | reader.start(); 186 | 187 | readerDone.await(); 188 | assertFalse("Reader acquired lock while writer was active", error.get()); 189 | } 190 | } 191 | -------------------------------------------------------------------------------- /src/types/Key.java: -------------------------------------------------------------------------------- 1 | package types; 2 | 3 | import java.nio.ByteBuffer; 4 | 5 | // types and interfaces 6 | public class Key { 7 | byte[] val; 8 | Class> type; 9 | MemoryCodec> codec; 10 | 11 | public Key(byte[] val, Class> type) { 12 | this.val = val; 13 | this.type = type; 14 | codec = getCodec(type); 15 | } 16 | 17 | public Key(Class> type) { 18 | this.type = type; 19 | codec = getCodec(type); 20 | } 21 | 22 | public byte[] get() { 23 | return val; 24 | } 25 | 26 | public void set(byte[] val) { 27 | this.val = val; 28 | } 29 | 30 | public int compareTo(Key rhs) { 31 | if (type != rhs.type) { 32 | return Integer.MIN_VALUE; 33 | } 34 | return getVal().compareTo(rhs.getVal()); 35 | } 36 | 37 | @SuppressWarnings("unchecked") 38 | public > T getVal() { 39 | if (val == null) { 40 | throw new NullPointerException(); 41 | } 42 | ByteBuffer buffer = ByteBuffer.wrap(val); 43 | if (type == Integer.class) { 44 | return (T) Integer.valueOf(buffer.getInt()); 45 | } else if (type == Long.class) { 46 | return (T) Long.valueOf(buffer.getLong()); 47 | } else if (type == Double.class) { 48 | return (T) Double.valueOf(buffer.getDouble()); 49 | } else if (type == Short.class) { 50 | return (T) Short.valueOf(buffer.getShort()); 51 | } else if (type == Byte.class) { 52 | return (T) Byte.valueOf(buffer.get()); 53 | } else if (type == Float.class) { 54 | return (T) Float.valueOf(buffer.getFloat()); 55 | } else if (type == Json.class) { 56 | return (T) Json.valueOf(val) 57 | } 58 | throw new IllegalArgumentException("Unsupported type: " + type.getName()); 59 | } 60 | 61 | public int byteSize() { 62 | return codec.byteSize(); 63 | } 64 | 65 | public void readVal(ByteBuffer buf) { 66 | val = codec.readRaw(buf); 67 | } 68 | 69 | public void write(ByteBuffer buf) { 70 | codec.write(buf, val); 71 | } 72 | 73 | public interface MemoryCodec { 74 | /** Number of bytes per element */ 75 | int byteSize(); 76 | 77 | /** Read the element at array‐index i from the buffer */ 78 | T read(ByteBuffer buf); 79 | 80 | /** Read the element at array‐index i from the buffer */ 81 | byte[] readRaw(ByteBuffer buf); 82 | 83 | /** Write the element at array‐index i into the buffer */ 84 | void write(ByteBuffer buf, byte[] value); 85 | } 86 | 87 | // coders for primitive types // Todo: add StringCodec 88 | @SuppressWarnings("unchecked") 89 | public static MemoryCodec getCodec(Class type) { 90 | if (type == Integer.class) { 91 | return (MemoryCodec) new IntCodec(); 92 | } else if (type == Long.class) { 93 | return (MemoryCodec) new LongCodec(); 94 | } else if (type == Double.class) { 95 | return (MemoryCodec) new DoubleCodec(); 96 | } else if (type == Short.class) { 97 | return (MemoryCodec) new ShortCodec(); 98 | } else if 
(type == Byte.class) { 99 | return (MemoryCodec) new ByteCodec(); 100 | } else if (type == Float.class) { 101 | return (MemoryCodec) new FloatCodec(); 102 | } 103 | throw new IllegalArgumentException("Unsupported type: " + type.getName()); 104 | } 105 | 106 | public static class IntCodec implements MemoryCodec { 107 | public int byteSize() { 108 | return Integer.BYTES; 109 | } 110 | 111 | public Integer read(ByteBuffer buf) { 112 | return buf.getInt(); 113 | } 114 | 115 | public byte[] readRaw(ByteBuffer buf) { 116 | byte[] data = new byte[byteSize()]; 117 | buf.get(data); 118 | return data; 119 | } 120 | 121 | public void write(ByteBuffer buf, byte[] value) { 122 | buf.put(value); 123 | } 124 | } 125 | 126 | public static class LongCodec implements MemoryCodec { 127 | public int byteSize() { 128 | return Long.BYTES; 129 | } 130 | 131 | public Long read(ByteBuffer buf) { 132 | return buf.getLong(); 133 | } 134 | 135 | public byte[] readRaw(ByteBuffer buf) { 136 | byte[] data = new byte[byteSize()]; 137 | buf.get(data); 138 | return data; 139 | } 140 | 141 | public void write(ByteBuffer buf, byte[] value) { 142 | buf.put(value); 143 | } 144 | } 145 | 146 | public static class DoubleCodec implements MemoryCodec { 147 | public int byteSize() { 148 | return Double.BYTES; 149 | } 150 | 151 | public Double read(ByteBuffer buf) { 152 | return buf.getDouble(); 153 | } 154 | 155 | public byte[] readRaw(ByteBuffer buf) { 156 | byte[] data = new byte[byteSize()]; 157 | buf.get(data); 158 | return data; 159 | } 160 | 161 | public void write(ByteBuffer buf, byte[] value) { 162 | buf.put(value); 163 | } 164 | } 165 | 166 | public static class ShortCodec implements MemoryCodec { 167 | public int byteSize() { 168 | return Short.BYTES; 169 | } 170 | 171 | public Short read(ByteBuffer buf) { 172 | return buf.getShort(); 173 | } 174 | 175 | public byte[] readRaw(ByteBuffer buf) { 176 | byte[] data = new byte[byteSize()]; 177 | buf.get(data); 178 | return data; 179 | } 180 | 181 | public void write(ByteBuffer buf, byte[] value) { 182 | buf.put(value); 183 | } 184 | } 185 | 186 | public static class ByteCodec implements MemoryCodec { 187 | public int byteSize() { 188 | return Byte.BYTES; 189 | } 190 | 191 | public Byte read(ByteBuffer buf) { 192 | return buf.get(); 193 | } 194 | 195 | public byte[] readRaw(ByteBuffer buf) { 196 | byte[] data = new byte[byteSize()]; 197 | buf.get(data); 198 | return data; 199 | } 200 | 201 | public void write(ByteBuffer buf, byte[] value) { 202 | buf.put(value); 203 | } 204 | } 205 | 206 | public static class FloatCodec implements MemoryCodec { 207 | public int byteSize() { 208 | return Float.BYTES; 209 | } 210 | 211 | public Float read(ByteBuffer buf) { 212 | return buf.getFloat(); 213 | } 214 | 215 | public byte[] readRaw(ByteBuffer buf) { 216 | byte[] data = new byte[byteSize()]; 217 | buf.get(data); 218 | return data; 219 | } 220 | 221 | public void write(ByteBuffer buf, byte[] value) { 222 | buf.put(value); 223 | } 224 | } 225 | } 226 | -------------------------------------------------------------------------------- /src/test/bufferpool/GuardTest.java: -------------------------------------------------------------------------------- 1 | package test.bufferpool; 2 | 3 | import static org.junit.Assert.*; 4 | 5 | import bufferpool.*; 6 | import java.util.concurrent.CountDownLatch; 7 | import java.util.concurrent.CyclicBarrier; 8 | import java.util.concurrent.TimeUnit; 9 | import java.util.concurrent.atomic.AtomicBoolean; 10 | import java.util.concurrent.atomic.AtomicInteger; 
11 | import java.util.concurrent.locks.ReentrantLock; 12 | import org.junit.Before; 13 | import org.junit.Test; 14 | 15 | public class GuardTest { 16 | private Frame frame; 17 | private Replacer replacer; 18 | private ReentrantLock bpmLatch; 19 | private static final int FRAME_ID = 1; 20 | private static final int PAGE_ID = 2; 21 | private static final String FILE_NAME = "test.db"; 22 | private static final int K_DISTANCE = 3; 23 | 24 | @Before 25 | public void setUp() { 26 | frame = new Frame(FRAME_ID, PAGE_ID, FILE_NAME); 27 | replacer = new LRU(K_DISTANCE); 28 | bpmLatch = new ReentrantLock(); 29 | } 30 | 31 | @Test 32 | public void testBasicGuardCreation() { 33 | ReadGuard readGuard = new ReadGuard(FRAME_ID, frame, replacer, bpmLatch); 34 | assertEquals(1, frame.getPinCount()); 35 | readGuard.close(); 36 | assertEquals(0, frame.getPinCount()); 37 | 38 | WriteGuard writeGuard = new WriteGuard(FRAME_ID, frame, replacer, bpmLatch); 39 | assertEquals(1, frame.getPinCount()); 40 | writeGuard.close(); 41 | assertEquals(0, frame.getPinCount()); 42 | } 43 | 44 | @Test 45 | public void testReadWriteGuardDataAccess() { 46 | WriteGuard writeGuard = new WriteGuard(FRAME_ID, frame, replacer, bpmLatch); 47 | byte[] data = writeGuard.getDataMut(); 48 | data[0] = 42; 49 | assertTrue(frame.isDirty()); 50 | writeGuard.close(); 51 | 52 | ReadGuard readGuard = new ReadGuard(FRAME_ID, frame, replacer, bpmLatch); 53 | assertEquals(42, readGuard.getData().get(0)); 54 | readGuard.close(); 55 | } 56 | 57 | @Test 58 | public void testMultipleReadGuards() throws InterruptedException { 59 | int numReaders = 5; 60 | CyclicBarrier barrier = new CyclicBarrier(numReaders); 61 | CountDownLatch doneLatch = new CountDownLatch(numReaders); 62 | AtomicInteger activeReaders = new AtomicInteger(0); 63 | AtomicBoolean error = new AtomicBoolean(false); 64 | 65 | Thread[] readers = new Thread[numReaders]; 66 | for (int i = 0; i < numReaders; i++) { 67 | readers[i] = 68 | new Thread( 69 | () -> { 70 | try { 71 | barrier.await(); 72 | ReadGuard guard = new ReadGuard(FRAME_ID, frame, replacer, bpmLatch); 73 | int readerscount = activeReaders.incrementAndGet(); 74 | if (readerscount < 1) { 75 | error.set(true); 76 | } 77 | Thread.sleep(10); 78 | activeReaders.decrementAndGet(); 79 | guard.close(); 80 | } catch (Exception e) { 81 | error.set(true); 82 | } finally { 83 | doneLatch.countDown(); 84 | } 85 | }); 86 | readers[i].start(); 87 | } 88 | 89 | assertTrue(doneLatch.await(1, TimeUnit.SECONDS)); 90 | assertFalse("Concurrent read guard error occurred", error.get()); 91 | assertEquals(0, frame.getPinCount()); 92 | } 93 | 94 | @Test 95 | public void testWriteGuardExclusion() throws InterruptedException { 96 | CountDownLatch writerStarted = new CountDownLatch(1); 97 | CountDownLatch readerStarted = new CountDownLatch(1); 98 | AtomicBoolean error = new AtomicBoolean(false); 99 | 100 | // Start writer thread 101 | Thread writer = 102 | new Thread( 103 | () -> { 104 | WriteGuard guard = new WriteGuard(FRAME_ID, frame, replacer, bpmLatch); 105 | writerStarted.countDown(); 106 | try { 107 | Thread.sleep(100); 108 | } catch (InterruptedException e) { 109 | Thread.currentThread().interrupt(); 110 | } finally { 111 | guard.close(); 112 | } 113 | }); 114 | 115 | // Start reader thread 116 | Thread reader = 117 | new Thread( 118 | () -> { 119 | try { 120 | writerStarted.await(); 121 | ReadGuard guard = new ReadGuard(FRAME_ID, frame, replacer, bpmLatch); 122 | error.set(true); // Should not reach here while writer holds lock 123 | 
readerStarted.countDown(); 124 | guard.close(); 125 | } catch (InterruptedException e) { 126 | Thread.currentThread().interrupt(); 127 | } 128 | }); 129 | 130 | writer.start(); 131 | reader.start(); 132 | 133 | // Wait for a short time to ensure reader is blocked 134 | assertFalse( 135 | "Reader should be blocked by writer", readerStarted.await(50, TimeUnit.MILLISECONDS)); 136 | assertFalse("Reader should not have acquired lock while writer active", error.get()); 137 | } 138 | 139 | @Test 140 | public void testEvictionStateManagement() { 141 | WriteGuard guard = new WriteGuard(FRAME_ID, frame, replacer, bpmLatch); 142 | 143 | // Frame should not be evictable while guard is active 144 | assertEquals(-1, replacer.evict()); 145 | 146 | // Frame should become evictable after guard is closed 147 | guard.close(); 148 | assertEquals(FRAME_ID, replacer.evict()); 149 | } 150 | 151 | @Test 152 | public void testConcurrentGuardCreation() throws InterruptedException { 153 | int numThreads = 10; 154 | CountDownLatch startLatch = new CountDownLatch(1); 155 | CountDownLatch doneLatch = new CountDownLatch(2 * numThreads); 156 | AtomicInteger totalPinCount = new AtomicInteger(0); 157 | 158 | Thread[] threads = new Thread[2 * numThreads]; 159 | for (int i = 0; i < numThreads; i++) { 160 | threads[i] = 161 | new Thread( 162 | () -> { 163 | try { 164 | startLatch.await(); 165 | ReadGuard guard = new ReadGuard(FRAME_ID, frame, replacer, bpmLatch); 166 | totalPinCount.incrementAndGet(); 167 | Thread.sleep(10); 168 | guard.close(); 169 | totalPinCount.decrementAndGet(); 170 | } catch (InterruptedException e) { 171 | Thread.currentThread().interrupt(); 172 | } finally { 173 | doneLatch.countDown(); 174 | } 175 | }); 176 | threads[i].start(); 177 | 178 | threads[i + numThreads] = 179 | new Thread( 180 | () -> { 181 | try { 182 | startLatch.await(); 183 | WriteGuard guard = new WriteGuard(FRAME_ID, frame, replacer, bpmLatch); 184 | totalPinCount.incrementAndGet(); 185 | Thread.sleep(10); 186 | guard.close(); 187 | totalPinCount.decrementAndGet(); 188 | } catch (InterruptedException e) { 189 | Thread.currentThread().interrupt(); 190 | } finally { 191 | doneLatch.countDown(); 192 | } 193 | }); 194 | threads[i + numThreads].start(); 195 | } 196 | 197 | startLatch.countDown(); 198 | assertTrue(doneLatch.await(1, TimeUnit.SECONDS)); 199 | assertEquals(0, totalPinCount.get()); 200 | assertEquals(0, frame.getPinCount()); 201 | } 202 | } 203 | -------------------------------------------------------------------------------- /src/diskmanager/BasicDiskManager.java: -------------------------------------------------------------------------------- 1 | package diskmanager; 2 | 3 | import globals.Globals; 4 | import java.io.IOException; 5 | import java.util.LinkedList; 6 | import java.util.List; 7 | import java.util.Map; 8 | import java.util.concurrent.BlockingQueue; 9 | import java.util.concurrent.ConcurrentHashMap; 10 | import java.util.concurrent.LinkedBlockingQueue; 11 | import java.util.concurrent.locks.Lock; 12 | import java.util.concurrent.locks.ReentrantLock; 13 | 14 | public class BasicDiskManager implements DiskManager { 15 | 16 | private Map files; 17 | private Map> filesFreePages; 18 | private Map resizeLocks; 19 | private int fileCount; 20 | private BlockingQueue requestQueue; 21 | private Thread mainThread; 22 | 23 | private final String storageDir = "storage/"; 24 | 25 | /** defualt constructre todo: make new constructors based on a config */ 26 | public BasicDiskManager() throws NullPointerException { 27 | 
this.files = new ConcurrentHashMap(); 28 | this.filesFreePages = new ConcurrentHashMap<>(); 29 | this.resizeLocks = new ConcurrentHashMap(); 30 | this.fileCount = 0; 31 | this.requestQueue = new LinkedBlockingQueue(100); // capped to 100 requests 32 | mainThread = 33 | new Thread( 34 | () -> { // start the worker that would fetch the requests and start a thread for each 35 | try { 36 | run(); 37 | } catch (InterruptedException e) { 38 | Thread.currentThread().interrupt(); // Restore interrupted status 39 | e.printStackTrace(); // Log the exception 40 | } 41 | }); 42 | mainThread.start(); 43 | } 44 | 45 | /** 46 | * closes all files and invalidates the requestQueue and the files map; used as a destructor 47 | * 48 | * @throws NullPointerException if the object is used after calling the close method for the first 49 | * time 50 | */ 51 | public void close() throws NullPointerException { 52 | mainThread.interrupt(); 53 | for (DiskRequest diskRequest : requestQueue) { // notify all that requests are not done 54 | diskRequest.finish.complete(false); 55 | } 56 | requestQueue = null; 57 | files.forEach( 58 | (key, value) -> { 59 | try { 60 | value.close(); 61 | } catch (IOException e) { 62 | Thread.currentThread().interrupt(); // Restore interrupted status 63 | e.printStackTrace(); // Log the exception 64 | } 65 | }); 66 | files = null; 67 | fileCount = -1; 68 | mainThread = null; 69 | } 70 | 71 | /** 72 | * the main thread runs here: it fetches a request from the queue and spins up a thread to handle it 73 | * 74 | * @throws InterruptedException while fetching a request 75 | * @throws NullPointerException if used after a close call 76 | */ 77 | private void run() throws InterruptedException, NullPointerException { 78 | while (!Thread.currentThread().isInterrupted()) { 79 | try { 80 | DiskRequest currentRequest = requestQueue.take(); 81 | new Thread( 82 | () -> { 83 | DiskFile file = this.files.get(currentRequest.fileName); 84 | if (file == null) { 85 | try { 86 | open(currentRequest.fileName); 87 | file = this.files.get(currentRequest.fileName); 88 | } catch (Exception e) { 89 | Thread.currentThread().interrupt(); // Restore interrupted status 90 | e.printStackTrace(); // Log the exception 91 | return; 92 | } 93 | } 94 | 95 | if (currentRequest.isWrite) { 96 | try { 97 | file.writePage(currentRequest.pageID, currentRequest.data); 98 | currentRequest.finish.complete(true); 99 | } catch (IOException e) { 100 | e.printStackTrace(); // Log the exception 101 | Thread.currentThread().interrupt(); // Restore interrupted status 102 | } 103 | } else { 104 | try { 105 | file.readPage(currentRequest.pageID, currentRequest.data); 106 | currentRequest.finish.complete(true); 107 | } catch (IOException e) { 108 | e.printStackTrace(); // Log the exception 109 | Thread.currentThread().interrupt(); // Restore interrupted status 110 | } 111 | } 112 | }) 113 | .start(); 114 | } catch (InterruptedException e) { 115 | Thread.currentThread().interrupt(); 116 | break; 117 | } 118 | } 119 | } 120 | 121 | /** 122 | * puts the user request into the queue to be processed 123 | * 124 | * @param request the request metadata 125 | * @throws InterruptedException while pushing a new request into the queue 126 | * @throws NullPointerException if used after a close call 127 | */ 128 | public void pushRequest(DiskRequest request) throws InterruptedException, NullPointerException { 129 | requestQueue.put(request); 130 | } 131 | 132 | /** 133 | * allocate a new page in the passed file 134 | * 135 | * @param fileName the file which will be 
extended 136 | * @return pageId of the allocated page 137 | * @throws IOException while allocating a page 138 | * @throws NullPointerException if used after a close call 139 | */ 140 | public long allocatePage(String fileName) throws IOException, NullPointerException { 141 | if (!files.containsKey(fileName)) { 142 | DiskFile file = new RandomAccessDiskFile(storageDir + fileName); 143 | filesFreePages.put(fileName, new LinkedList()); 144 | files.put(fileName, file); 145 | resizeLocks.put(fileName, new ReentrantLock()); 146 | fileCount++; 147 | } 148 | // check the free page list 149 | Lock lock = resizeLocks.get(fileName); 150 | lock.lock(); 151 | List freePages = filesFreePages.get(fileName); 152 | if (freePages.isEmpty()) { 153 | for (int i = 0; i < Globals.PRE_ALLOCATED_PAGES_COUNT; i++) { 154 | freePages.add(files.get(fileName).allocatePage()); 155 | } 156 | } 157 | long pageID = freePages.getFirst(); 158 | freePages.removeFirst(); 159 | lock.unlock(); 160 | return pageID; 161 | } 162 | 163 | /** 164 | * @return fileCount - number of opened files curruntly held by the disk maneger 165 | */ 166 | public int getFileCount() { 167 | return fileCount; 168 | } 169 | 170 | /** 171 | * get the number of pages in the file 172 | * 173 | * @param fileName 174 | * @return 175 | */ 176 | public long getPageCount(String fileName) { 177 | DiskFile file = files.get(fileName); 178 | if (file == null) { 179 | return 0; 180 | } 181 | return file.getPageCnt(); 182 | } 183 | 184 | /** 185 | * opens a file if not already opened before 186 | * 187 | * @param fileName the name of the file to be opened 188 | * @throws IOException while opening the file 189 | * @throws NullPointerException if used after a close call 190 | */ 191 | public void open(String fileName) throws IOException, NullPointerException { 192 | if (files.containsKey(fileName)) { 193 | return; 194 | } 195 | 196 | DiskFile file = new RandomAccessDiskFile(storageDir + fileName); 197 | DiskFile prev = files.put(fileName, file); 198 | if (prev == null) { 199 | fileCount++; 200 | } 201 | } 202 | } 203 | -------------------------------------------------------------------------------- /src/test/bufferpool/ReplacerTest.java: -------------------------------------------------------------------------------- 1 | package test.bufferpool; 2 | 3 | import static org.junit.Assert.*; 4 | 5 | import bufferpool.LRU; 6 | import bufferpool.Replacer; 7 | import java.util.ArrayList; 8 | import java.util.List; 9 | import java.util.Random; 10 | import java.util.concurrent.atomic.AtomicInteger; 11 | import java.util.concurrent.locks.Lock; 12 | import java.util.concurrent.locks.ReentrantLock; 13 | import org.junit.Before; 14 | import org.junit.Test; 15 | 16 | public class ReplacerTest { 17 | private Replacer replacer; 18 | private static final int K_DISTANCE = 5; 19 | 20 | @Before 21 | public void setUp() { 22 | replacer = new LRU(K_DISTANCE); 23 | } 24 | 25 | @Test 26 | public void testBasicEviction() { 27 | // Record accesses for two frames 28 | replacer.recordAccess(0); 29 | replacer.recordAccess(1); 30 | 31 | // Frame 0 should be evicted as it was accessed earlier 32 | assertEquals(0, replacer.evict()); 33 | } 34 | 35 | @Test 36 | public void testNoEvictionWhenAllUnevictable() { 37 | // Record accesses and make frames unevictable 38 | replacer.recordAccess(0); 39 | replacer.recordAccess(1); 40 | replacer.setEvictable(0, false); 41 | replacer.setEvictable(1, false); 42 | 43 | // Should return -1 when no frames can be evicted 44 | assertEquals(-1, replacer.evict()); 45 | 
} 46 | 47 | @Test 48 | public void testEvictionWithKDistance() { 49 | // Simulate K accesses for frame 0 50 | for (int i = 0; i < K_DISTANCE; i++) { 51 | replacer.recordAccess(0); 52 | } 53 | 54 | // Single access for frame 1 55 | replacer.recordAccess(1); 56 | 57 | // Frame 1 should be evicted as it has fewer accesses 58 | assertEquals(1, replacer.evict()); 59 | } 60 | 61 | @Test 62 | public void testFrameDeletion() { 63 | // Record access and then delete frame 64 | replacer.recordAccess(0); 65 | replacer.deleteFrame(0); 66 | 67 | // After deletion, no frames should be available for eviction 68 | assertEquals(-1, replacer.evict()); 69 | } 70 | 71 | @Test 72 | public void testEvictableStateTransitions() { 73 | replacer.recordAccess(0); 74 | 75 | // Make frame unevictable 76 | replacer.setEvictable(0, false); 77 | assertEquals(-1, replacer.evict()); 78 | 79 | // Make frame evictable again 80 | replacer.setEvictable(0, true); 81 | assertEquals(0, replacer.evict()); 82 | } 83 | 84 | @Test 85 | public void testTieBreakingWithInfiniteKDistance() { 86 | // Record single access for multiple frames 87 | replacer.recordAccess(0); 88 | replacer.recordAccess(1); 89 | replacer.recordAccess(2); 90 | 91 | // All frames have infinite k-distance (less than K accesses) 92 | // Should evict frame 0 as it has the earliest recent access 93 | assertEquals(0, replacer.evict()); 94 | } 95 | 96 | @Test 97 | public void testConcurrentRecordAccess() throws InterruptedException { 98 | int numThreads = 10; 99 | Thread[] threads = new Thread[numThreads]; 100 | 101 | // Create threads that will concurrently record access 102 | for (int i = 0; i < numThreads; i++) { 103 | final int frameId = i; 104 | threads[i] = 105 | new Thread( 106 | () -> { 107 | for (int j = 0; j < 100; j++) { 108 | replacer.recordAccess(frameId); 109 | } 110 | }); 111 | } 112 | 113 | // Start all threads 114 | for (Thread thread : threads) { 115 | thread.start(); 116 | } 117 | 118 | // Wait for all threads to complete 119 | for (Thread thread : threads) { 120 | thread.join(); 121 | } 122 | 123 | // Verify that the first frame has the most recent access and won't be evicted 124 | assertNotEquals(-1, replacer.evict()); 125 | } 126 | 127 | @Test 128 | public void testConcurrentEviction() throws InterruptedException { 129 | // Setup initial state 130 | for (int i = 0; i < 5; i++) { 131 | replacer.recordAccess(i); 132 | } 133 | 134 | int numThreads = 3; 135 | Thread[] threads = new Thread[numThreads]; 136 | final int[] evictedFrames = new int[numThreads]; 137 | 138 | // Create threads that will try to evict frames concurrently 139 | for (int i = 0; i < numThreads; i++) { 140 | final int threadIndex = i; 141 | threads[i] = 142 | new Thread( 143 | () -> { 144 | evictedFrames[threadIndex] = replacer.evict(); 145 | }); 146 | } 147 | 148 | // Start all threads 149 | for (Thread thread : threads) { 150 | thread.start(); 151 | } 152 | 153 | // Wait for all threads to complete 154 | for (Thread thread : threads) { 155 | thread.join(); 156 | } 157 | 158 | // Verify that each frame was evicted exactly once 159 | boolean[] evicted = new boolean[5]; 160 | for (int frameId : evictedFrames) { 161 | if (frameId != -1) { 162 | assertFalse("Frame " + frameId + " was evicted multiple times", evicted[frameId]); 163 | evicted[frameId] = true; 164 | } 165 | } 166 | } 167 | 168 | @Test 169 | public void testConcurrentEvictableStateTransitions() throws InterruptedException { 170 | final int frameId = 0; 171 | replacer.recordAccess(frameId); 172 | 173 | Thread setterThread = 
174 | new Thread( 175 | () -> { 176 | for (int i = 0; i < 100; i++) { 177 | replacer.recordAccess(frameId); 178 | } 179 | }); 180 | 181 | Thread evictorThread = 182 | new Thread( 183 | () -> { 184 | for (int i = 0; i < 100; i++) { 185 | replacer.evict(); 186 | } 187 | }); 188 | 189 | setterThread.start(); 190 | evictorThread.start(); 191 | 192 | setterThread.join(); 193 | evictorThread.join(); 194 | 195 | // Final state should be consistent 196 | replacer.setEvictable(frameId, true); 197 | int evictedFrame = replacer.evict(); 198 | assertTrue( 199 | "Frame should either be evicted or not exist", 200 | evictedFrame == frameId || evictedFrame == -1); 201 | } 202 | 203 | @Test 204 | public void testConcurrentEvictionRecording() throws InterruptedException { 205 | // Setup initial state 206 | for (int i = 0; i < 5; i++) { 207 | replacer.recordAccess(i); 208 | replacer.recordAccess(i); 209 | replacer.recordAccess(i); 210 | } 211 | 212 | class Triple { 213 | int frameId; 214 | int time; 215 | char type; 216 | 217 | Triple(int frameId, int time, char type) { 218 | this.frameId = frameId; 219 | this.time = time; 220 | this.type = type; 221 | } 222 | } 223 | 224 | int numThreads = 10; 225 | Thread[] evicting = new Thread[numThreads]; 226 | Thread[] recording = new Thread[numThreads]; 227 | AtomicInteger currentTime = new AtomicInteger(0); 228 | List<Triple> timeLine = new ArrayList<>(); 229 | Lock listLock = new ReentrantLock(); 230 | // Create threads that will try to evict frames concurrently 231 | for (int i = 0; i < numThreads; i++) { 232 | final int threadIndex = i; 233 | evicting[i] = 234 | new Thread( 235 | () -> { 236 | int evictedFrames = replacer.evict(); 237 | int time = currentTime.incrementAndGet(); 238 | listLock.lock(); 239 | timeLine.add(new Triple(evictedFrames, time, 'E')); 240 | listLock.unlock(); 241 | }); 242 | } 243 | Random random = new Random(); 244 | // Create threads that will record access concurrently 245 | for (int i = 0; i < numThreads; i++) { 246 | recording[i] = 247 | new Thread( 248 | () -> { 249 | int frameId = random.nextInt(5); 250 | replacer.recordAccess(frameId); 251 | int time = currentTime.incrementAndGet(); 252 | listLock.lock(); 253 | timeLine.add(new Triple(frameId, time, 'R')); 254 | listLock.unlock(); 255 | }); 256 | } 257 | 258 | // Start all threads (both the recording and the evicting sets) 259 | for (Thread thread : recording) thread.start(); 260 | for (Thread thread : evicting) thread.start(); 261 | 262 | // Wait for all threads to complete 263 | for (Thread thread : recording) thread.join(); 264 | for (Thread thread : evicting) thread.join(); 265 | 266 | timeLine.sort((a, b) -> Integer.compare(a.time, b.time)); 267 | 268 | // A frame must not be evicted twice without an access recorded in between 269 | char[] state = new char[5]; 270 | for (Triple t : timeLine) { 271 | if (t.frameId == -1) continue; // evict() found nothing evictable 272 | assertFalse(state[t.frameId] == 'E' && t.type == 'E'); 273 | state[t.frameId] = t.type; 274 | } 275 | } 276 | } 277 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Disk Manager - B+ Tree Storage Engine 2 | 3 | A high-performance, concurrent disk-based storage engine written in Java, implementing a B+ tree indexing structure with sophisticated buffer pool management and asynchronous disk I/O operations. 
4 | 5 | ## Project Overview 6 | 7 | This project is a complete database storage engine that provides: 8 | - **B+ Tree Indexing**: Efficient data storage and retrieval with logarithmic time complexity 9 | - **Buffer Pool Management**: Intelligent memory management with LRU eviction policies 10 | - **Concurrent Access Control**: Thread-safe operations with fine-grained locking 11 | - **Asynchronous Disk I/O**: Non-blocking disk operations for improved performance 12 | - **Type System**: Support for multiple data types with efficient serialization 13 | 14 | ## Architecture Overview 15 | 16 | The system follows a layered architecture with clear separation of concerns: 17 | 18 | ``` 19 | ┌─────────────────────────────────────────────────────────────┐ 20 | │ Application Layer │ 21 | ├─────────────────────────────────────────────────────────────┤ 22 | │ Index Manager │ 23 | │ (Collection Management) │ 24 | ├─────────────────────────────────────────────────────────────┤ 25 | │ B+ Tree │ 26 | │ (Indexing & Query Processing) │ 27 | ├─────────────────────────────────────────────────────────────┤ 28 | │ Buffer Pool │ 29 | │ (Memory Management) │ 30 | ├─────────────────────────────────────────────────────────────┤ 31 | │ Disk Manager │ 32 | │ (Persistent Storage) │ 33 | └─────────────────────────────────────────────────────────────┘ 34 | ``` 35 | 36 | ## Core Components 37 | 38 | ### 1. Disk Manager (`src/diskmanager/`) 39 | 40 | **Purpose**: Handles all disk I/O operations with asynchronous request processing. 41 | 42 | **Key Features**: 43 | - **Asynchronous I/O**: Uses a blocking queue and thread pool for non-blocking disk operations 44 | - **File Management**: Automatic file creation and management in the `storage/` directory 45 | - **Page Allocation**: Pre-allocates pages in chunks (1024 pages = 8MB) for better performance 46 | - **Concurrent Access**: Thread-safe operations with per-file resize locks 47 | 48 | **Key Classes**: 49 | - `BasicDiskManager`: Main implementation with request queue processing 50 | - `DiskRequest`: Encapsulates read/write requests with completion futures 51 | - `RandomAccessDiskFile`: File abstraction for page-based I/O 52 | 53 | ### 2. Buffer Pool (`src/bufferpool/`) 54 | 55 | **Purpose**: Manages memory efficiently with caching and eviction policies. 56 | 57 | **Key Features**: 58 | - **LRU Eviction**: Least Recently Used replacement policy with configurable K parameter 59 | - **Pin/Unpin Mechanism**: Pages can be pinned to prevent eviction during operations 60 | - **Guard System**: Read/Write guards provide safe concurrent access to pages 61 | - **Dirty Page Management**: Automatic flushing of modified pages to disk 62 | 63 | **Key Classes**: 64 | - `BufferPool`: Main buffer pool implementation 65 | - `Frame`: Represents a page in memory with metadata 66 | - `LRU`: Implements the LRU replacement algorithm 67 | - `ReadGuard`/`WriteGuard`: Provide safe concurrent access to pages 68 | 69 | ### 3. B+ Tree (`src/btree/`) 70 | 71 | **Purpose**: Primary indexing structure providing efficient data storage and retrieval. 
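A condensed sketch of how the concurrency tests in `src/test/btree/IndexTest.java` exercise this layer is shown below: the tree is built on top of a `BasicDiskManager` and a `BufferPool`, and several writer threads insert disjoint key ranges in parallel. The class name, file name, pool size, and thread counts are illustrative only, and the assertions and timing code of the real test are omitted.

```java
import btree.Btree;
import bufferpool.BufferPool;
import diskmanager.BasicDiskManager;
import diskmanager.DiskManager;
import globals.Globals;
import java.util.ArrayList;
import java.util.List;
import types.Compositekey;
import types.Template;

public class ConcurrentInsertSketch {
  public static void main(String[] args) throws Exception {
    DiskManager diskManager = new BasicDiskManager();
    BufferPool bufferPool = new BufferPool(4000, 30, diskManager);

    // Two-column integer key, single-column integer value, as in IndexTest.
    Template keyType = new Template(Integer.class, Integer.class);
    Template valueType = new Template(Integer.class);
    Btree btree =
        new Btree(keyType, valueType, "example.btree", Globals.INVALID_PAGE_ID, bufferPool);

    // Writers insert disjoint key ranges concurrently; the tree's internal
    // latching is what keeps the concurrent inserts consistent.
    List<Thread> writers = new ArrayList<>();
    int perThread = 10_000;
    for (int t = 0; t < 8; t++) {
      final int start = t * perThread;
      writers.add(
          new Thread(
              () -> {
                for (int k = start; k < start + perThread; k++) {
                  try {
                    Compositekey key = new Compositekey(keyType);
                    key.set(0, k, Integer.class);
                    key.set(1, -k, Integer.class);
                    Compositekey value = new Compositekey(valueType);
                    value.set(0, k, Integer.class);
                    btree.insert(key, value);
                  } catch (Exception e) {
                    throw new RuntimeException(e);
                  }
                }
              }));
    }
    for (Thread w : writers) w.start();
    for (Thread w : writers) w.join();

    bufferPool.close(); // flushes dirty frames and closes the disk manager
  }
}
```

Note that `insert` and `get` are declared to throw `Exception`, so code inside lambdas has to wrap or rethrow, as the test itself does.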
72 | 73 | **Key Features**: 74 | - **Optimistic Concurrency**: Uses optimistic locking for better performance 75 | - **Node Splitting**: Automatic node splitting when capacity is exceeded 76 | - **Range Queries**: Support for cursor-based range scanning 77 | - **Composite Keys**: Support for multi-column keys through `Compositekey` 78 | 79 | **Key Classes**: 80 | - `Btree`: Main B+ tree implementation with insert/search/delete operations 81 | - `BtreeHeader`: Manages tree metadata (root page ID, height) 82 | - `Cursor`: Provides iterator-style access for range queries 83 | 84 | ### 4. Page Management (`src/page/`) 85 | 86 | **Purpose**: Manages the structure of internal and leaf nodes in the B+ tree. 87 | 88 | **Key Features**: 89 | - **Node Types**: Separate implementations for internal and leaf nodes 90 | - **Variable-Length Records**: Efficient storage of variable-sized data 91 | - **Split Operations**: Sophisticated node splitting with sibling redistribution 92 | - **Linked Leaf Nodes**: Leaf nodes are linked for efficient range queries 93 | 94 | **Key Classes**: 95 | - `LeafNode`: Stores actual key-value pairs 96 | - `InternalNode`: Stores routing information for tree navigation 97 | - `TreeNodeHeader`: Common header structure for all node types 98 | 99 | ### 5. Type System (`src/types/`) 100 | 101 | **Purpose**: Provides efficient serialization and comparison for various data types. 102 | 103 | **Key Features**: 104 | - **Primitive Types**: Support for Integer, Long, Double, Float, Short, Byte 105 | - **Composite Keys**: Multi-column keys with proper comparison semantics 106 | - **Memory Codecs**: Efficient binary serialization/deserialization 107 | - **Type Safety**: Compile-time type checking with generic templates 108 | 109 | **Key Classes**: 110 | - `Compositekey`: Multi-column key implementation 111 | - `Template`: Type definition for keys and values 112 | - `Key`: Individual key component with type-specific codecs 113 | 114 | ### 6. Index Manager (`src/indexmanager/`) 115 | 116 | **Purpose**: Provides higher-level index management and collection support. 
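The indexes managed here, like the B+ tree beneath them, operate on the composite keys described in the Type System section above. A minimal sketch of building and comparing a two-column key follows; the column meanings (age, salary) are illustrative, and the column-by-column (lexicographic) ordering of `compareTo` is assumed from how the tree's bound searches use it rather than stated anywhere in this README.

```java
import types.Compositekey;
import types.Template;

public class CompositeKeySketch {
  public static void main(String[] args) throws Exception {
    // A two-column (age, salary) key layout.
    Template personKey = new Template(Integer.class, Integer.class);

    Compositekey k1 = new Compositekey(personKey);
    k1.set(0, 30, Integer.class);     // age
    k1.set(1, 55_000, Integer.class); // salary

    Compositekey k2 = new Compositekey(personKey);
    k2.set(0, 30, Integer.class);
    k2.set(1, 60_000, Integer.class);

    // With equal first columns, the second column is assumed to break the
    // tie, so the comparison should come out negative (k1 sorts before k2).
    System.out.println(k1.compareTo(k2));
  }
}
```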
117 | 118 | **Key Features**: 119 | - **Collection Management**: Organizes indexes by collection name 120 | - **Index Naming**: Uses "collectionName-fieldName" naming convention 121 | - **Lifecycle Management**: Handles index creation and cleanup 122 | 123 | ## Configuration 124 | 125 | Key system parameters are defined in `src/globals/Globals.java`: 126 | 127 | ```java 128 | public static final int PAGE_SIZE = 2 * 4096; // 8KB pages 129 | public static final int CLUSTER_PAGE_SIZE = 4 * 4096; // 16KB clusters 130 | public static final int PRE_ALLOCATED_PAGES_COUNT = 1024; // 8MB pre-allocation 131 | public static final long INVALID_PAGE_ID = -1; // Invalid page marker 132 | ``` 133 | 134 | ## Performance Characteristics 135 | 136 | - **Time Complexity**: O(log n) for search, insert, and delete operations 137 | - **Space Complexity**: Configurable buffer pool size with LRU eviction 138 | - **Concurrency**: Optimistic locking with fine-grained page-level locks 139 | - **I/O Efficiency**: Page-based storage with asynchronous disk operations 140 | 141 | ## Usage Example 142 | 143 | ```java 144 | // Initialize components 145 | DiskManager diskManager = new BasicDiskManager(); 146 | BufferPool bufferPool = new BufferPool(4000, 10, diskManager); 147 | 148 | // Create B+ tree 149 | Template keyType = new Template(Integer.class); 150 | Template valueType = new Template(String.class); 151 | Btree btree = new Btree(keyType, valueType, "myindex", 152 | Globals.INVALID_PAGE_ID, bufferPool); 153 | 154 | // Insert data 155 | Compositekey key = new Compositekey(keyType); 156 | key.set(0, 42, Integer.class); 157 | Compositekey value = new Compositekey(valueType); 158 | value.set(0, "Hello World", String.class); 159 | btree.insert(key, value); 160 | 161 | // Search data 162 | Compositekey result = btree.get(key); 163 | ``` 164 | 165 | ## Build and Run 166 | 167 | ### Prerequisites 168 | - Java 11 or higher 169 | - Maven 3.6 or higher 170 | 171 | ### Building 172 | ```bash 173 | # Compile all source files 174 | javac -d bin $(find src -name "*.java") 175 | 176 | # Or use Maven 177 | mvn compile 178 | ``` 179 | 180 | ### Running Tests 181 | ```bash 182 | # Run with Maven 183 | mvn test 184 | 185 | # Or run manually 186 | java -cp bin org.junit.runner.JUnitCore test.btree.BtreeTest 187 | ``` 188 | 189 | ### Running the Application 190 | ```bash 191 | java -cp bin Main 192 | ``` 193 | 194 | ## Storage Layout 195 | 196 | The system stores data in the `storage/` directory with the following structure: 197 | - Each index is stored as a separate file 198 | - Files use page-based layout (8KB pages) 199 | - Header pages contain metadata (root page ID, tree height) 200 | - Data pages contain either internal nodes or leaf nodes 201 | 202 | ## Thread Safety 203 | 204 | The system is designed for high concurrency: 205 | - **Buffer Pool**: Uses fine-grained locking with per-frame locks 206 | - **Disk Manager**: Asynchronous request processing with thread-safe queues 207 | - **B+ Tree**: Optimistic concurrency control with context-based locking 208 | 209 | ## Testing 210 | 211 | Comprehensive test suites are provided: 212 | - **Unit Tests**: Individual component testing 213 | - **Integration Tests**: Cross-component functionality 214 | - **Performance Tests**: Benchmarking with up to 1M operations 215 | - **Concurrency Tests**: Multi-threaded stress testing 216 | 217 | ## Future Enhancements 218 | 219 | Potential areas for improvement: 220 | - **Compression**: Page-level compression for better storage efficiency 221 | - 
**Logging**: Write-ahead logging for crash recovery 222 | - **Clustering**: Support for clustered indexes 223 | - **String Types**: Enhanced support for variable-length strings 224 | - **Transactions**: ACID transaction support 225 | 226 | ## License 227 | 228 | This project is licensed under the terms specified in the LICENSE file. -------------------------------------------------------------------------------- /src/bufferpool/BufferPool.java: -------------------------------------------------------------------------------- 1 | package bufferpool; 2 | 3 | import diskmanager.DiskManager; 4 | import diskmanager.DiskRequest; 5 | import globals.Globals; 6 | import java.io.Closeable; 7 | import java.io.IOException; 8 | import java.util.AbstractMap.SimpleEntry; 9 | import java.util.HashMap; 10 | import java.util.LinkedList; 11 | import java.util.List; 12 | import java.util.Map; 13 | import java.util.SortedSet; 14 | import java.util.TreeSet; 15 | import java.util.concurrent.CompletableFuture; 16 | import java.util.concurrent.ExecutionException; 17 | import java.util.concurrent.locks.Lock; 18 | import java.util.concurrent.locks.ReentrantLock; 19 | 20 | public class BufferPool implements Closeable { 21 | 22 | public class PageId extends SimpleEntry { 23 | public PageId(String key, Long value) { 24 | super(key, value); 25 | } 26 | } 27 | 28 | private int framesNumber; 29 | private int k; 30 | private Replacer replacer; 31 | private DiskManager diskManager; 32 | private Frame[] frames; 33 | private Map pages; 34 | private List freeFrames; 35 | private Map> deallocatedPages; 36 | private Lock bpmLatch; 37 | 38 | public BufferPool(int size, int k, DiskManager diskManager) { 39 | if (k < 0) { 40 | throw new IllegalArgumentException("k must be positive"); 41 | } 42 | if (size <= 0) { 43 | throw new IllegalArgumentException("size must be positive"); 44 | } 45 | if (diskManager == null) { 46 | throw new NullPointerException("disk manager can not be null"); 47 | } 48 | framesNumber = size; 49 | this.k = k; 50 | this.replacer = new LRU(this.k); 51 | this.diskManager = diskManager; 52 | frames = new Frame[framesNumber]; 53 | freeFrames = new LinkedList(); 54 | pages = new HashMap(); 55 | deallocatedPages = new HashMap>(); 56 | bpmLatch = new ReentrantLock(); 57 | 58 | for (int i = 0; i < size; i++) { 59 | freeFrames.addLast(i); 60 | frames[i] = new Frame(i); 61 | } 62 | } 63 | 64 | /** 65 | * close the buffer pool and all the frames 66 | * 67 | * @throws IOException 68 | */ 69 | public void close() throws IOException, NullPointerException { 70 | // flush all the frames 71 | for (int i = 0; i < framesNumber; i++) { 72 | Frame frame = frames[i]; 73 | if (frame.isDirty()) { 74 | boolean done; 75 | try { 76 | done = diskOp(frame, true); 77 | } catch (InterruptedException | ExecutionException e) { 78 | throw new IOException("Error during disk operation", e); 79 | } 80 | if (!done) { 81 | throw new IOException("can not flush"); 82 | } 83 | } 84 | } 85 | // close the disk manager 86 | diskManager.close(); 87 | // invalidate the buffer pool 88 | for (int i = 0; i < framesNumber; i++) { 89 | frames[i] = null; 90 | } 91 | frames = null; 92 | pages = null; 93 | freeFrames = null; 94 | deallocatedPages = null; 95 | bpmLatch = null; 96 | replacer = null; 97 | framesNumber = -1; 98 | diskManager = null; 99 | } 100 | 101 | /** 102 | * allocate a new page in the passed file and return the pageid if the file has deallocated pages 103 | * it returns one of them to prrevent Fragmentation 104 | * 105 | * @param fileName 106 | * 
@return pageId of allocated page 107 | * @throws IOException 108 | * @throws NullPointerException 109 | */ 110 | public long allocateNewPage(String fileName) throws IOException, NullPointerException { 111 | bpmLatch.lock(); 112 | if (deallocatedPages.containsKey(fileName)) { 113 | SortedSet pages = deallocatedPages.get(fileName); 114 | if (!pages.isEmpty()) { 115 | long pageId = pages.getLast(); 116 | pages.removeLast(); 117 | bpmLatch.unlock(); 118 | return pageId; 119 | } 120 | } 121 | bpmLatch.unlock(); 122 | return diskManager.allocatePage(fileName); 123 | } 124 | 125 | /** 126 | * create a disk request (read/write) push it to the queue and wait for complation 127 | * 128 | * @param frame frame in which will be read into/wrote to 129 | * @param isWrite tells if the request is read or write 130 | * @return true on success 131 | * @throws InterruptedException 132 | * @throws NullPointerException 133 | * @throws ExecutionException 134 | */ 135 | private boolean diskOp(Frame frame, boolean isWrite) 136 | throws InterruptedException, NullPointerException, ExecutionException { 137 | DiskRequest request = 138 | new DiskRequest(frame.getFileName(), frame.getPageId(), frame.getData(), isWrite); 139 | CompletableFuture finish = request.getFuture(); 140 | diskManager.pushRequest(request); 141 | return finish.get(); 142 | } 143 | 144 | /** 145 | * return a free frame from the free frame list or by evicting some unused frame 146 | * 147 | * @return the frame id 148 | * @throws Exception 149 | * @throws InterruptedException 150 | * @throws NullPointerException 151 | * @throws ExecutionException 152 | */ 153 | private int getFrame() 154 | throws Exception, InterruptedException, NullPointerException, ExecutionException { 155 | int frameId; 156 | // if a free frame already exists 157 | if (!freeFrames.isEmpty()) { 158 | frameId = freeFrames.getLast(); 159 | freeFrames.removeLast(); 160 | return frameId; 161 | } 162 | 163 | // try to evict 164 | frameId = replacer.evict(); 165 | if (frameId == -1) { 166 | return frameId; 167 | } 168 | Frame frame = frames[frameId]; 169 | if (frame.isDirty()) { 170 | boolean done = diskOp(frame, true); 171 | if (!done) { 172 | throw new Exception("can not flush"); 173 | } 174 | } 175 | // remove the page from the pages map 176 | PageId pid = new PageId(frame.getFileName(), frame.getPageId()); 177 | if (pages.containsKey(pid)) { 178 | pages.remove(pid); 179 | } 180 | return frameId; 181 | } 182 | 183 | private void recordAccess(Frame frame) { 184 | replacer.recordAccess(frame.getFrameId()); 185 | frame.addPin(); 186 | if (frame.getPinCount() == 1) { 187 | replacer.setEvictable(frame.getFrameId(), false); 188 | } 189 | } 190 | 191 | private boolean isPageIdValid(String fileName, long pageId) { 192 | if (deallocatedPages.containsKey(fileName)) { 193 | SortedSet pages = deallocatedPages.get(fileName); 194 | if (pages.contains(pageId)) { 195 | return false; 196 | } 197 | } 198 | if (diskManager.getPageCount(fileName) <= pageId) { 199 | return false; 200 | } 201 | return true; 202 | } 203 | 204 | /** 205 | * returns a guard(read/write) around a specific page after reading it to a free frame 206 | * 207 | * @param fileName 208 | * @param pageId 209 | * @param isWrite 210 | * @return 211 | * @throws Exception 212 | * @throws InterruptedException 213 | * @throws NullPointerException 214 | * @throws ExecutionException 215 | */ 216 | private Guard getGuard(String fileName, long pageId, boolean isWrite) 217 | throws Exception, InterruptedException, NullPointerException, 
ExecutionException { 218 | bpmLatch.lock(); 219 | if (pageId < 0) { 220 | bpmLatch.unlock(); 221 | throw new IllegalArgumentException("pageId must be positive"); 222 | } 223 | 224 | if (!isPageIdValid(fileName, pageId)) { 225 | bpmLatch.unlock(); 226 | throw new IllegalArgumentException("pageId is not valid"); 227 | } 228 | 229 | if (fileName == null) { 230 | bpmLatch.unlock(); 231 | throw new NullPointerException("fileName can not be null"); 232 | } 233 | 234 | if (fileName.isEmpty()) { 235 | bpmLatch.unlock(); 236 | throw new IllegalArgumentException("fileName can not be empty"); 237 | } 238 | 239 | PageId pid = new PageId(fileName, pageId); 240 | int frameId; 241 | if (pages.containsKey(pid)) { // if the page already in the buffer 242 | frameId = pages.get(pid); 243 | Frame frame = frames[frameId]; 244 | recordAccess(frame); 245 | bpmLatch.unlock(); 246 | Guard guard; 247 | if (isWrite) { 248 | guard = new WriteGuard(frameId, frame, replacer, bpmLatch); 249 | } else { 250 | guard = new ReadGuard(frameId, frame, replacer, bpmLatch); 251 | } 252 | 253 | if (guard.getFrameId() == Globals.INVALID_Frame_ID) { 254 | return null; 255 | } 256 | return guard; 257 | } 258 | 259 | frameId = getFrame(); 260 | if (frameId == -1) { 261 | bpmLatch.unlock(); 262 | return null; 263 | } 264 | 265 | Frame frame = frames[frameId]; 266 | frame.newFrame(pageId, fileName); 267 | if (!diskOp(frame, false)) { 268 | bpmLatch.unlock(); 269 | return null; 270 | } 271 | 272 | pages.put(pid, frameId); 273 | recordAccess(frame); 274 | bpmLatch.unlock(); 275 | Guard guard; 276 | if (isWrite) { 277 | guard = new WriteGuard(frameId, frame, replacer, bpmLatch); 278 | } else { 279 | guard = new ReadGuard(frameId, frame, replacer, bpmLatch); 280 | } 281 | if (guard.getFrameId() == Globals.INVALID_Frame_ID) { 282 | return null; 283 | } 284 | return guard; 285 | } 286 | 287 | /** 288 | * return a read guard 289 | * 290 | * @param fileName 291 | * @param pageId 292 | * @return 293 | * @throws Exception 294 | * @throws InterruptedException 295 | * @throws NullPointerException 296 | * @throws ExecutionException 297 | */ 298 | public ReadGuard getReadGuard(String fileName, long pageId) 299 | throws Exception, InterruptedException, NullPointerException, ExecutionException { 300 | Guard guard = getGuard(fileName, pageId, false); 301 | if (guard == null) { 302 | return null; 303 | } 304 | ReadGuard readGuard = (ReadGuard) guard; 305 | return readGuard; 306 | } 307 | 308 | /** 309 | * returns a write guard 310 | * 311 | * @param fileName 312 | * @param pageId 313 | * @return 314 | * @throws Exception 315 | * @throws InterruptedException 316 | * @throws NullPointerException 317 | * @throws ExecutionException 318 | */ 319 | public WriteGuard getWriteGuard(String fileName, long pageId) 320 | throws Exception, 321 | InterruptedException, 322 | NullPointerException, 323 | ExecutionException, 324 | IllegalArgumentException { 325 | Guard guard = getGuard(fileName, pageId, true); 326 | if (guard == null) { 327 | return null; 328 | } 329 | WriteGuard writeGuard = (WriteGuard) guard; 330 | return writeGuard; 331 | } 332 | 333 | public void deletePage(String fileName, long pageId) { 334 | // if page exists in the pool 335 | PageId pid = new PageId(fileName, pageId); 336 | bpmLatch.lock(); 337 | if (pages.containsKey(pid)) { 338 | int frameId = pages.get(pid); 339 | pages.remove(pid); 340 | replacer.deleteFrame(frameId); 341 | } 342 | 343 | if (!deallocatedPages.containsKey(fileName)) { 344 | deallocatedPages.put(fileName, new TreeSet()); 345 | 
} 346 | 347 | SortedSet fileFreePages = deallocatedPages.get(fileName); 348 | if (fileFreePages == null) { 349 | fileFreePages = new TreeSet(); 350 | } 351 | 352 | fileFreePages.add(pageId); 353 | bpmLatch.unlock(); 354 | } 355 | } 356 | -------------------------------------------------------------------------------- /src/page/InternalNode.java: -------------------------------------------------------------------------------- 1 | package page; 2 | 3 | import bufferpool.BufferPool; 4 | import bufferpool.WriteGuard; 5 | import globals.Globals; 6 | import java.nio.ByteBuffer; 7 | import javax.naming.directory.InvalidAttributesException; 8 | import types.Array; 9 | import types.CompareableArray; 10 | import types.Compositekey; 11 | import types.Template; 12 | 13 | /** 14 | * InternalNode class represents an internal node in a B+ tree. It extends the TreeNode class and is 15 | * used to store keys and child pointers. The keys are used to navigate the tree, while the child 16 | * pointers are pageIds that point to the child nodes. | null |key2 |key3 |...|keyN | 17 | * |pageId1|pageId2|pageId3|...|pageIdN| pageId_i points to the subtree where keys there sutisfy 18 | * key_i <= key < key_(i+1) for i = 0, key_0 = -inf for i = N, key_N = +inf 19 | */ 20 | public class InternalNode extends TreeNodeHeader { 21 | private final short headerSize = 22 | 2 + 1 + 8; // 2 bytes for keysN, 1 byte for type, 8 bytes for pageId 23 | private final Template keyType; 24 | private final Template valueType; 25 | private final short keySize; 26 | private final short valueSize; 27 | private final short maxKeysN; 28 | private final short minKeysN; 29 | private CompareableArray keys; 30 | private Array values; // pageIds of the child nodes 31 | private ByteBuffer buffer; 32 | 33 | public InternalNode(Template keyType, long pageId) { 34 | super(pageId, false); 35 | this.keyType = keyType; 36 | this.valueType = new Template(Long.class); 37 | keySize = keyType.getByteSize(); 38 | valueSize = valueType.getByteSize(); 39 | maxKeysN = (short) ((Globals.PAGE_SIZE - headerSize) / (keySize + valueSize)); 40 | minKeysN = (short) (maxKeysN / 2); 41 | } 42 | 43 | public InternalNode(Template keyType, ByteBuffer rawData) { 44 | this.keyType = keyType; 45 | this.valueType = new Template(Long.class); 46 | keySize = keyType.getByteSize(); 47 | valueSize = valueType.getByteSize(); 48 | maxKeysN = (short) ((Globals.PAGE_SIZE - headerSize) / (keySize + valueSize)); 49 | minKeysN = (short) (maxKeysN / 2); 50 | 51 | buffer = rawData; 52 | this.keysN = buffer.getShort(); 53 | this.isLeaf = buffer.get() == 1; 54 | this.pageId = buffer.getLong(); 55 | 56 | keys = new CompareableArray(new Compositekey(keyType), rawData, headerSize, keysN, maxKeysN); 57 | values = 58 | new Array( 59 | new Compositekey(valueType), rawData, headerSize + maxKeysN * keySize, keysN, maxKeysN); 60 | } 61 | 62 | public InternalNode(Template keyType, byte[] rawData) { 63 | this(keyType, ByteBuffer.wrap(rawData)); 64 | } 65 | 66 | public void writeHeader() { 67 | buffer.rewind(); 68 | buffer.putShort(keysN); 69 | buffer.put((byte) (isLeaf ? 
1 : 0)); 70 | buffer.putLong(pageId); 71 | } 72 | 73 | public Compositekey getChildForKey(Compositekey key) throws InvalidAttributesException { 74 | int index = keys.lowerBound(key, 1); 75 | return values.get(index - 1); 76 | } 77 | 78 | public int getKeyIdx(Compositekey key) throws InvalidAttributesException { 79 | int index = keys.lowerBound(key, 1); 80 | return index; 81 | } 82 | 83 | public boolean insert(Compositekey key, Compositekey value) throws InvalidAttributesException { 84 | if (keysN == maxKeysN) { 85 | return false; // node is full 86 | } 87 | int index = keys.upperBound(key, 1); 88 | keys.insert(index, key); 89 | values.insert(index - 1, value); 90 | keysN++; 91 | writeHeader(); 92 | return true; 93 | } 94 | 95 | public void delete(int index) throws InvalidAttributesException { 96 | keys.delete(index); 97 | values.delete(index); 98 | if (keysN > 0) { 99 | keysN--; 100 | writeHeader(); 101 | } 102 | } 103 | 104 | public void deleteRespective(int index) throws InvalidAttributesException { 105 | keys.delete(index); 106 | values.delete(index - 1); 107 | if (keysN > 0) { 108 | keysN--; 109 | writeHeader(); 110 | } 111 | } 112 | 113 | public WriteGuard split(BufferPool bufferPool, String fileName) { 114 | if (keysN < minKeysN) { 115 | return null; 116 | } 117 | WriteGuard newGuard = null; 118 | try { 119 | long newPageId = bufferPool.allocateNewPage(fileName); 120 | newGuard = bufferPool.getWriteGuard(fileName, newPageId); 121 | InternalNode newNode = new InternalNode(keyType, newGuard.getDataMut()); 122 | newNode.setLeaf(false); 123 | newNode.setPageId(newPageId); 124 | 125 | for (int i = minKeysN; i < keysN; i++) { 126 | newNode.setKey(i - minKeysN, keys.get(i)); 127 | newNode.setValue(i - minKeysN, values.get(i)); 128 | } 129 | newNode.setCompositekeyN((short) (keysN - minKeysN)); 130 | setCompositekeyN(minKeysN); 131 | 132 | newNode.writeHeader(); 133 | writeHeader(); 134 | return newGuard; 135 | } catch (Exception e) { 136 | if (newGuard != null) { 137 | newGuard.close(); 138 | } 139 | e.printStackTrace(); 140 | return null; 141 | } 142 | } 143 | 144 | public boolean redistribute( 145 | String fileName, int index, InternalNode parent, BufferPool bufferPool) throws Exception { 146 | if (index > 1) { 147 | WriteGuard leftGuard = 148 | bufferPool.getWriteGuard(fileName, parent.getValue(index - 2).getVal(0)); 149 | if (leftGuard != null) { 150 | InternalNode leftNode = new InternalNode(keyType, leftGuard.getDataMut()); 151 | if (leftNode.getKeysN() > leftNode.getMinKeysN()) { // can redistribute 152 | // move the last value of the left node to the currunt node 153 | Compositekey value = leftNode.getValue(leftNode.getKeysN() - 1); 154 | Compositekey key = parent.getKey(index - 1); 155 | insert(key, value); 156 | // update the parent before deletion 157 | parent.setKey(index - 1, leftNode.getKey(leftNode.getKeysN() - 1)); 158 | // delete the last key value in left node 159 | leftNode.delete(leftNode.getKeysN() - 1); 160 | leftGuard.close(); 161 | return true; 162 | } 163 | // release the left node lock 164 | leftGuard.close(); 165 | } 166 | } 167 | 168 | // try to redistribute with the right sibling 169 | if (index < parent.getKeysN()) { 170 | WriteGuard rightGuard = 171 | bufferPool.getWriteGuard(fileName, parent.getValue(index).getVal(0)); 172 | if (rightGuard == null) { 173 | return false; 174 | } 175 | 176 | InternalNode rightNode = new InternalNode(keyType, rightGuard.getDataMut()); 177 | if (rightNode.getKeysN() > rightNode.getMinKeysN()) { 178 | Compositekey value = 
rightNode.getValue(0); 179 | Compositekey key = parent.getKey(index); 180 | pushBack(key, value); 181 | // update the parent 182 | parent.setKey(index, rightNode.getKey(1)); 183 | // delete key(1) and val(0) from right node 184 | rightNode.deleteRespective(1); 185 | // release right node 186 | rightGuard.close(); 187 | return true; 188 | } 189 | // release the right guard 190 | rightGuard.close(); 191 | } 192 | return false; 193 | } 194 | 195 | public boolean merge(String fileName, int index, InternalNode parent, BufferPool bufferPool) 196 | throws Exception { 197 | if (index > 1) { 198 | WriteGuard leftGuard = 199 | bufferPool.getWriteGuard(fileName, parent.getValue(index - 2).getVal(0)); 200 | if (leftGuard != null) { 201 | InternalNode leftNode = new InternalNode(keyType, leftGuard.getDataMut()); 202 | if (leftNode.getKeysN() + getKeysN() <= leftNode.getMaxKeysN()) { // can merge 203 | Compositekey value = getValue(0); 204 | Compositekey key = parent.getKey(index - 1); 205 | leftNode.pushBack(key, value); 206 | for (int i = 1; i < getKeysN(); i++) { 207 | leftNode.pushBack(getKey(i), getValue(i)); 208 | } 209 | // delete the page 210 | bufferPool.deletePage(fileName, getPageId()); 211 | // update the parent 212 | parent.delete(index - 1); 213 | // release the left node lock 214 | leftGuard.close(); 215 | return true; 216 | } 217 | // release the left node lock 218 | leftGuard.close(); 219 | } 220 | } 221 | 222 | // try to merge with the right sibling 223 | if (index < parent.getKeysN()) { 224 | WriteGuard rightGuard = 225 | bufferPool.getWriteGuard(fileName, parent.getValue(index).getVal(0)); 226 | if (rightGuard == null) { 227 | return false; 228 | } 229 | 230 | InternalNode rightNode = new InternalNode(keyType, rightGuard.getDataMut()); 231 | if (rightNode.getKeysN() + getKeysN() <= getMaxKeysN()) { 232 | Compositekey value = rightNode.getValue(0); 233 | Compositekey key = parent.getKey(index); 234 | pushBack(key, value); 235 | for (int i = 1; i < rightNode.getKeysN(); i++) { 236 | pushBack(rightNode.getKey(i), rightNode.getValue(i)); 237 | } 238 | // delete the page 239 | bufferPool.deletePage(fileName, rightNode.getPageId()); 240 | // update the parent 241 | parent.delete(index); 242 | // release the right node lock 243 | rightGuard.close(); 244 | return true; 245 | } 246 | // release the right guard 247 | rightGuard.close(); 248 | } 249 | 250 | return false; 251 | } 252 | 253 | public void pushBack(Compositekey key, Compositekey value) throws InvalidAttributesException { 254 | pushBackKey(key); 255 | pushBackValue(value); 256 | keysN++; 257 | writeHeader(); 258 | } 259 | 260 | public void pushBackKey(Compositekey key) throws InvalidAttributesException { 261 | keys.pushBack(key); 262 | } 263 | 264 | public void pushBackValue(Compositekey value) throws InvalidAttributesException { 265 | values.pushBack(value); 266 | } 267 | 268 | // Getters and Setters 269 | public short getHeaderSize() { 270 | return headerSize; 271 | } 272 | 273 | public short getMaxKeysN() { 274 | return maxKeysN; 275 | } 276 | 277 | public short getMinKeysN() { 278 | return minKeysN; 279 | } 280 | 281 | public short getKeySize() { 282 | return keySize; 283 | } 284 | 285 | public Template getKeyType() { 286 | return keyType; 287 | } 288 | 289 | public CompareableArray getCompositekey() { 290 | return keys; 291 | } 292 | 293 | public Array getValues() { 294 | return values; 295 | } 296 | 297 | public void setCompositekey(CompareableArray keys) { 298 | this.keys = keys; 299 | } 300 | 301 | public void 
setValues(Array values) { 302 | this.values = values; 303 | } 304 | 305 | public void setBuffer(ByteBuffer buffer) { 306 | this.buffer = buffer; 307 | } 308 | 309 | public ByteBuffer getBuffer() { 310 | return buffer; 311 | } 312 | 313 | public void setBuffer(byte[] rawData) { 314 | this.buffer = ByteBuffer.wrap(rawData); 315 | } 316 | 317 | public void setCompositekeyN(short keysN) { 318 | this.keysN = keysN; 319 | keys.setLength(keysN); 320 | values.setLength(keysN); 321 | writeHeader(); 322 | } 323 | 324 | public void setPageId(long pageId) { 325 | this.pageId = pageId; 326 | writeHeader(); 327 | } 328 | 329 | public void setLeaf(boolean isLeaf) { 330 | this.isLeaf = isLeaf; 331 | writeHeader(); 332 | } 333 | 334 | public Compositekey getKey(int index) throws InvalidAttributesException { 335 | return keys.get(index); 336 | } 337 | 338 | public Compositekey getValue(int index) throws InvalidAttributesException { 339 | return values.get(index); 340 | } 341 | 342 | public void setKey(int index, Compositekey key) { 343 | keys.set(index, key); 344 | } 345 | 346 | public void setValue(int index, Compositekey value) { 347 | values.set(index, value); 348 | } 349 | } 350 | -------------------------------------------------------------------------------- /src/page/LeafNode.java: -------------------------------------------------------------------------------- 1 | package page; 2 | 3 | import bufferpool.BufferPool; 4 | import bufferpool.WriteGuard; 5 | import globals.Globals; 6 | import java.nio.ByteBuffer; 7 | import javax.naming.directory.InvalidAttributesException; 8 | import types.Array; 9 | import types.CompareableArray; 10 | import types.Compositekey; 11 | import types.Template; 12 | 13 | /** 14 | * LeafNode class represents a leaf node in a B+ tree. It extends the TreeNode class and is used to 15 | * store keys and values. The keys are used to get the corresponding values (document in the primary 16 | * index, primary key in the secondary index). The leaf node is the last level of the B+ tree, where 17 | * the actual data is stored in the cluster index. The leaf node contains a pointer to the next leaf 18 | * node, which is used for sequential access. The last pageId points to the next leaf node. | key1 19 | * |key2 |key3 |...|keyN | |value1 |value2 |value3 |...|valueN | 20 | * 21 | *
the header is 2 bytes for the number of keys 1 byte for the type of the node -> 00000001 for 22 | * leaf node , 00000000 for internal node 8 bytes for the pageId 8 bytes for the next leaf node the 23 | * rest is the keys and values The keys are stored in the first part of the page, and the values are 24 | * stored in the second part. | 2bytes | 1byte | 8 bytes | 8 bytes | keys... | values... | | KeysN | 25 | * Type | pageId |nextLeafNode| key1 | key2 | ... | keyN | value1 | value2 | ... | valueN | 26 | */ 27 | public class LeafNode extends TreeNodeHeader { 28 | // The next leaf node in the linked list 29 | private long nextLeafNode; // this is the next 8 bytes of the header 30 | private final short headerSize = 31 | 2 + 1 + 8 + 8; // 2 bytes for keysN, 2 bytes for keySize, 2 bytes for valueSize, 1 32 | // byte for type, 8 bytes for pageId, 8 bytes for nextLeafNode 33 | private final Template keyType; 34 | private final Template valueType; 35 | private final short keySize; 36 | private final short valueSize; 37 | private final short maxKeysN; 38 | private final short minKeysN; 39 | private CompareableArray keys; 40 | private Array values; 41 | 42 | public LeafNode(Template keyType, Template valueType, long pageId) { 43 | super(pageId, true); 44 | this.keyType = keyType; 45 | this.valueType = valueType; 46 | keySize = keyType.getByteSize(); 47 | valueSize = valueType.getByteSize(); 48 | maxKeysN = (short) ((Globals.PAGE_SIZE - headerSize) / (keySize + valueSize)); 49 | minKeysN = (short) (maxKeysN / 2); 50 | nextLeafNode = Globals.INVALID_PAGE_ID; 51 | } 52 | 53 | public LeafNode(Template keyType, Template valueType, ByteBuffer rawData) { 54 | this.keyType = keyType; 55 | this.valueType = valueType; 56 | keySize = keyType.getByteSize(); 57 | valueSize = valueType.getByteSize(); 58 | maxKeysN = (short) ((Globals.PAGE_SIZE - headerSize) / (keySize + valueSize)); 59 | minKeysN = (short) (maxKeysN / 2); 60 | 61 | buffer = rawData; 62 | this.keysN = buffer.getShort(); 63 | this.isLeaf = buffer.get() == 1; 64 | this.pageId = buffer.getLong(); 65 | this.nextLeafNode = buffer.getLong(); 66 | 67 | keys = new CompareableArray(new Compositekey(keyType), rawData, headerSize, keysN, maxKeysN); 68 | values = 69 | new Array( 70 | new Compositekey(valueType), rawData, headerSize + maxKeysN * keySize, keysN, maxKeysN); 71 | } 72 | 73 | public LeafNode(Template keyType, Template valueType, byte[] rawData) { 74 | this(keyType, valueType, ByteBuffer.wrap(rawData)); 75 | } 76 | 77 | public void writeHeader() { 78 | buffer.rewind(); 79 | buffer.putShort(keysN); 80 | buffer.put((byte) (isLeaf ? 
1 : 0)); 81 | buffer.putLong(pageId); 82 | buffer.putLong(nextLeafNode); 83 | } 84 | 85 | public int insert(Compositekey key, Compositekey value) throws InvalidAttributesException { 86 | if (keysN >= maxKeysN) { 87 | return 0; // node is full 88 | } 89 | 90 | int index = keys.upperBound(key); 91 | if (index > 0 && getKey(index - 1).compareTo(key) == 0) { 92 | return -1; // key already exists 93 | } 94 | keys.insert(index, key); 95 | values.insert(index, value); 96 | keysN++; 97 | writeHeader(); 98 | return 1; 99 | } 100 | 101 | public Compositekey get(Compositekey key) throws InvalidAttributesException { 102 | int index = keys.binarySearch(key); 103 | if (index == -1) { 104 | return null; // key not found 105 | } else { 106 | return values.get(index); // return the value 107 | } 108 | } 109 | 110 | public void delete(int index) throws InvalidAttributesException { 111 | keys.delete(index); 112 | values.delete(index); 113 | if (keysN > 0) { 114 | keysN--; 115 | writeHeader(); 116 | } 117 | } 118 | 119 | public boolean delete(Compositekey key) throws InvalidAttributesException { 120 | if (keysN == 0) { 121 | return false; 122 | } 123 | int index = keys.binarySearch(key); 124 | if (index != -1) { 125 | delete(index); 126 | } 127 | return keysN >= minKeysN; 128 | } 129 | 130 | public WriteGuard split(BufferPool bufferPool, String fileName) { 131 | if (keysN < maxKeysN) { 132 | return null; // node is not full 133 | } 134 | 135 | try { 136 | // create a new leaf node 137 | long newPageId = bufferPool.allocateNewPage(fileName); 138 | WriteGuard newGuard = bufferPool.getWriteGuard(fileName, newPageId); 139 | if (newGuard == null) { 140 | return null; 141 | } 142 | LeafNode newLeafNode = new LeafNode(keyType, valueType, newGuard.getDataMut()); 143 | newLeafNode.setLeaf(true); 144 | newLeafNode.setPageId(newPageId); 145 | // copy half of the keys and values to the new leaf node 146 | for (int i = minKeysN; i < keysN; i++) { 147 | newLeafNode.pushBackKey(keys.get(i)); 148 | newLeafNode.pushBackValue(values.get(i)); 149 | } 150 | newLeafNode.setKeysN((short) (keysN - minKeysN)); 151 | setKeysN(minKeysN); 152 | // set the next leaf node of the new leaf node 153 | newLeafNode.nextLeafNode = nextLeafNode; 154 | // set the next leaf node of the current leaf node 155 | nextLeafNode = newPageId; 156 | 157 | // write the new leaf node to the buffer pool 158 | newLeafNode.writeHeader(); 159 | writeHeader(); 160 | return newGuard; 161 | } catch (Exception e) { 162 | e.printStackTrace(); 163 | return null; 164 | } 165 | } 166 | 167 | public boolean redistribute( 168 | String fileName, int index, InternalNode parent, BufferPool bufferPool) throws Exception { 169 | if (index > 1) { 170 | WriteGuard leftGuard = 171 | bufferPool.getWriteGuard(fileName, parent.getValue(index - 2).getVal(0)); 172 | if (leftGuard != null) { 173 | LeafNode leftNode = new LeafNode(keyType, valueType, leftGuard.getDataMut()); 174 | if (leftNode.getKeysN() > leftNode.getMinKeysN()) { // can redistribute 175 | // move the last key and value of the left node to the currunt node 176 | insert( 177 | leftNode.getKey(leftNode.getKeysN() - 1), leftNode.getValue(leftNode.getKeysN() - 1)); 178 | leftNode.delete(leftNode.getKeysN() - 1); 179 | // update the parent node 180 | parent.setKey(index - 1, leftNode.getKey(leftNode.getKeysN() - 1)); 181 | // release the left node lock 182 | leftGuard.close(); 183 | return true; 184 | } 185 | // release the left node lock 186 | leftGuard.close(); 187 | } 188 | } 189 | 190 | // try to redistribute with the 
right sibling 191 | if (index < parent.getKeysN()) { 192 | WriteGuard rightGuard = 193 | bufferPool.getWriteGuard(fileName, parent.getValue(index).getVal(0)); 194 | if (rightGuard == null) { 195 | return false; 196 | } 197 | 198 | LeafNode rightNode = new LeafNode(keyType, valueType, rightGuard.getDataMut()); 199 | if (rightNode.getKeysN() > rightNode.getMinKeysN()) { 200 | // redistribute with the right sibling 201 | // move the first key and value of the right node to the currunt node 202 | insert(rightNode.getKey(0), rightNode.getValue(0)); 203 | rightNode.delete(0); 204 | // update the parent node 205 | parent.setKey(index, getKey(getKeysN() - 1)); 206 | // release right node 207 | rightGuard.close(); 208 | return true; 209 | } 210 | // release the right guard 211 | rightGuard.close(); 212 | } 213 | 214 | return false; 215 | } 216 | 217 | public boolean merge(String fileName, int index, InternalNode parent, BufferPool bufferPool) 218 | throws Exception { 219 | if (index > 1) { 220 | WriteGuard leftGuard = 221 | bufferPool.getWriteGuard(fileName, parent.getValue(index - 2).getVal(0)); 222 | if (leftGuard != null) { 223 | LeafNode leftNode = new LeafNode(keyType, valueType, leftGuard.getDataMut()); 224 | if (leftNode.getKeysN() + getKeysN() <= leftNode.getMaxKeysN()) { // can merge 225 | for (int i = 0; i < getKeysN(); i++) { 226 | leftNode.pushBack(getKey(i), getValue(i)); 227 | } 228 | leftNode.setNextLeafNode(getNextLeafNode()); 229 | // delete the page 230 | bufferPool.deletePage(fileName, getPageId()); 231 | // update the parent 232 | parent.delete(index - 1); 233 | // release the left node lock 234 | leftGuard.close(); 235 | return true; 236 | } 237 | // release the left node lock 238 | leftGuard.close(); 239 | } 240 | } 241 | 242 | // try to merge with the right sibling 243 | if (index < parent.getKeysN()) { 244 | WriteGuard rightGuard = 245 | bufferPool.getWriteGuard(fileName, parent.getValue(index).getVal(0)); 246 | if (rightGuard == null) { 247 | return false; 248 | } 249 | 250 | LeafNode rightNode = new LeafNode(keyType, valueType, rightGuard.getDataMut()); 251 | if (rightNode.getKeysN() + getKeysN() <= getMaxKeysN()) { 252 | for (int i = 0; i < rightNode.getKeysN(); i++) { 253 | pushBack(rightNode.getKey(i), rightNode.getValue(i)); 254 | } 255 | setNextLeafNode(rightNode.getNextLeafNode()); 256 | // delete the page 257 | bufferPool.deletePage(fileName, rightNode.getPageId()); 258 | // update the parent 259 | parent.delete(index); 260 | // release the right node lock 261 | rightGuard.close(); 262 | return true; 263 | } 264 | // release the right guard 265 | rightGuard.close(); 266 | } 267 | 268 | return false; 269 | } 270 | 271 | public void pushBack(Compositekey key, Compositekey value) throws InvalidAttributesException { 272 | pushBackKey(key); 273 | pushBackValue(value); 274 | keysN++; 275 | writeHeader(); 276 | } 277 | 278 | public void pushBackKey(Compositekey key) throws InvalidAttributesException { 279 | keys.pushBack(key); 280 | } 281 | 282 | public void pushBackValue(Compositekey value) throws InvalidAttributesException { 283 | values.pushBack(value); 284 | } 285 | 286 | // Getters and Setters 287 | 288 | public long getNextLeafNode() { 289 | return nextLeafNode; 290 | } 291 | 292 | public short getHeaderSize() { 293 | return headerSize; 294 | } 295 | 296 | public Template getKeyType() { 297 | return keyType; 298 | } 299 | 300 | public Template getValueType() { 301 | return valueType; 302 | } 303 | 304 | public short getKeySize() { 305 | return keySize; 306 | } 307 | 
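  // Rough capacity math for this layout (a sketch only; the 4-byte key and value sizes below are
  // an assumption for illustration, the real sizes come from the key/value Templates):
  //   headerSize = 2 (keysN) + 1 (type) + 8 (pageId) + 8 (nextLeafNode) = 19 bytes
  //   maxKeysN   = (Globals.PAGE_SIZE - headerSize) / (keySize + valueSize)
  //              = (8192 - 19) / (4 + 4) = 1021 entries per leaf
  //   minKeysN   = maxKeysN / 2 = 510, the underflow bound used by delete/redistribute/merge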
308 | public short getValueSize() { 309 | return valueSize; 310 | } 311 | 312 | public short getMaxKeysN() { 313 | return maxKeysN; 314 | } 315 | 316 | public short getMinKeysN() { 317 | return minKeysN; 318 | } 319 | 320 | public CompareableArray getKeys() { 321 | return keys; 322 | } 323 | 324 | public Array getValues() { 325 | return values; 326 | } 327 | 328 | public Compositekey getKey(int index) throws InvalidAttributesException { 329 | return keys.get(index); 330 | } 331 | 332 | public Compositekey getValue(int index) throws InvalidAttributesException { 333 | return values.get(index); 334 | } 335 | 336 | public void setKeys(CompareableArray keys) { 337 | this.keys = keys; 338 | } 339 | 340 | public void setValues(Array values) { 341 | this.values = values; 342 | } 343 | 344 | public void setKey(int index, Compositekey key) { 345 | keys.set(index, key); 346 | } 347 | 348 | public void setValue(int index, Compositekey value) { 349 | values.set(index, value); 350 | } 351 | 352 | public void setKeysN(short keysN) { 353 | this.keysN = keysN; 354 | keys.setLength(keysN); 355 | values.setLength(keysN); 356 | writeHeader(); 357 | } 358 | 359 | public void setPageId(long pageId) { 360 | this.pageId = pageId; 361 | writeHeader(); 362 | } 363 | 364 | public void setLeaf(boolean isLeaf) { 365 | this.isLeaf = isLeaf; 366 | writeHeader(); 367 | } 368 | 369 | public void setNextLeafNode(long nextLeafNode) { 370 | this.nextLeafNode = nextLeafNode; 371 | writeHeader(); 372 | } 373 | } 374 | -------------------------------------------------------------------------------- /src/test/bufferpool/BufferPoolTest.java: -------------------------------------------------------------------------------- 1 | package test.bufferpool; 2 | 3 | import static org.junit.Assert.*; 4 | 5 | import bufferpool.*; 6 | import diskmanager.BasicDiskManager; 7 | import diskmanager.DiskManager; 8 | import java.nio.ByteBuffer; 9 | import java.util.ArrayList; 10 | import java.util.Collections; 11 | import java.util.List; 12 | import org.junit.After; 13 | import org.junit.Before; 14 | import org.junit.Test; 15 | 16 | public class BufferPoolTest { 17 | private static final int bufferPoolSize = 2000; 18 | private static final int kDistance = 3; 19 | private static final String fileName = "test.db"; 20 | private BufferPool bufferPool; 21 | 22 | @Before 23 | public void setUp() { 24 | DiskManager diskManager = new BasicDiskManager(); 25 | bufferPool = new BufferPool(bufferPoolSize, kDistance, diskManager); 26 | } 27 | 28 | @After 29 | public void tearDown() { 30 | try { 31 | bufferPool.close(); 32 | } catch (Exception e) { 33 | fail("Exception should not be thrown: " + e.getMessage()); 34 | } 35 | // delete the test file 36 | java.io.File file = new java.io.File("storage/" + fileName); 37 | if (file.exists()) { 38 | if (!file.delete()) { 39 | fail("Failed to delete test file: " + fileName); 40 | } 41 | } 42 | } 43 | 44 | @Test 45 | public void testBufferPoolInitialization() { 46 | assertNotNull(bufferPool); 47 | } 48 | 49 | @Test 50 | public void testGetPage() { 51 | long pageId = -1; 52 | try { 53 | pageId = bufferPool.allocateNewPage(fileName); 54 | ReadGuard readGuard = bufferPool.getReadGuard(fileName, pageId); 55 | assertNotNull(readGuard); 56 | assertNotNull(readGuard.getData()); 57 | readGuard.close(); 58 | } catch (Exception e) { 59 | fail("Exception should not be thrown: " + e.getMessage()); 60 | } 61 | 62 | try { 63 | WriteGuard writeGuard = bufferPool.getWriteGuard(fileName, pageId); 64 | assertNotNull(writeGuard); 65 | 
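      // getData() exposes the page as a ByteBuffer and getDataMut() as a writable byte[] over the
      // same pinned frame; closing the guard releases the frame again (testWriteRead below relies
      // on close() before re-reading the page).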
assertNotNull(writeGuard.getData()); 66 | assertNotNull(writeGuard.getDataMut()); 67 | writeGuard.close(); 68 | } catch (Exception e) { 69 | fail("Exception should not be thrown: " + e.getMessage()); 70 | } 71 | } 72 | 73 | @Test 74 | public void testWriteRead() { 75 | long pageId = -1; 76 | byte[] msg = "Hello World".getBytes(); 77 | try { 78 | pageId = bufferPool.allocateNewPage(fileName); 79 | WriteGuard writeGuard = bufferPool.getWriteGuard(fileName, pageId); 80 | byte[] data = writeGuard.getDataMut(); 81 | System.arraycopy(msg, 0, data, 0, msg.length); 82 | writeGuard.close(); 83 | ReadGuard readGuard = bufferPool.getReadGuard(fileName, pageId); 84 | ByteBuffer readData = readGuard.getData(); 85 | byte[] readDatabuffer = new byte[msg.length]; 86 | readData.get(readDatabuffer); 87 | assertArrayEquals(msg, readDatabuffer); 88 | readGuard.close(); 89 | } catch (Exception e) { 90 | fail("Exception should not be thrown: " + e.getMessage()); 91 | } 92 | } 93 | 94 | @Test 95 | public void testDelete() { 96 | long pageId = -1; 97 | byte[] msg = "Hello World".getBytes(); 98 | try { 99 | // allocate a new page 100 | pageId = bufferPool.allocateNewPage(fileName); 101 | // write data to the page 102 | WriteGuard writeGuard = bufferPool.getWriteGuard(fileName, pageId); 103 | byte[] data = writeGuard.getDataMut(); 104 | System.arraycopy(msg, 0, data, 0, msg.length); 105 | writeGuard.close(); 106 | // read data from the page 107 | ReadGuard readGuard = bufferPool.getReadGuard(fileName, pageId); 108 | ByteBuffer readData = readGuard.getData(); 109 | byte[] readDatabuffer = new byte[msg.length]; 110 | readData.get(readDatabuffer); 111 | assertArrayEquals(msg, readDatabuffer); 112 | readGuard.close(); 113 | 114 | // delete the page 115 | bufferPool.deletePage(fileName, pageId); 116 | // try to read the deleted page 117 | try { 118 | readGuard = bufferPool.getReadGuard(fileName, pageId); 119 | fail("Exception should be thrown: Page not found"); 120 | } catch (Exception e) { 121 | assertTrue(e.getMessage().contains("pageId is not valid")); 122 | } 123 | 124 | // try to write to the deleted page 125 | try { 126 | writeGuard = bufferPool.getWriteGuard(fileName, pageId); 127 | fail("Exception should be thrown: Page not found"); 128 | } catch (Exception e) { 129 | assertTrue(e.getMessage().contains("pageId is not valid")); 130 | } 131 | 132 | // allocate a new page 133 | long newPageId = bufferPool.allocateNewPage(fileName); 134 | assertEquals(pageId, newPageId); 135 | // write data to the new page 136 | msg = "Hello World Again".getBytes(); 137 | writeGuard = bufferPool.getWriteGuard(fileName, newPageId); 138 | data = writeGuard.getDataMut(); 139 | System.arraycopy(msg, 0, data, 0, msg.length); 140 | writeGuard.close(); 141 | 142 | // read data from the new page 143 | readGuard = bufferPool.getReadGuard(fileName, newPageId); 144 | readData = readGuard.getData(); 145 | readDatabuffer = new byte[msg.length]; 146 | readData.get(readDatabuffer); 147 | assertArrayEquals(msg, readDatabuffer); 148 | readGuard.close(); 149 | } catch (Exception e) { 150 | fail("Exception should not be thrown: " + e.getMessage()); 151 | } 152 | } 153 | 154 | @Test 155 | public void testEviction() { 156 | int pageCount = 2 * bufferPoolSize; 157 | // allocate pages 158 | for (int i = 0; i < pageCount; i++) { 159 | try { 160 | long pageId = bufferPool.allocateNewPage(fileName); 161 | assertEquals(i, pageId); 162 | } catch (Exception e) { 163 | fail("Exception should not be thrown: " + e.getMessage()); 164 | } 165 | } 166 | 167 | // 
check if the pages are evicted correctly 168 | for (int i = 0; i < pageCount; i++) { 169 | byte[] msg = ("Hello World " + i + "\0").getBytes(); 170 | try { 171 | WriteGuard writeGuard = bufferPool.getWriteGuard(fileName, i); 172 | byte[] data = writeGuard.getDataMut(); 173 | System.arraycopy(msg, 0, data, 0, msg.length); 174 | writeGuard.close(); 175 | } catch (Exception e) { 176 | fail("Exception should not be thrown: " + e.getMessage()); 177 | } 178 | } 179 | 180 | // check if the pages are evicted correctly 181 | for (int round = 0; round <= kDistance; round++) { 182 | for (int i = 0; i < pageCount; i++) { 183 | String msg = ("Hello World " + i + "\0"); 184 | try { 185 | ReadGuard readGuard = bufferPool.getReadGuard(fileName, i); 186 | ByteBuffer readData = readGuard.getData(); 187 | byte[] readDatabuffer = new byte[readData.remaining()]; 188 | readData.get(readDatabuffer); 189 | String readMsg = new String(readDatabuffer); 190 | assertTrue(readMsg.startsWith(msg)); 191 | assertTrue(readMsg.length() > msg.length()); 192 | // check if the message is correct 193 | assertTrue(readMsg.contains(msg)); 194 | readGuard.close(); 195 | } catch (Exception e) { 196 | fail("Exception should not be thrown: " + e.getMessage()); 197 | } 198 | } 199 | } 200 | } 201 | 202 | class Writer implements Runnable { 203 | private String fileName; 204 | private long pageId; 205 | private byte[] msg; 206 | 207 | public Writer(String fileName, long pageId, byte[] msg) { 208 | this.fileName = fileName; 209 | this.pageId = pageId; 210 | this.msg = msg; 211 | } 212 | 213 | public void run() { 214 | try { 215 | WriteGuard writeGuard = bufferPool.getWriteGuard(fileName, pageId); 216 | while (writeGuard == null) { // ensure we get a write guard so each page is written 217 | writeGuard = bufferPool.getWriteGuard(fileName, pageId); 218 | } 219 | byte[] data = writeGuard.getDataMut(); 220 | System.arraycopy(msg, 0, data, 0, msg.length); 221 | writeGuard.close(); 222 | } catch (Exception e) { 223 | fail("Exception should not be thrown: " + e.getMessage()); 224 | } 225 | } 226 | } 227 | 228 | class Reader implements Runnable { 229 | private String fileName; 230 | private long pageId; 231 | 232 | public Reader(String fileName, long pageId) { 233 | this.fileName = fileName; 234 | this.pageId = pageId; 235 | } 236 | 237 | public void run() { 238 | try { 239 | // wait for a short time to allow the writer thread to finish 240 | ReadGuard readGuard = bufferPool.getReadGuard(fileName, pageId); 241 | while (readGuard == null) { // it is ok to be null just return 242 | readGuard = bufferPool.getReadGuard(fileName, pageId); 243 | } 244 | ByteBuffer readData = readGuard.getData(); 245 | byte[] readDatabuffer = new byte[readData.remaining()]; 246 | readData.get(readDatabuffer); 247 | readGuard.close(); 248 | } catch (Exception e) { 249 | e.printStackTrace(); 250 | fail("Exception should not be thrown: " + e.getMessage()); 251 | } 252 | } 253 | } 254 | 255 | @Test 256 | public void testConcurrency() { 257 | int pageCount = 2 * bufferPoolSize; 258 | // allocate pages 259 | for (int i = 0; i < pageCount; i++) { 260 | try { 261 | long pageId = bufferPool.allocateNewPage(fileName); 262 | assertEquals(i, pageId); 263 | } catch (Exception e) { 264 | fail("Exception should not be thrown: " + e.getMessage()); 265 | } 266 | } 267 | 268 | List threads = new ArrayList<>(); 269 | 270 | for (int i = 0; i < pageCount; i++) { 271 | byte[] msg = ("Hello World " + i + "\0").getBytes(); 272 | // writer thread 273 | Thread curr1 = new Thread(new 
Writer(fileName, i, msg)); 274 | threads.add(curr1); 275 | Thread curr2 = new Thread(new Reader(fileName, i)); 276 | threads.add(curr2); 277 | } 278 | 279 | // shuffle the threads 280 | Collections.shuffle(threads); 281 | // start the threads 282 | for (Thread thread : threads) { 283 | thread.start(); 284 | } 285 | 286 | // wait for all threads to finish 287 | for (Thread thread : threads) { 288 | try { 289 | thread.join(); 290 | } catch (InterruptedException e) { 291 | fail("Exception should not be thrown: " + e.getMessage()); 292 | } 293 | } 294 | 295 | // check if all pages are written correctly 296 | for (int i = 0; i < pageCount; i++) { 297 | String msg = ("Hello World " + i + "\0"); 298 | try { 299 | ReadGuard readGuard = bufferPool.getReadGuard(fileName, i); 300 | ByteBuffer readData = readGuard.getData(); 301 | byte[] readDatabuffer = new byte[readData.remaining()]; 302 | readData.get(readDatabuffer); 303 | // has prefix "Hello World " + i 304 | String readMsg = new String(readDatabuffer); 305 | assertTrue(readMsg.startsWith(msg)); 306 | assertTrue(readMsg.length() > msg.length()); 307 | // check if the message is correct 308 | assertTrue(readMsg.contains(msg)); 309 | readGuard.close(); 310 | } catch (Exception e) { 311 | fail("Exception should not be thrown: " + e.getMessage()); 312 | } 313 | } 314 | } 315 | 316 | @Test 317 | public void testConcurrencyBig() { 318 | int pageCount = 2 * bufferPoolSize; 319 | // allocate pages 320 | for (int i = 0; i < pageCount; i++) { 321 | try { 322 | long pageId = bufferPool.allocateNewPage(fileName); 323 | assertEquals(i, pageId); 324 | } catch (Exception e) { 325 | fail("Exception should not be thrown: " + e.getMessage()); 326 | } 327 | } 328 | 329 | List threads = new ArrayList<>(); 330 | 331 | for (int i = 0; i < pageCount; i++) { 332 | byte[] msg = ("Hello World " + i + "\0").getBytes(); 333 | // writer thread 334 | Thread curr1 = new Thread(new Writer(fileName, i, msg)); 335 | threads.add(curr1); 336 | Thread curr2 = new Thread(new Reader(fileName, i)); 337 | threads.add(curr2); 338 | curr1 = new Thread(new Writer(fileName, i, msg)); 339 | threads.add(curr1); 340 | curr2 = new Thread(new Reader(fileName, i)); 341 | threads.add(curr2); 342 | } 343 | 344 | // shuffle the threads 345 | Collections.shuffle(threads); 346 | // start the threads 347 | for (Thread thread : threads) { 348 | thread.start(); 349 | } 350 | 351 | // wait for all threads to finish 352 | for (Thread thread : threads) { 353 | try { 354 | thread.join(); 355 | } catch (InterruptedException e) { 356 | fail("Exception should not be thrown: " + e.getMessage()); 357 | } 358 | } 359 | 360 | // check if all pages are written correctly 361 | for (int i = 0; i < pageCount; i++) { 362 | String msg = ("Hello World " + i + "\0"); 363 | try { 364 | ReadGuard readGuard = bufferPool.getReadGuard(fileName, i); 365 | ByteBuffer readData = readGuard.getData(); 366 | byte[] readDatabuffer = new byte[readData.remaining()]; 367 | readData.get(readDatabuffer); 368 | // has prefix "Hello World " + i 369 | String readMsg = new String(readDatabuffer); 370 | assertTrue(readMsg.startsWith(msg)); 371 | assertTrue(readMsg.length() > msg.length()); 372 | // check if the message is correct 373 | assertTrue(readMsg.contains(msg)); 374 | readGuard.close(); 375 | } catch (Exception e) { 376 | fail("Exception should not be thrown: " + e.getMessage()); 377 | } 378 | } 379 | } 380 | } 381 | -------------------------------------------------------------------------------- /src/test/btree/BtreeTest.java: 
-------------------------------------------------------------------------------- 1 | package test.btree; 2 | 3 | import static org.junit.Assert.*; 4 | 5 | import btree.Btree; 6 | import btree.Cursor; 7 | import bufferpool.*; 8 | import diskmanager.*; 9 | import globals.Globals; 10 | import java.util.ArrayList; 11 | import java.util.Collections; 12 | import java.util.List; 13 | import org.junit.After; 14 | import org.junit.Before; 15 | import org.junit.Test; 16 | import types.Compositekey; 17 | import types.Template; 18 | 19 | public class BtreeTest { 20 | 21 | private Btree btree; 22 | private BufferPool bufferPool; 23 | private DiskManager diskManager; 24 | private static final int MAX_PAGES = 4000; // Example max pages 25 | private static final int K = 10; 26 | private static final String btreeFilePath = "test.btree"; // Example file path for the B-tree 27 | Template keyType; 28 | Template valueType; 29 | 30 | // private static final String logsFilePath = "logs.btree"; // Example file path 31 | // for the B-tree 32 | // private Thread monitor; 33 | 34 | @Before 35 | public void setUp() { 36 | CleanUp(); 37 | // Initialize the DiskManager 38 | diskManager = new BasicDiskManager(); 39 | // Initialize the buffer pool with a size of 10 pages 40 | bufferPool = new BufferPool(MAX_PAGES, K, diskManager); 41 | // Initialize the B-tree 42 | keyType = new Template(Integer.class); 43 | valueType = new Template(Integer.class); 44 | btree = new Btree(keyType, valueType, btreeFilePath, Globals.INVALID_PAGE_ID, bufferPool); 45 | } 46 | 47 | @After 48 | public void CleanUp() { 49 | // monitor.interrupt(); 50 | // delete the test file 51 | java.io.File file = new java.io.File("storage/" + btreeFilePath); 52 | if (file.exists()) { 53 | if (!file.delete()) { 54 | fail("Failed to delete test file: " + btreeFilePath); 55 | } 56 | } 57 | } 58 | 59 | private Compositekey makeCompositekey(int val, Template type) { 60 | Compositekey key = new Compositekey(type); 61 | key.set(0, val, Integer.class); 62 | return key; 63 | } 64 | 65 | @Test 66 | public void testInsertAndSearch() { 67 | double startTime = (double) System.currentTimeMillis(); 68 | int key = 5; 69 | int value = 10; 70 | try { 71 | btree.insert(makeCompositekey(key, keyType), makeCompositekey(value, valueType)); 72 | } catch (Exception e) { 73 | fail("Insert operation failed: " + e.getMessage()); 74 | } 75 | try { 76 | Compositekey result = btree.get(makeCompositekey(key, keyType)); 77 | assertEquals( 78 | "Search operation failed", 0, result.compareTo(makeCompositekey(value, valueType))); 79 | } catch (Exception e) { 80 | fail("Search operation failed: " + e.getMessage()); 81 | } 82 | 83 | double endTime = System.currentTimeMillis(); 84 | double fTime = (endTime - startTime) / (double) 1000; 85 | System.out.println("test testInsertAndSearch done in: " + fTime + "s"); 86 | } 87 | 88 | @Test 89 | public void testInsertAndSearchBigWithoutSplit() { 90 | double startTime = (double) System.currentTimeMillis(); 91 | for (int i = 0; i < 100; i++) { 92 | try { 93 | btree.insert(makeCompositekey(i, keyType), makeCompositekey(i, valueType)); 94 | } catch (Exception e) { 95 | fail("Insert operation failed: " + e.getMessage()); 96 | } 97 | try { 98 | Compositekey result = btree.get(makeCompositekey(i, keyType)); 99 | assertEquals( 100 | "Search operation failed", 0, result.compareTo(makeCompositekey(i, valueType))); 101 | } catch (Exception e) { 102 | fail("Search operation failed: " + e.getMessage()); 103 | } 104 | } 105 | double endTime = 
System.currentTimeMillis(); 106 | double fTime = (endTime - startTime) / (double) 1000; 107 | System.out.println("test testInsertAndSearchBigWithoutSplit done in: " + fTime + "s"); 108 | } 109 | 110 | @Test 111 | public void testInsertAndSearchBigWithSplit() { 112 | double startTime = (double) System.currentTimeMillis(); 113 | int itr = 1_000_000; 114 | for (int i = 0; i < itr; i++) { 115 | try { 116 | btree.insert(makeCompositekey(i, keyType), makeCompositekey(i, valueType)); 117 | } catch (Exception e) { 118 | System.out.println(i); 119 | e.printStackTrace(); 120 | fail("Insert operation failed: " + e.getMessage()); 121 | } 122 | try { 123 | Compositekey result = btree.get(makeCompositekey(i, keyType)); 124 | assertEquals( 125 | "Search operation failed", 0, result.compareTo(makeCompositekey(i, valueType))); 126 | } catch (Exception e) { 127 | System.out.println(i); 128 | e.printStackTrace(); 129 | fail("Search operation failed: " + e.getMessage()); 130 | } 131 | } 132 | double endTime = System.currentTimeMillis(); 133 | double fTime = (endTime - startTime) / (double) 1000; 134 | System.out.println("test testInsertAndSearchBigWithSplit done in: " + fTime + "s"); 135 | } 136 | 137 | @Test 138 | public void testInsertAndSearchBigRev() { 139 | double startTime = (double) System.currentTimeMillis(); 140 | int itr = 1_000_000; 141 | int[] keys = new int[itr]; 142 | for (int i = 0; i < itr; i++) { 143 | keys[i] = i; 144 | } 145 | for (int i = 0; i < itr; i++) { 146 | try { 147 | btree.insert(makeCompositekey(keys[i], keyType), makeCompositekey(i, valueType)); 148 | } catch (Exception e) { 149 | System.out.println(i); 150 | e.printStackTrace(); 151 | fail("Insert operation failed: " + e.getMessage()); 152 | } 153 | } 154 | 155 | for (int i = 0; i < itr; i++) { 156 | try { 157 | Compositekey result = btree.get(makeCompositekey(keys[i], keyType)); 158 | assertEquals( 159 | "Search operation failed", 0, result.compareTo(makeCompositekey(i, valueType))); 160 | } catch (Exception e) { 161 | System.out.println(i); 162 | e.printStackTrace(); 163 | fail("Insert operation failed: " + e.getMessage()); 164 | } 165 | } 166 | 167 | double endTime = System.currentTimeMillis(); 168 | double fTime = (endTime - startTime) / (double) 1000; 169 | System.out.println("test testInsertAndSearchBigRev done in: " + fTime + "s"); 170 | } 171 | 172 | @Test 173 | public void testInsertAndSearchBigRandom() { 174 | int itr = 1_000_000; 175 | int[] keys = new int[itr]; 176 | for (int i = 0; i < itr; i += 2) { 177 | keys[i] = i; 178 | } 179 | for (int i = 1; i < itr; i += 2) { 180 | keys[i] = itr - i; 181 | } 182 | double startTime = (double) System.currentTimeMillis(); 183 | 184 | for (int i = 0; i < itr; i++) { 185 | try { 186 | btree.insert(makeCompositekey(keys[i], keyType), makeCompositekey(i, valueType)); 187 | } catch (Exception e) { 188 | System.out.println(i); 189 | e.printStackTrace(); 190 | fail("Insert operation failed: " + e.getMessage()); 191 | } 192 | } 193 | double endTime = System.currentTimeMillis(); 194 | double insertTime = (endTime - startTime) / (double) 1000; 195 | System.out.println("insertion done in: " + insertTime + "s"); 196 | 197 | startTime = System.currentTimeMillis(); 198 | for (int i = 0; i < itr; i++) { 199 | try { 200 | Compositekey result = btree.get(makeCompositekey(keys[i], keyType)); 201 | assertEquals( 202 | "Search operation failed", 0, result.compareTo(makeCompositekey(i, valueType))); 203 | } catch (Exception e) { 204 | System.out.println(i); 205 | e.printStackTrace(); 206 | 
fail("Insert operation failed: " + e.getMessage()); 207 | } 208 | } 209 | endTime = System.currentTimeMillis(); 210 | double readTime = (endTime - startTime) / (double) 1000; 211 | System.out.println("reading done in: " + readTime + "s"); 212 | readTime += insertTime; 213 | System.out.println("test testInsertAndSearchBigRandom done in: " + readTime + "s"); 214 | } 215 | 216 | @Test 217 | public void testInsertAndSearchBigRandomSwap() { 218 | int itr = 1_000_000; 219 | int[] keys = new int[itr]; 220 | for (int i = 0; i < itr; i += 2) { 221 | keys[i] = i; 222 | } 223 | for (int i = 1; i < itr; i += 2) { 224 | keys[i] = itr - i; 225 | } 226 | double startTime = (double) System.currentTimeMillis(); 227 | 228 | for (int i = 0; i < itr; i++) { 229 | try { 230 | btree.insert(makeCompositekey(keys[i], keyType), makeCompositekey(i, valueType)); 231 | } catch (Exception e) { 232 | System.out.println(i); 233 | e.printStackTrace(); 234 | fail("Insert operation failed: " + e.getMessage()); 235 | } 236 | try { 237 | Compositekey result = btree.get(makeCompositekey(keys[i], keyType)); 238 | assertEquals( 239 | "Search operation failed", 0, result.compareTo(makeCompositekey(i, valueType))); 240 | } catch (Exception e) { 241 | System.out.println(i); 242 | e.printStackTrace(); 243 | fail("Insert operation failed: " + e.getMessage()); 244 | } 245 | } 246 | double endTime = System.currentTimeMillis(); 247 | endTime = System.currentTimeMillis(); 248 | double fTime = (endTime - startTime) / (double) 1000; 249 | System.out.println("test testInsertAndSearchBigRandomSwap done in: " + fTime + "s"); 250 | } 251 | 252 | @Test 253 | public void testCursor() throws Exception { 254 | int itr = 1_000_000; 255 | double startTime = (double) System.currentTimeMillis(); 256 | 257 | for (int i = 0; i < itr; i++) { 258 | try { 259 | btree.insert(makeCompositekey(i, keyType), makeCompositekey(i, valueType)); 260 | } catch (Exception e) { 261 | System.out.println(i); 262 | e.printStackTrace(); 263 | fail("Insert operation failed: " + e.getMessage()); 264 | } 265 | } 266 | 267 | int i = 0; 268 | try { 269 | for (Cursor cursor = btree.begin(); !cursor.isEnd(); cursor.next()) { 270 | Cursor.Pair curr = cursor.get(); 271 | assertEquals( 272 | "Search operation failed", 0, curr.first.compareTo(makeCompositekey(i, keyType))); 273 | assertEquals( 274 | "Search operation failed", 0, curr.second.compareTo(makeCompositekey(i, valueType))); 275 | i++; 276 | } 277 | 278 | double endTime = System.currentTimeMillis(); 279 | endTime = System.currentTimeMillis(); 280 | double fTime = (endTime - startTime) / (double) 1000; 281 | System.out.println("test testCursur done in: " + fTime + "s"); 282 | } catch (Exception e) { 283 | e.printStackTrace(); 284 | fail(); 285 | } 286 | } 287 | 288 | @Test 289 | public void testConcurrency() throws Exception { 290 | int itrs = 5; 291 | for (int itr = 1; itr <= itrs; itr++) { 292 | setUp(); 293 | double startTime = (double) System.currentTimeMillis(); 294 | 295 | int writersCnt = 100; 296 | int readersCnt = 100; 297 | List threads = new ArrayList<>(); 298 | int op = 10000; 299 | for (int i = 0; i < writersCnt; i++) { 300 | final int end = op * i; 301 | Thread writer = 302 | new Thread( 303 | () -> { 304 | for (int key = end - op; key < end; key++) { 305 | try { 306 | btree.insert( 307 | makeCompositekey(key, keyType), makeCompositekey(key, valueType)); 308 | } catch (Exception e) { 309 | e.printStackTrace(); 310 | fail(); 311 | } 312 | } 313 | }); 314 | threads.add(writer); 315 | } 316 | 317 | for (Thread 
thread : threads) { 318 | thread.start(); 319 | } 320 | 321 | for (Thread thread : threads) { 322 | thread.join(); 323 | } 324 | threads = new ArrayList<>(); 325 | 326 | for (int i = 0; i < readersCnt; i++) { 327 | final int end = op * i; 328 | Thread reader = 329 | new Thread( 330 | () -> { 331 | for (int key = end - op; key < end; key++) { 332 | try { 333 | Compositekey result = btree.get(makeCompositekey(key, keyType)); 334 | assertEquals( 335 | "Search operation failed", 336 | 0, 337 | result.compareTo(makeCompositekey(key, valueType))); 338 | } catch (Exception e) { 339 | System.out.println("thread " + end / op + ": expected ->" + key); 340 | e.printStackTrace(); 341 | fail(); 342 | } 343 | } 344 | }); 345 | threads.add(reader); 346 | } 347 | 348 | for (Thread thread : threads) { 349 | thread.start(); 350 | } 351 | 352 | for (Thread thread : threads) { 353 | thread.join(); 354 | } 355 | 356 | double endTime = System.currentTimeMillis(); 357 | endTime = System.currentTimeMillis(); 358 | double fTime = (endTime - startTime) / (double) 1000; 359 | System.out.println("test testConcurrency done itr " + itr + " : " + fTime + "s"); 360 | } 361 | } 362 | 363 | @Test 364 | public void testConcurrencyRand() throws Exception { 365 | int itrs = 1; 366 | for (int itr = 1; itr <= itrs; itr++) { 367 | setUp(); 368 | double startTime = (double) System.currentTimeMillis(); 369 | 370 | int writersCnt = 200; 371 | int readersCnt = 200; 372 | List threads = new ArrayList<>(); 373 | int op = 10000; 374 | for (int i = 0; i < writersCnt; i++) { 375 | final int end = op * i; 376 | Thread writer = 377 | new Thread( 378 | () -> { 379 | for (int key = end - op; key < end; key++) { 380 | try { 381 | btree.insert( 382 | makeCompositekey(key, keyType), makeCompositekey(key, valueType)); 383 | } catch (Exception e) { 384 | e.printStackTrace(); 385 | fail(); 386 | } 387 | } 388 | }); 389 | threads.add(writer); 390 | } 391 | 392 | Collections.shuffle(threads); 393 | for (Thread thread : threads) { 394 | thread.start(); 395 | } 396 | 397 | for (Thread thread : threads) { 398 | thread.join(); 399 | } 400 | threads = new ArrayList<>(); 401 | 402 | for (int i = 0; i < readersCnt; i++) { 403 | final int end = op * i; 404 | Thread reader = 405 | new Thread( 406 | () -> { 407 | for (int key = end - op; key < end; key++) { 408 | try { 409 | Compositekey result = btree.get(makeCompositekey(key, keyType)); 410 | assertEquals( 411 | "Search operation failed", 412 | 0, 413 | result.compareTo(makeCompositekey(key, valueType))); 414 | } catch (Exception e) { 415 | System.out.println("thread " + end / op + ": expected ->" + key); 416 | e.printStackTrace(); 417 | fail(); 418 | } 419 | } 420 | }); 421 | threads.add(reader); 422 | } 423 | 424 | Collections.shuffle(threads); 425 | for (Thread thread : threads) { 426 | thread.start(); 427 | } 428 | 429 | for (Thread thread : threads) { 430 | thread.join(); 431 | } 432 | 433 | double endTime = System.currentTimeMillis(); 434 | endTime = System.currentTimeMillis(); 435 | double fTime = (endTime - startTime) / (double) 1000; 436 | System.out.println("test testConcurrencyRand done itr " + itr + " : " + fTime + "s"); 437 | } 438 | } 439 | 440 | // Delete Tests X( 441 | 442 | @Test 443 | public void testDeleteBasic() throws Exception { 444 | int itr = 1_000_000; 445 | double startTime = (double) System.currentTimeMillis(); 446 | 447 | for (int i = 0; i < itr; i++) { 448 | try { 449 | btree.insert(makeCompositekey(i, keyType), makeCompositekey(i, valueType)); 450 | } catch (Exception e) { 451 
| System.out.println(i); 452 | e.printStackTrace(); 453 | fail("Insert operation failed: " + e.getMessage()); 454 | } 455 | } 456 | 457 | for (int start = 0; start < itr; start += 2000) { 458 | for (int i = start; i < start + 1000; i++) { 459 | try { 460 | boolean deleted = btree.delete(makeCompositekey(i, keyType)); 461 | assertEquals(true, deleted); 462 | } catch (Exception e) { 463 | System.out.println(i); 464 | e.printStackTrace(); 465 | fail("Insert operation failed: " + e.getMessage()); 466 | } 467 | } 468 | } 469 | 470 | for (int start = 1000; start < itr; start += 2000) { 471 | for (int i = start; i < start + 1000; i++) { 472 | try { 473 | Compositekey result = btree.get(makeCompositekey(i, keyType)); 474 | assertNotNull(result); 475 | assertEquals( 476 | "Search operation failed", 0, result.compareTo(makeCompositekey(i, valueType))); 477 | } catch (Exception e) { 478 | System.out.println("expected -> " + i + "\n"); 479 | e.printStackTrace(); 480 | fail(); 481 | } 482 | } 483 | } 484 | 485 | try { 486 | Cursor cursor = btree.begin(); 487 | for (int start = 1000; start < itr && !cursor.isEnd(); start += 2000) { 488 | for (int i = start; i < start + 1000; i++, cursor.next()) { 489 | Cursor.Pair curr = cursor.get(); 490 | assertEquals( 491 | "Search operation failed", 0, curr.first.compareTo(makeCompositekey(i, keyType))); 492 | assertEquals( 493 | "Search operation failed", 0, curr.second.compareTo(makeCompositekey(i, valueType))); 494 | } 495 | } 496 | 497 | double endTime = System.currentTimeMillis(); 498 | endTime = System.currentTimeMillis(); 499 | double fTime = (endTime - startTime) / (double) 1000; 500 | System.out.println("test testCursur done in: " + fTime + "s"); 501 | } catch (Exception e) { 502 | e.printStackTrace(); 503 | fail(); 504 | } 505 | } 506 | 507 | @Test 508 | public void testConcurrencyAllOut() throws Exception { 509 | int itrs = 5; 510 | for (int itr = 1; itr <= itrs; itr++) { 511 | setUp(); 512 | double startTime = (double) System.currentTimeMillis(); 513 | 514 | int writersCnt = 200; 515 | int DeleterCnt = 100; 516 | int readersCnt = 0; 517 | List threads = new ArrayList<>(); 518 | int op = 10000; 519 | for (int i = 0; i < writersCnt; i++) { 520 | final int end = op * (i + 1); 521 | Thread writer = 522 | new Thread( 523 | () -> { 524 | for (int key = end - op; key < end; key++) { 525 | try { 526 | btree.insert( 527 | makeCompositekey(key, keyType), makeCompositekey(key, valueType)); 528 | } catch (Exception e) { 529 | e.printStackTrace(); 530 | fail(); 531 | } 532 | } 533 | }); 534 | threads.add(writer); 535 | } 536 | 537 | for (int i = 0; i < DeleterCnt; i += 2) { 538 | final int end = op * i; 539 | Thread writer = 540 | new Thread( 541 | () -> { 542 | for (int key = end - op; key < end; key++) { 543 | try { 544 | btree.delete(makeCompositekey(key, keyType)); 545 | } catch (Exception e) { 546 | e.printStackTrace(); 547 | fail(); 548 | } 549 | } 550 | }); 551 | threads.add(writer); 552 | } 553 | 554 | Collections.shuffle(threads); 555 | for (Thread thread : threads) { 556 | thread.start(); 557 | } 558 | 559 | for (Thread thread : threads) { 560 | thread.join(); 561 | } 562 | double endTime = System.currentTimeMillis(); 563 | endTime = System.currentTimeMillis(); 564 | double fTime = (endTime - startTime) / (double) 1000; 565 | System.out.println("test testConcurrencyRand done itr " + itr + " : " + fTime + "s"); 566 | } 567 | } 568 | } 569 | -------------------------------------------------------------------------------- /src/btree/Btree.java: 
-------------------------------------------------------------------------------- 1 | package btree; 2 | 3 | import bufferpool.BufferPool; 4 | import bufferpool.ReadGuard; 5 | import bufferpool.WriteGuard; 6 | import globals.Globals; 7 | import indexmanager.Index; 8 | 9 | import java.nio.ByteBuffer; 10 | import java.util.ArrayDeque; 11 | import java.util.Deque; 12 | import page.InternalNode; 13 | import page.LeafNode; 14 | import types.Compositekey; 15 | import types.Template; 16 | 17 | public class Btree implements Index { 18 | private final String fileName; 19 | private final Template keyType; 20 | private final Template valueType; 21 | 22 | private long headerPageId; 23 | private BufferPool bufferPool; 24 | 25 | public Btree( 26 | Template keyType, 27 | Template valueType, 28 | String fileName, 29 | long headerPageId, 30 | BufferPool bufferPool) { 31 | this.fileName = fileName; 32 | this.keyType = keyType; 33 | this.valueType = valueType; 34 | this.headerPageId = headerPageId; 35 | this.bufferPool = bufferPool; 36 | 37 | // read the header page 38 | if (headerPageId == Globals.INVALID_PAGE_ID) { 39 | // create a new header page 40 | try { 41 | this.headerPageId = bufferPool.allocateNewPage(fileName); 42 | WriteGuard guard = bufferPool.getWriteGuard(fileName, this.headerPageId); 43 | BtreeHeader header = new BtreeHeader(guard.getDataMut()); 44 | header.setRootPageId(Globals.INVALID_PAGE_ID); 45 | header.setHeight((short) 0); 46 | guard.close(); 47 | } catch (Exception e) { 48 | throw new RuntimeException("Error creating header page", e); 49 | } 50 | } 51 | } 52 | 53 | public Compositekey get(Compositekey key) throws Exception { 54 | out: 55 | while (true) { 56 | Context ctx = new Context(); 57 | ReadGuard guard = bufferPool.getReadGuard(fileName, headerPageId); 58 | if (guard == null) { 59 | Thread.sleep(10); 60 | continue out; 61 | } 62 | BtreeHeader header = new BtreeHeader(guard.getData()); 63 | if (header.getRootPageId() == Globals.INVALID_PAGE_ID) { 64 | guard.close(); 65 | return null; // the tree is empty 66 | } 67 | ctx.setHeaderReadGuard(guard); 68 | long rootPageId = header.getRootPageId(); 69 | long currentPageId = rootPageId; 70 | int lvl = 1; 71 | while (true) { 72 | ReadGuard currentGuard = bufferPool.getReadGuard(fileName, currentPageId); 73 | if (currentGuard == null) { 74 | ctx.release(); 75 | Thread.sleep(10); 76 | continue out; 77 | } 78 | ctx.release(); 79 | ctx.addReadGuard(currentGuard); 80 | if (lvl == header.getHeight()) { // we are at the leaf node level 81 | LeafNode currentNode = new LeafNode(keyType, valueType, currentGuard.getData()); 82 | Compositekey value = currentNode.get(key); 83 | ctx.release(); 84 | return value; 85 | } 86 | // if we are not at the leaf node level, we need to find the child node 87 | InternalNode currentNode = new InternalNode(keyType, currentGuard.getData()); 88 | Compositekey childPageId = currentNode.getChildForKey(key); 89 | currentPageId = childPageId.getVal(0); 90 | lvl++; 91 | } 92 | } 93 | } 94 | 95 | public boolean insert(Compositekey key, Compositekey value) throws Exception { 96 | out: 97 | while (true) { 98 | int tryInsertOpt = optimisticInsert(key, value); 99 | if (tryInsertOpt != 0) { 100 | return (tryInsertOpt == 1); 101 | } 102 | // check if the B+ tree is empty 103 | Context ctx = new Context(); 104 | WriteGuard guard = bufferPool.getWriteGuard(fileName, headerPageId); 105 | if (guard == null) { 106 | Thread.sleep(10); 107 | continue; 108 | } 109 | BtreeHeader header = new BtreeHeader(guard.getDataMut()); 110 | if 
(header.getRootPageId() == Globals.INVALID_PAGE_ID) { 111 | // create a new B+ tree 112 | long newRootPageId = bufferPool.allocateNewPage(fileName); 113 | WriteGuard newRootGuard = bufferPool.getWriteGuard(fileName, newRootPageId); 114 | if (newRootGuard == null) { 115 | guard.close(); 116 | Thread.sleep(10); 117 | continue; 118 | } 119 | LeafNode newRoot = new LeafNode(keyType, valueType, newRootGuard.getDataMut()); 120 | newRoot.setLeaf(true); 121 | newRoot.setNextLeafNode(Globals.INVALID_PAGE_ID); 122 | newRoot.setPageId(newRootPageId); 123 | 124 | header.setRootPageId(newRootPageId); 125 | header.setHeight((short) 1); 126 | 127 | newRoot.insert(key, value); 128 | newRootGuard.close(); 129 | guard.close(); 130 | return true; 131 | } 132 | ctx.setHeaderWriteGuard(guard); 133 | // get the root page id 134 | long rootPageId = header.getRootPageId(); 135 | // get the root node 136 | long currentPageId = rootPageId; 137 | int lvl = 1; 138 | while (true) { 139 | WriteGuard currentGuard = bufferPool.getWriteGuard(fileName, currentPageId); 140 | if (currentGuard == null) { 141 | ctx.release(); 142 | Thread.sleep(10); 143 | continue out; 144 | } 145 | if (lvl == header.getHeight()) { // we are at the leaf node level 146 | ctx.addWriteGuard(currentGuard); 147 | LeafNode currentNode = new LeafNode(keyType, valueType, currentGuard.getDataMut()); 148 | if (currentNode.insert(key, value) 149 | != 0) { // if there is space in the node insert and we are done 150 | // we need to check if the node is full we can redistribute only if we are not 151 | // at the root 152 | if (currentNode.getKeysN() < currentNode.getMaxKeysN() || lvl == 1) { 153 | ctx.release(); 154 | return true; 155 | } 156 | // we need to redistribute 157 | insertRedistribute(ctx); 158 | ctx.release(); 159 | return true; 160 | } 161 | break; // if there is no space in the node we need to split 162 | } 163 | // if we are not at the leaf node level, we need to find the child node 164 | InternalNode currentNode = new InternalNode(keyType, currentGuard.getDataMut()); 165 | // relase the locks over the above nodes since we are not going to split farther 166 | // than this 167 | if (currentNode.getKeysN() < currentNode.getMaxKeysN()) { 168 | ctx.release(); 169 | } 170 | ctx.addWriteGuard(currentGuard); 171 | Compositekey childPageId = currentNode.getChildForKey(key); 172 | currentPageId = childPageId.getVal(0); 173 | lvl++; 174 | } 175 | 176 | // we are at the leaf node level and we need to split the node and propagate the 177 | // split up 178 | // get the leaf node 179 | WriteGuard currentGuard = ctx.popFrontWrite(); 180 | LeafNode currentNode = new LeafNode(keyType, valueType, currentGuard.getDataMut()); 181 | WriteGuard newNodeguard = currentNode.split(bufferPool, fileName); 182 | if (newNodeguard == null) { 183 | currentGuard.close(); 184 | ctx.release(); 185 | Thread.sleep(10); 186 | continue; 187 | } 188 | LeafNode newNode = new LeafNode(keyType, valueType, newNodeguard.getDataMut()); 189 | // insert the key into the correct node 190 | if (key.compareTo(currentNode.getKey(currentNode.getKeysN() - 1)) <= 0) { 191 | currentNode.insert(key, value); 192 | } else { 193 | newNode.insert(key, value); 194 | } 195 | // check if the split node was the root 196 | if (currentNode.getPageId() == header.getRootPageId()) { 197 | // create a new root node 198 | long newRootPageId = bufferPool.allocateNewPage(fileName); 199 | WriteGuard newRootGuard = bufferPool.getWriteGuard(fileName, newRootPageId); 200 | InternalNode newRoot = new 
InternalNode(keyType, newRootGuard.getDataMut()); 201 | newRoot.setLeaf(false); 202 | newRoot.setPageId(newRootPageId); 203 | // set the two child nodes 204 | newRoot.setValue(0, makeCompositekeyValue(currentNode.getPageId())); 205 | newRoot.setValue(1, makeCompositekeyValue(newNode.getPageId())); 206 | newRoot.setKey(1, currentNode.getKey(currentNode.getKeysN() - 1)); 207 | newRoot.setCompositekeyN((short) 2); 208 | 209 | // update the header 210 | header.setRootPageId(newRootPageId); 211 | header.setHeight((short) (header.getHeight() + 1)); 212 | 213 | newNodeguard.close(); 214 | currentGuard.close(); 215 | newRootGuard.close(); 216 | ctx.release(); 217 | return true; 218 | } 219 | 220 | // we update the parent node to point to the new node rather than the old node 221 | // get the parent node 222 | WriteGuard parentGuard = ctx.peekFrontWrite(); 223 | InternalNode parentNode = new InternalNode(keyType, parentGuard.getDataMut()); 224 | int index = parentNode.getKeyIdx(key); 225 | // update the child node 226 | parentNode.setValue(index - 1, makeCompositekeyValue(newNode.getPageId())); 227 | 228 | key = currentNode.getKey(currentNode.getKeysN() - 1); 229 | long propagatePageId = currentNode.getPageId(); 230 | newNodeguard.close(); 231 | currentGuard.close(); 232 | // we need to propagate the split up 233 | while (!ctx.writeGuardIsEmpty()) { 234 | WriteGuard currentInternalGuard = ctx.popFrontWrite(); 235 | InternalNode current = new InternalNode(keyType, currentInternalGuard.getDataMut()); 236 | // check if the parent node is full 237 | if (current.getKeysN() < current.getMaxKeysN()) { 238 | // insert the new key and child node 239 | current.insert(key, makeCompositekeyValue(propagatePageId)); 240 | currentInternalGuard.close(); 241 | break; 242 | } 243 | // if the parent node is full we need to split it 244 | // create a new node 245 | WriteGuard newInternalNodeGuard = current.split(bufferPool, fileName); 246 | if (newInternalNodeGuard == null) { 247 | currentInternalGuard.close(); 248 | break; // the node was not split 249 | } 250 | InternalNode newInternalNode = new InternalNode(keyType, newInternalNodeGuard.getDataMut()); 251 | // insert the key value 252 | if (key.compareTo(current.getKey(current.getKeysN() - 1)) <= 0) { 253 | current.insert(key, makeCompositekeyValue(propagatePageId)); 254 | } else { 255 | newInternalNode.insert(key, makeCompositekeyValue(propagatePageId)); 256 | } 257 | // check if the split node was the root 258 | if (current.getPageId() == header.getRootPageId()) { 259 | // create a new root node 260 | 261 | long newRootPageId = bufferPool.allocateNewPage(fileName); 262 | 263 | WriteGuard newRootGuard = bufferPool.getWriteGuard(fileName, newRootPageId); 264 | 265 | InternalNode newRoot = new InternalNode(keyType, newRootGuard.getDataMut()); 266 | 267 | newRoot.setLeaf(false); 268 | newRoot.setPageId(newRootPageId); 269 | 270 | // set the two child nodes 271 | newRoot.setValue(0, makeCompositekeyValue(current.getPageId())); 272 | newRoot.setValue(1, makeCompositekeyValue(newInternalNode.getPageId())); 273 | newRoot.setKey(1, newInternalNode.getKey(0)); 274 | newRoot.setCompositekeyN((short) 2); 275 | 276 | // update the header 277 | header.setRootPageId(newRootPageId); 278 | header.setHeight((short) (header.getHeight() + 1)); 279 | currentInternalGuard.close(); 280 | newInternalNodeGuard.close(); 281 | newRootGuard.close(); 282 | break; 283 | } 284 | 285 | // update the parent node to point to the new node rather than the old node 286 | // get the parent node 287 | 
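        // the parent is only peeked, not popped, so it stays on the context and is the node the
        // next loop iteration works on; below, the parent slot that pointed at the old node is
        // redirected to the new right sibling, and (key, old pageId) is the pair pushed up next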
parentGuard = ctx.peekFrontWrite(); 288 | InternalNode parent = new InternalNode(keyType, parentGuard.getDataMut()); 289 | index = parent.getKeyIdx(key); 290 | parent.setValue(index - 1, makeCompositekeyValue(newInternalNode.getPageId())); 291 | 292 | key = newInternalNode.getKey(0); 293 | propagatePageId = current.getPageId(); 294 | currentInternalGuard.close(); 295 | newInternalNodeGuard.close(); 296 | } 297 | 298 | // release the locks 299 | ctx.release(); 300 | return true; 301 | } 302 | } 303 | 304 | private int optimisticInsert(Compositekey key, Compositekey value) throws Exception { 305 | Context ctx = new Context(); 306 | ReadGuard guard = bufferPool.getReadGuard(fileName, headerPageId); 307 | if (guard == null) { 308 | return 0; 309 | } 310 | BtreeHeader header = new BtreeHeader(guard.getData()); 311 | if (header.getRootPageId() == Globals.INVALID_PAGE_ID) { 312 | guard.close(); 313 | return 0; // the tree is empty 314 | } 315 | 316 | long rootPageId = header.getRootPageId(); 317 | long currentPageId = rootPageId; 318 | int lvl = 1; 319 | ctx.addReadGuard(guard); 320 | while (true) { 321 | if (lvl == header.getHeight()) { // we are at the leaf node level 322 | WriteGuard currentGuard = bufferPool.getWriteGuard(fileName, currentPageId); 323 | if (currentGuard == null) { 324 | ctx.release(); 325 | return 0; 326 | } 327 | ctx.addWriteGuard(currentGuard); 328 | LeafNode currentNode = new LeafNode(keyType, valueType, currentGuard.getDataMut()); 329 | int tryInsert = currentNode.insert(key, value); 330 | if (tryInsert != 0 && currentNode.getKeysN() == currentNode.getMaxKeysN() && lvl != 1) { 331 | // try to redistribute 332 | insertRedistribute(ctx); 333 | ctx.release(); 334 | return tryInsert; 335 | } 336 | ctx.release(); 337 | return tryInsert; 338 | } 339 | InternalNode currentNode; 340 | if (lvl == header.getHeight() - 1) { 341 | WriteGuard currentGuard = bufferPool.getWriteGuard(fileName, currentPageId); 342 | if (currentGuard == null) { 343 | ctx.release(); 344 | return 0; 345 | } 346 | ctx.release(); // crabbing: drop the previously held guard now that this level is latched 347 | ctx.addWriteGuard(currentGuard); 348 | // if we are not at the leaf node level, we need to find the child node 349 | currentNode = new InternalNode(keyType, currentGuard.getData()); 350 | } else { 351 | ReadGuard currentGuard = bufferPool.getReadGuard(fileName, currentPageId); 352 | if (currentGuard == null) { 353 | ctx.release(); 354 | return 0; 355 | } 356 | ctx.release(); 357 | ctx.addReadGuard(currentGuard); 358 | // if we are not at the leaf node level, we need to find the child node 359 | currentNode = new InternalNode(keyType, currentGuard.getData()); 360 | } 361 | Compositekey childPageId = currentNode.getChildForKey(key); 362 | currentPageId = childPageId.getVal(0); 363 | lvl++; 364 | } 365 | } 366 | 367 | private Compositekey makeCompositekeyForInternal(long pageId) { 368 | Compositekey key = new Compositekey(valueType); 369 | key.set(0, pageId, Long.class); 370 | return key; 371 | } 372 | 373 | private void insertRedistribute(Context ctx) throws Exception { 374 | // we have to make sure that there are two nodes in the ctx 375 | // a leaf node and an internal node (the parent of the leaf node) 376 | if (ctx.writeGuardIsEmpty()) { 377 | return; 378 | } 379 | 380 | WriteGuard leafGuard = ctx.popFrontWrite(); 381 | if (ctx.writeGuardIsEmpty()) { 382 | leafGuard.close(); 383 | return; 384 | } 385 | WriteGuard parentGuard = ctx.popFrontWrite(); 386 | LeafNode leafNode = new LeafNode(keyType, valueType, leafGuard.getDataMut()); 387 | InternalNode parentNode = new
InternalNode(keyType, parentGuard.getDataMut()); 388 | 389 | int index = parentNode.getKeyIdx(leafNode.getKey(leafNode.getKeysN() - 1)); 390 | // try to redistribute with the left sibling 391 | if (index > 1) { 392 | WriteGuard leftGuard = 393 | bufferPool.getWriteGuard(fileName, parentNode.getValue(index - 2).getVal(0)); 394 | if (leftGuard != null) { 395 | LeafNode leftNode = new LeafNode(keyType, valueType, leftGuard.getDataMut()); 396 | if (leftNode.getKeysN() < leftNode.getMaxKeysN()) { 397 | // redistribute with the left sibling 398 | // move the first key and value of the leaf node to the left node 399 | leftNode.insert(leafNode.getKey(0), leafNode.getValue(0)); 400 | leafNode.delete(0); 401 | // update the parent node 402 | parentNode.setKey(index - 1, leftNode.getKey(leftNode.getKeysN() - 1)); 403 | // release the locks 404 | leftGuard.close(); 405 | leafGuard.close(); 406 | parentGuard.close(); 407 | return; 408 | } 409 | // release the left guard 410 | leftGuard.close(); 411 | } 412 | } 413 | 414 | // try to redistribute with the right sibling 415 | if (index < parentNode.getKeysN()) { 416 | WriteGuard rightGuard = 417 | bufferPool.getWriteGuard(fileName, parentNode.getValue(index).getVal(0)); 418 | if (rightGuard == null) { 419 | leafGuard.close(); 420 | parentGuard.close(); 421 | return; 422 | } 423 | 424 | LeafNode rightNode = new LeafNode(keyType, valueType, rightGuard.getDataMut()); 425 | if (rightNode.getKeysN() < rightNode.getMaxKeysN()) { 426 | // redistribute with the right sibling 427 | // move the last key and value of the leaf node to the right node 428 | rightNode.insert( 429 | leafNode.getKey(leafNode.getKeysN() - 1), leafNode.getValue(leafNode.getKeysN() - 1)); 430 | leafNode.delete(leafNode.getKeysN() - 1); 431 | // update the parent node 432 | parentNode.setKey(index, leafNode.getKey(leafNode.getKeysN() - 1)); 433 | // release the locks 434 | rightGuard.close(); 435 | leafGuard.close(); 436 | parentGuard.close(); 437 | return; 438 | } 439 | // release the right guard 440 | rightGuard.close(); 441 | } 442 | 443 | // could not redistribute 444 | // release the locks 445 | leafGuard.close(); 446 | parentGuard.close(); 447 | } 448 | 449 | public boolean delete(Compositekey key) throws Exception { 450 | out: 451 | while (true) { 452 | // todo: do optimistic deletes 453 | // check if the B+ tree is empty 454 | Context ctx = new Context(); 455 | WriteGuard guard = bufferPool.getWriteGuard(fileName, headerPageId); 456 | if (guard == null) { 457 | Thread.sleep(10); 458 | continue; 459 | } 460 | BtreeHeader header = new BtreeHeader(guard.getDataMut()); 461 | if (header.getRootPageId() == Globals.INVALID_PAGE_ID) { 462 | // tree is empty 463 | guard.close(); 464 | return true; 465 | } 466 | ctx.setHeaderWriteGuard(guard); 467 | // get the root page id 468 | long rootPageId = header.getRootPageId(); 469 | // get the root node 470 | long currentPageId = rootPageId; 471 | int lvl = 1; 472 | while (true) { 473 | WriteGuard currentGuard = bufferPool.getWriteGuard(fileName, currentPageId); 474 | if (currentGuard == null) { 475 | ctx.release(); 476 | Thread.sleep(10); 477 | continue out; 478 | } 479 | if (lvl == header.getHeight()) { // we are at the leaf node level 480 | ctx.addWriteGuard(currentGuard); 481 | LeafNode currentNode = new LeafNode(keyType, valueType, currentGuard.getDataMut()); 482 | if (currentNode.delete(key) || lvl == 1) { 483 | ctx.release(); 484 | return true; 485 | } 486 | break; 487 | } 488 | // if we are not at the leaf node level, we need to find the child 
node 489 | InternalNode currentNode = new InternalNode(keyType, currentGuard.getDataMut()); 490 | // release the locks over the nodes above, since the underflow will not propagate 491 | // further up than this 492 | if (currentNode.getKeysN() > currentNode.getMinKeysN()) { 493 | ctx.release(); 494 | } 495 | ctx.addWriteGuard(currentGuard); 496 | Compositekey childPageId = currentNode.getChildForKey(key); 497 | currentPageId = childPageId.getVal(0); 498 | lvl++; 499 | } 500 | 501 | // first try to redistribute with the left or right sibling; if we cannot, we start merging 502 | WriteGuard leafGuard = ctx.popFrontWrite(); 503 | WriteGuard parentGuard = ctx.peekFrontWrite(); 504 | LeafNode leafNode = new LeafNode(keyType, valueType, leafGuard.getDataMut()); 505 | InternalNode parentNode = new InternalNode(keyType, parentGuard.getDataMut()); 506 | // Store the key before closing the guard 507 | Compositekey lastKey = leafNode.getKey(leafNode.getKeysN() - 1); 508 | // try to redistribute with the left sibling 509 | int index = parentNode.getKeyIdx(lastKey); 510 | while (!leafNode.redistribute(fileName, index, parentNode, bufferPool) 511 | && !leafNode.merge(fileName, index, parentNode, bufferPool)) { 512 | // try until success 513 | Thread.sleep(10); 514 | continue; 515 | } 516 | 517 | leafGuard.close(); 518 | 519 | // propagate the redistribute or merge up the tree 520 | while (!ctx.writeGuardIsEmpty()) { 521 | WriteGuard currentInternalGuard = ctx.popFrontWrite(); 522 | InternalNode current = new InternalNode(keyType, currentInternalGuard.getDataMut()); 523 | if (current.getPageId() == header.getRootPageId()) { 524 | if (current.getKeysN() == 1) { // update the root 525 | // update the header 526 | long newRootPageId = current.getValue(0).getVal(0); 527 | header.setRootPageId(newRootPageId); 528 | header.setHeight((short) (header.getHeight() - 1)); 529 | 530 | // delete the old root 531 | bufferPool.deletePage(fileName, current.getPageId()); 532 | } 533 | currentInternalGuard.close(); 534 | break; 535 | } 536 | 537 | if (current.getKeysN() >= current.getMinKeysN()) { 538 | currentInternalGuard.close(); 539 | break; 540 | } 541 | 542 | parentGuard = ctx.peekFrontWrite(); 543 | parentNode = new InternalNode(keyType, parentGuard.getDataMut()); 544 | index = parentNode.getKeyIdx(lastKey); // use the key stored before the leaf guard was closed 545 | while (!current.redistribute(fileName, index, parentNode, bufferPool) 546 | && !current.merge(fileName, index, parentNode, bufferPool)) { 547 | // try until success 548 | Thread.sleep(10); 549 | continue; 550 | } 551 | currentInternalGuard.close(); 552 | } 553 | ctx.release(); 554 | return true; 555 | } 556 | } 557 | 558 | // getters and setters 559 | public boolean isEmpty() { 560 | try { 561 | ReadGuard guard = bufferPool.getReadGuard(fileName, headerPageId); 562 | BtreeHeader header = new BtreeHeader(guard.getData()); 563 | boolean isEmpty = header.isEmpty(); 564 | guard.close(); 565 | return isEmpty; 566 | } catch (Exception e) { 567 | throw new RuntimeException("Error checking if B+ tree is empty", e); 568 | } 569 | } 570 | 571 | public void setEmpty() { 572 | try { 573 | WriteGuard guard = bufferPool.getWriteGuard(fileName, headerPageId); 574 | BtreeHeader header = new BtreeHeader(guard.getDataMut()); 575 | header.setRootPageId(Globals.INVALID_PAGE_ID); 576 | header.setHeight((short) 0); 577 | guard.close(); 578 | } catch (Exception e) { 579 | throw new RuntimeException("Error setting B+ tree to empty", e); 580 | } 581 | } 582 | 583 | public long getRootPageId() { 584 | try { 585 | ReadGuard guard =
bufferPool.getReadGuard(fileName, headerPageId); 586 | BtreeHeader header = new BtreeHeader(guard.getData()); 587 | long rootPageId = header.getRootPageId(); 588 | guard.close(); 589 | return rootPageId; 590 | } catch (Exception e) { 591 | throw new RuntimeException("Error getting root page ID", e); 592 | } 593 | } 594 | 595 | public String getFileName() { 596 | return fileName; 597 | } 598 | 599 | public long getHeaderPageId() { 600 | return headerPageId; 601 | } 602 | 603 | public void setHeaderPageId(long headerPageId) { 604 | this.headerPageId = headerPageId; 605 | } 606 | 607 | public Template getKeyType() { 608 | return keyType; 609 | } 610 | 611 | public Template getValueType() { 612 | return valueType; 613 | } 614 | 615 | public BufferPool getBufferPool() { 616 | return bufferPool; 617 | } 618 | 619 | private class Context { // holds the header guard and the root-to-leaf guard chain so they can be released together 620 | private WriteGuard headerWriteGuard; 621 | private ReadGuard headerReadGuard; 622 | private Deque<WriteGuard> writeGuards; 623 | private Deque<ReadGuard> readGuards; 624 | 625 | public Context() { 626 | writeGuards = new ArrayDeque<>(); 627 | readGuards = new ArrayDeque<>(); 628 | } 629 | 630 | public boolean writeGuardIsEmpty() { 631 | return writeGuards.isEmpty(); 632 | } 633 | 634 | public void setHeaderWriteGuard(WriteGuard guard) { 635 | this.headerWriteGuard = guard; 636 | } 637 | 638 | public void setHeaderReadGuard(ReadGuard guard) { 639 | this.headerReadGuard = guard; 640 | } 641 | 642 | public WriteGuard getHeaderWriteGuard() { 643 | return headerWriteGuard; 644 | } 645 | 646 | public void addWriteGuard(WriteGuard guard) { 647 | writeGuards.push(guard); 648 | } 649 | 650 | public void dropHeaderWriteGuard() { 651 | if (headerWriteGuard != null) { 652 | headerWriteGuard.close(); 653 | headerWriteGuard = null; 654 | } 655 | } 656 | 657 | public void dropHeaderReadGuard() { 658 | if (headerReadGuard != null) { 659 | headerReadGuard.close(); 660 | headerReadGuard = null; 661 | } 662 | } 663 | 664 | public void addReadGuard(ReadGuard guard) { 665 | readGuards.push(guard); 666 | } 667 | 668 | public WriteGuard peekFrontWrite() { 669 | return writeGuards.peekFirst(); 670 | } 671 | 672 | public ReadGuard peekFrontRead() { 673 | return readGuards.peekFirst(); 674 | } 675 | 676 | public WriteGuard peekBackWrite() { 677 | return writeGuards.peekLast(); 678 | } 679 | 680 | public ReadGuard peekBackRead() { 681 | return readGuards.peekLast(); 682 | } 683 | 684 | public WriteGuard popFrontWrite() { 685 | return writeGuards.removeFirst(); 686 | } 687 | 688 | public ReadGuard popFrontRead() { 689 | return readGuards.removeFirst(); 690 | } 691 | 692 | public WriteGuard popBackWrite() { 693 | return writeGuards.removeLast(); 694 | } 695 | 696 | public ReadGuard popBackRead() { 697 | return readGuards.removeLast(); 698 | } 699 | 700 | public void release() { 701 | dropHeaderWriteGuard(); 702 | while (!writeGuards.isEmpty()) { 703 | WriteGuard guard = writeGuards.pop(); 704 | guard.close(); 705 | } 706 | 707 | dropHeaderReadGuard(); 708 | while (!readGuards.isEmpty()) { 709 | ReadGuard guard = readGuards.pop(); 710 | guard.close(); 711 | } 712 | } 713 | } 714 | 715 | // cursor 716 | 717 | public Cursor begin() throws Exception { 718 | out: 719 | while (true) { 720 | Context ctx = new Context(); 721 | ReadGuard guard = bufferPool.getReadGuard(fileName, headerPageId); 722 | if (guard == null) { 723 | Thread.sleep(10); 724 | continue out; 725 | } 726 | BtreeHeader header = new BtreeHeader(guard.getData()); 727 | if (header.getRootPageId() == Globals.INVALID_PAGE_ID) { 728 | guard.close(); 729 | return
null; // the tree is empty 730 | } 731 | 732 | ctx.setHeaderReadGuard(guard); 733 | long rootPageId = header.getRootPageId(); 734 | long currentPageId = rootPageId; 735 | int lvl = 1; 736 | Cursor itr; 737 | while (true) { 738 | ReadGuard currentGuard = bufferPool.getReadGuard(fileName, currentPageId); 739 | if (currentGuard == null) { 740 | ctx.release(); 741 | Thread.sleep(10); 742 | continue out; 743 | } 744 | 745 | if (lvl == header.getHeight()) { // we are at the leaf node level 746 | LeafNode node = new LeafNode(keyType, valueType, currentGuard.getData()); 747 | itr = new Cursor(this, currentGuard, node); 748 | break; // we are done 749 | } 750 | 751 | ctx.addReadGuard(currentGuard); 752 | // if we are not at the leaf node level, we need to find the child node 753 | InternalNode currentNode = new InternalNode(keyType, currentGuard.getData()); 754 | Compositekey childPageId = currentNode.getValue(0); 755 | currentPageId = childPageId.getVal(0); 756 | lvl++; 757 | ctx.release(); 758 | } 759 | ctx.release(); 760 | return itr; 761 | } 762 | } 763 | 764 | // utils 765 | 766 | private Compositekey makeCompositekeyValue(long val) { 767 | Compositekey key = new Compositekey(valueType); 768 | ByteBuffer buf = ByteBuffer.wrap(new byte[Long.BYTES]); 769 | buf.putLong(val); 770 | key.set(0, buf.array()); 771 | return key; 772 | } 773 | } 774 | --------------------------------------------------------------------------------
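
For orientation, below is a minimal usage sketch of the Btree API that appears above: delete(Compositekey), begin() for a Cursor, isEmpty(), and the type getters. How a Btree instance is constructed is not shown in this part of the file, so the sketch takes an already-built tree as a parameter. The public insert(key, value) call and the single-long-column key layout are assumptions (they mirror the private optimisticInsert signature and the Compositekey.set(int, long, Class) overload used in makeCompositekeyForInternal); the class and method names BtreeUsageSketch and demo are purely illustrative.

  import btree.Btree;
  import btree.Cursor;
  import types.Compositekey;

  public class BtreeUsageSketch {
    // Hypothetical helper: exercises the public calls seen in Btree.java.
    public static void demo(Btree tree) throws Exception {
      // Build a key and a value using the templates the tree was opened with.
      Compositekey key = new Compositekey(tree.getKeyType());
      key.set(0, 42L, Long.class); // assumed single long column, for illustration only
      Compositekey value = new Compositekey(tree.getValueType());
      value.set(0, 7L, Long.class);

      tree.insert(key, value); // assumed public entry point; splits nodes on overflow

      Cursor cursor = tree.begin(); // null when the tree is empty
      // iterate with the Cursor API defined in src/btree/Cursor.java

      tree.delete(key); // removes the key, redistributing or merging nodes as needed
      boolean empty = tree.isEmpty(); // reads the header page under a read guard
    }
  }

The sketch only strings the calls together; error handling (insert or delete returning false, begin() returning null on an empty tree) is left to the caller, as in the tests under src/test/btree.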