├── .github └── workflows │ └── maven.yml ├── .gitignore ├── LICENSE ├── README.md ├── benchmarks ├── bitcask │ ├── Dockerfile │ ├── bitcask_test.erl │ ├── build_image.sh │ ├── delete_image.sh │ ├── docker-compose.yml │ └── run.sh ├── firefly │ ├── .gitignore │ ├── Dockerfile │ ├── build_image.sh │ ├── delete_image.sh │ ├── docker-compose.yml │ ├── pom.xml │ └── src │ │ └── main │ │ └── java │ │ └── com │ │ └── sahilbondre │ │ └── fireflydb │ │ └── benchmark │ │ └── Main.java ├── hashmap │ ├── .env │ ├── Dockerfile │ ├── build_image.sh │ ├── delete_image.sh │ ├── docker-compose.yml │ ├── hashmap.py │ └── run_image_and_clean.sh ├── leveldb │ ├── .env │ ├── Dockerfile │ ├── build_image.sh │ ├── delete_image.sh │ ├── docker-compose.yml │ └── leveldb.py └── rocksdb │ ├── .env │ ├── Dockerfile │ ├── build_image.sh │ ├── delete_image.sh │ ├── docker-compose.yml │ └── rocksdb_test.cpp ├── docs ├── read-test.png ├── rw-test.png └── write-test.png ├── pom.xml └── src ├── main └── java │ └── com │ └── sahilbondre │ └── firefly │ ├── FireflyDB.java │ ├── filetable │ ├── FilePointer.java │ ├── InvalidFileTableException.java │ ├── PersistableFileTable.java │ └── SerializedPersistableFileTable.java │ ├── log │ ├── FileChannelRandomAccessLog.java │ ├── InvalidRangeException.java │ └── RandomAccessLog.java │ └── model │ └── Segment.java └── test ├── java └── com │ └── sahilbondre │ └── firefly │ ├── CompactionTest.java │ ├── FireflyDBStaticTest.java │ ├── FireflyDBTest.java │ ├── PerformanceTest.java │ ├── TestUtils.java │ ├── filetable │ └── SerializedPersistableFileTableTest.java │ ├── log │ └── FileChannelRandomAccessLogTest.java │ └── model │ └── SegmentTest.java └── resources └── .empty /.github/workflows/maven.yml: -------------------------------------------------------------------------------- 1 | name: Java CI with Maven 2 | 3 | on: 4 | push: 5 | branches: [ "master" ] 6 | pull_request: 7 | branches: [ "master" ] 8 | release: 9 | types: [ "published" ] 10 | 11 | jobs: 12 | 
build: 13 | 14 | runs-on: ubuntu-latest 15 | 16 | steps: 17 | - uses: actions/checkout@v3 18 | - name: Set up JDK 17 19 | uses: actions/setup-java@v3 20 | with: 21 | java-version: '17' 22 | distribution: 'temurin' 23 | cache: maven 24 | - name: Build with Maven 25 | run: mvn -B package --file pom.xml -Dspring.profiles.active=ci 26 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | target/ 2 | !.mvn/wrapper/maven-wrapper.jar 3 | !**/src/main/**/target/ 4 | !**/src/test/**/target/ 5 | 6 | ### IntelliJ IDEA ### 7 | .idea/ 8 | *.iws 9 | *.iml 10 | *.ipr 11 | 12 | ### Eclipse ### 13 | .apt_generated 14 | .classpath 15 | .factorypath 16 | .project 17 | .settings 18 | .springBeans 19 | .sts4-cache 20 | 21 | ### NetBeans ### 22 | /nbproject/private/ 23 | /nbbuild/ 24 | /dist/ 25 | /nbdist/ 26 | /.nb-gradle/ 27 | build/ 28 | !**/src/main/**/build/ 29 | !**/src/test/**/build/ 30 | 31 | ### VS Code ### 32 | .vscode/ 33 | 34 | ### Mac OS ### 35 | .DS_Store -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | Copyright 2024 Sahil Bondre 179 | 180 | Licensed under the Apache License, Version 2.0 (the "License"); 181 | you may not use this file except in compliance with the License. 
182 | You may obtain a copy of the License at 183 | 184 | http://www.apache.org/licenses/LICENSE-2.0 185 | 186 | Unless required by applicable law or agreed to in writing, software 187 | distributed under the License is distributed on an "AS IS" BASIS, 188 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 189 | See the License for the specific language governing permissions and 190 | limitations under the License. 191 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # FireflyDB 2 | 3 | FireflyDB is a fast, thread-safe, JVM-based key-value storage engine with microsecond latency. FireflyDB is 20x faster 4 | for reads and 10x faster for writes than [Bitcask](https://github.com/basho/bitcask), which has a similar architecture. 5 | 6 | FireflyDB is hash-based and gives up range queries to achieve high throughput and low latency. As a result, it is about 7 | 100x faster at writes than [LevelDB](https://github.com/google/leveldb) (Google) 8 | and [RocksDB](https://github.com/facebook/rocksdb) (Facebook). 9 | 10 | FireflyDB relies on sensible defaults and does not expose many configuration options. FireflyDB is designed with 11 | educated tradeoffs to achieve high performance: 12 | 13 | 1. All the keys must fit in memory. This is a tradeoff with all hash-based storage engines. Even with the largest key 14 | size of 32KB, FireflyDB can store 32,000+ keys per 1GB of memory. 15 | 2. FireflyDB does not support range queries. 16 | 3. Maximum key size is 32768 bytes or 32KB. 17 | 4. Maximum value size is 2,147,483,647 bytes or 2.14 GB. 
18 | 19 | ## Installation 20 | 21 | ### Maven 22 | 23 | ```xml 24 | 25 | 26 | com.sahilbondre 27 | fireflydb 28 | 0.1.1 29 | 30 | ``` 31 | 32 | ### Gradle 33 | 34 | ```gradle 35 | implementation 'com.sahilbondre:fireflydb:0.1.1' 36 | ``` 37 | 38 | ## API 39 | 40 | ```java 41 | FireflyDB fireflyDB=FireflyDB.getInstance("path/to/db"); 42 | fireflyDB.start(); 43 | 44 | // Write 45 | byte[]key="testKey".getBytes(); 46 | byte[]value="testValue".getBytes(); 47 | 48 | fireflyDB.put(key,value); 49 | 50 | // Read 51 | byte[]result=fireflyDB.get(key); 52 | 53 | // Compaction 54 | // FireflyDB will compact automatically but can be triggered on demand. 55 | fireflyDB.compact(); 56 | 57 | fireflyDB.stop(); 58 | ``` 59 | 60 | ## Benchmarks 61 | 62 | ``` 63 | iterations: 100,000 64 | cpu: 1 65 | memory: 1GB 66 | key-size: 8 bytes 67 | value-size: 100 bytes 68 | ``` 69 | 70 | ### Random Write Test 71 | 72 | Test: Generate a random key and value and write it to the database. 73 | 74 | ![write-test](./docs/write-test.png) 75 | 76 | | Database | Avg Time (microseconds) | P90 Latency (microseconds) | 77 | |-----------|-------------------------|----------------------------| 78 | | In-Memory | 0.53 | 1 | 79 | | LevelDB | 445.94 | 811 | 80 | | Bitcask | 71.33 | 48 | 81 | | RocksDB | 568.60 | 872 | 82 | | FireflyDB | 7.10 | 5 | 83 | 84 | ### Random Read Test 85 | 86 | Test: Pick a random key from the ones written in the previous test and read it from the database. 87 | 88 | ![read-test](./docs/read-test.png) 89 | 90 | | Database | Avg Time (microseconds) | P90 Latency (microseconds) | 91 | |-----------|-------------------------|----------------------------| 92 | | In-Memory | 0.49 | 1 | 93 | | LevelDB | 1.55 | 2 | 94 | | Bitcask | 108.03 | 62 | 95 | | RocksDB | 0.94 | 2 | 96 | | FireflyDB | 4.97 | 4 | 97 | 98 | ### Alternating Read-Write Test 99 | 100 | Test: Perform a read and write operation alternately. 
101 | 102 | ![alternating-read-write-test](./docs/rw-test.png) 103 | 104 | | Database | Avg Time (microseconds) | P90 Latency (microseconds) | 105 | |-------------------|-------------------------|----------------------------| 106 | | In-Memory (read) | 0.61 | 1 | 107 | | In-Memory (write) | 0.57 | 1 | 108 | | LevelDB (read) | 3.43 | 5 | 109 | | LevelDB (write) | 441.38 | 814 | 110 | | Bitcask (read) | 120.15 | 60 | 111 | | Bitcask (write) | 66.78 | 57 | 112 | | RocksDB (read) | 4.54 | 7 | 113 | | RocksDB (write) | 567.14 | 971 | 114 | | FireflyDB (read) | 3.89 | 4 | 115 | | FireflyDB (write) | 3.91 | 4 | 116 | 117 | ## Potential Improvements 118 | 119 | - [ ] Add an explicit delete operation. 120 | - [ ] Expose compaction size as a configuration option. 121 | - [ ] Expose compaction interval as a configuration option. 122 | - [ ] Allow larger key size as a configuration option. 123 | - [ ] Expose read only mode. 124 | 125 | ## Contributing 126 | 127 | Pull requests are welcome. For major changes, please open an issue first 128 | to discuss what you would like to change. 129 | 130 | Please make sure to update tests as appropriate. 
131 | 132 | ## License 133 | 134 | [Apache 2.0](https://raw.githubusercontent.com/godcrampy/fireflydb/master/LICENSE) 135 | -------------------------------------------------------------------------------- /benchmarks/bitcask/Dockerfile: -------------------------------------------------------------------------------- 1 | # Use an official Erlang runtime as a parent image 2 | FROM erlang:26-slim 3 | 4 | RUN apt-get update \ 5 | && apt-get install -y git build-essential \ 6 | && apt-get clean \ 7 | && rm -rf /var/lib/apt/lists/* 8 | 9 | WORKDIR /app 10 | 11 | RUN git clone "https://github.com/basho/bitcask.git" \ 12 | && cd bitcask \ 13 | && make 14 | 15 | WORKDIR /app/bitcask 16 | 17 | COPY bitcask_test.erl /app/bitcask/ 18 | COPY run.sh /app/bitcask/ 19 | 20 | RUN erlc bitcask_test.erl 21 | 22 | RUN ["chmod", "+x", "./run.sh"] 23 | 24 | CMD "/app/bitcask/run.sh" 25 | -------------------------------------------------------------------------------- /benchmarks/bitcask/bitcask_test.erl: -------------------------------------------------------------------------------- 1 | -module(bitcask_test). 2 | -export([main/0]). 
%% Entry point: runs the write, read, and alternating read/write
%% benchmarks against a local Bitcask store and prints avg/p90 figures.
main() ->
    %% Wall-clock start for the whole script, in microseconds.
    Begin = erlang:system_time(microsecond),
    %% Make the locally built Bitcask beams loadable.
    code:add_patha("_build/default/lib/bitcask/ebin"),
    Iterations = 100000,

    %% Open the Bitcask database.
    Handle = bitcask:open("db", [read_write]),

    %% Phase 1: timed writes (also produces the key population).
    {WriteDurations, Keys} = write_values(Handle, Iterations, [], []),

    %% Phase 2: timed reads of random known keys.
    {ReadDurations, _} = read_values(Handle, Keys, Iterations, []),

    %% Phase 3: alternating write/read pairs.
    {ReadLatencies, WriteLatencies, _} =
        read_write_alternate(Handle, Iterations, [], [], Keys),
    bitcask:close(Handle),

    %% Report: write test.
    io:format("Write Test Results:~n"),
    io:format(" Average write duration: ~p microseconds~n",
              [calculate_average(WriteDurations)]),
    io:format(" p90 write duration: ~p microseconds~n",
              [calculate_percentile(WriteDurations, 90)]),

    %% Report: read test.
    io:format("Read Test Results:~n"),
    io:format(" Average read duration: ~p microseconds~n",
              [calculate_average(ReadDurations)]),
    io:format(" p90 read duration: ~p microseconds~n",
              [calculate_percentile(ReadDurations, 90)]),

    %% Report: alternating read/write test.
    io:format("Read Write Alternate Test Results:~n"),
    io:format(" Average write latency: ~p microseconds~n",
              [calculate_average(WriteLatencies)]),
    io:format(" p90 write latency: ~p microseconds~n",
              [calculate_percentile(WriteLatencies, 90)]),
    io:format(" Average read latency: ~p microseconds~n",
              [calculate_average(ReadLatencies)]),
    io:format(" p90 read latency: ~p microseconds~n",
              [calculate_percentile(ReadLatencies, 90)]),

    %% Total runtime in seconds.
    Finish = erlang:system_time(microsecond),
    io:format("Total script runtime: ~p seconds~n",
              [(Finish - Begin) / 1000000]).

%% Perform N timed writes. Returns {Durations, KeysWritten},
%% both accumulated newest-first.
write_values(_Handle, 0, Durations, Keys) ->
    {Durations, Keys};
write_values(Handle, Remaining, Durations, Keys) when Remaining > 0 ->
    {Elapsed, Keys1} = write_operation(Handle, Keys),
    write_values(Handle, Remaining - 1, [Elapsed | Durations], Keys1).

%% Perform N timed reads of random keys drawn from Keys.
%% Returns {Durations, 0} (second element is a placeholder).
read_values(_Handle, _Keys, 0, Durations) ->
    {Durations, 0};
read_values(Handle, Keys, Remaining, Durations) when Remaining > 0 ->
    {Elapsed, _} = read_operation(Handle, Keys),
    read_values(Handle, Keys, Remaining - 1, [Elapsed | Durations]).

%% Perform N write+read pairs, collecting each side's latency.
%% The read draws from the key list as it was before this iteration's
%% write (the freshly written key joins the pool on the next round).
read_write_alternate(_Handle, 0, ReadLatencies, WriteLatencies, Keys) ->
    {ReadLatencies, WriteLatencies, Keys};
read_write_alternate(Handle, Remaining, ReadLatencies, WriteLatencies, Keys)
  when Remaining > 0 ->
    {WriteLatency, Keys1} = write_operation(Handle, Keys),
    {ReadLatency, _} = read_operation(Handle, Keys),
    read_write_alternate(Handle, Remaining - 1,
                         [ReadLatency | ReadLatencies],
                         [WriteLatency | WriteLatencies],
                         Keys1).
%% Time a single put of a fresh random 8-byte key / 100-byte value.
%% Returns {MicrosecondsElapsed, [Key | Keys]}.
write_operation(Handle, Keys) ->
    Key = generate_random_binary(8),
    Value = generate_random_binary(100),

    T0 = erlang:system_time(microsecond),
    ok = bitcask:put(Handle, Key, Value),
    Elapsed = erlang:system_time(microsecond) - T0,

    {Elapsed, [Key | Keys]}.

%% Time a single get of a uniformly chosen key from the known set.
%% Returns {MicrosecondsElapsed, Keys} (key list is unchanged).
read_operation(Handle, Keys) ->
    Target = lists:nth(rand:uniform(length(Keys)), Keys),

    T0 = erlang:system_time(microsecond),
    _ = bitcask:get(Handle, Target),
    Elapsed = erlang:system_time(microsecond) - T0,

    {Elapsed, Keys}.

%% Cryptographically strong random binary of the requested length.
generate_random_binary(Length) ->
    crypto:strong_rand_bytes(Length).

%% Arithmetic mean; yields 0 when the sum is 0 (covers the empty list
%% without dividing by zero).
calculate_average(Durations) ->
    case lists:sum(Durations) of
        0 -> 0;
        Total -> Total / length(Durations)
    end.

%% Nearest-rank-style percentile over a non-empty list,
%% for 0 < Percentile =< 100.
calculate_percentile(Durations, Percentile)
  when Percentile > 0, Percentile =< 100 ->
    Sorted = lists:sort(Durations),
    Position = trunc((Percentile / 100) * (length(Sorted) - 1)) + 1,
    lists:nth(Position, Sorted).
-------------------------------------------------------------------------------- /benchmarks/bitcask/build_image.sh: -------------------------------------------------------------------------------- 1 | docker build -t fireflydb-benchmark-bitcask .
2 | -------------------------------------------------------------------------------- /benchmarks/bitcask/delete_image.sh: -------------------------------------------------------------------------------- 1 | docker rmi fireflydb-benchmark-bitcask 2 | -------------------------------------------------------------------------------- /benchmarks/bitcask/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | fireflydb-benchmark-bitcask: 4 | image: fireflydb-benchmark-bitcask 5 | env_file: 6 | - ./.env 7 | deploy: 8 | resources: 9 | limits: 10 | cpus: '1' 11 | memory: '1G' 12 | -------------------------------------------------------------------------------- /benchmarks/bitcask/run.sh: -------------------------------------------------------------------------------- 1 | erl -eval 'bitcask_test:main(), init:stop()' -noshell 2 | -------------------------------------------------------------------------------- /benchmarks/firefly/.gitignore: -------------------------------------------------------------------------------- 1 | target/ 2 | !.mvn/wrapper/maven-wrapper.jar 3 | !**/src/main/**/target/ 4 | !**/src/test/**/target/ 5 | 6 | ### IntelliJ IDEA ### 7 | .idea/ 8 | *.iws 9 | *.iml 10 | *.ipr 11 | 12 | ### Eclipse ### 13 | .apt_generated 14 | .classpath 15 | .factorypath 16 | .project 17 | .settings 18 | .springBeans 19 | .sts4-cache 20 | 21 | ### NetBeans ### 22 | /nbproject/private/ 23 | /nbbuild/ 24 | /dist/ 25 | /nbdist/ 26 | /.nb-gradle/ 27 | build/ 28 | !**/src/main/**/build/ 29 | !**/src/test/**/build/ 30 | 31 | ### VS Code ### 32 | .vscode/ 33 | 34 | ### Mac OS ### 35 | .DS_Store -------------------------------------------------------------------------------- /benchmarks/firefly/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM eclipse-temurin:17-jre-jammy 2 | 3 | WORKDIR /app/firefly 4 | 5 | COPY 
./target/benchmark-1.0-SNAPSHOT-jar-with-dependencies.jar /app/firefly/ 6 | 7 | CMD ["java", "-jar", "benchmark-1.0-SNAPSHOT-jar-with-dependencies.jar"] 8 | -------------------------------------------------------------------------------- /benchmarks/firefly/build_image.sh: -------------------------------------------------------------------------------- 1 | mvn clean compile assembly:single 2 | 3 | docker build -t fireflydb-benchmark-firefly . 4 | -------------------------------------------------------------------------------- /benchmarks/firefly/delete_image.sh: -------------------------------------------------------------------------------- 1 | docker rmi fireflydb-benchmark-firefly 2 | -------------------------------------------------------------------------------- /benchmarks/firefly/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | fireflydb-benchmark-bitcask: 4 | image: fireflydb-benchmark-firefly 5 | deploy: 6 | resources: 7 | limits: 8 | cpus: '1' 9 | memory: '1G' 10 | -------------------------------------------------------------------------------- /benchmarks/firefly/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 5 | 4.0.0 6 | 7 | com.sahilbondre.fireflydb 8 | benchmark 9 | 1.0-SNAPSHOT 10 | 11 | 12 | 17 13 | 17 14 | UTF-8 15 | 16 | 17 | 18 | 19 | com.sahilbondre 20 | fireflydb 21 | 0.1.1 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | org.apache.maven.plugins 30 | maven-assembly-plugin 31 | 3.6.0 32 | 33 | 34 | 35 | com.sahilbondre.fireflydb.benchmark.Main 36 | 37 | 38 | 39 | jar-with-dependencies 40 | 41 | 42 | 43 | 44 | 45 | 46 | -------------------------------------------------------------------------------- /benchmarks/firefly/src/main/java/com/sahilbondre/fireflydb/benchmark/Main.java: -------------------------------------------------------------------------------- 1 | package com.sahilbondre.fireflydb.benchmark; 2 | 3 | 
import com.sahilbondre.firefly.FireflyDB; 4 | 5 | import java.io.IOException; 6 | import java.nio.file.Files; 7 | import java.nio.file.Paths; 8 | import java.util.ArrayList; 9 | import java.util.Arrays; 10 | import java.util.List; 11 | import java.util.Random; 12 | import java.util.logging.Logger; 13 | 14 | public class Main { 15 | private static final String TEST_FOLDER = "src/test/resources/test_folder"; 16 | private static final int ITERATIONS = 100000; 17 | private static final int KEY_LENGTH = 8; 18 | private static final int VALUE_LENGTH = 100; 19 | private static final Logger logger = Logger.getLogger(Main.class.getName()); 20 | 21 | private static final FireflyDB fireflyDB = FireflyDB.getInstance(TEST_FOLDER); 22 | 23 | 24 | 25 | public static void main(String[] args) throws IOException { 26 | Files.createDirectories(Paths.get(TEST_FOLDER)); 27 | 28 | fireflyDB.start(); 29 | 30 | logger.info("Starting benchmark..."); 31 | logger.info("Iterations: " + ITERATIONS); 32 | logger.info("Key length: " + KEY_LENGTH); 33 | logger.info("Value length: " + VALUE_LENGTH); 34 | 35 | logger.info("Starting writes..."); 36 | 37 | long[] writeTimes = new long[ITERATIONS]; 38 | 39 | List availableKeys = new ArrayList<>(); 40 | 41 | long startTime = System.nanoTime(); 42 | for (int i = 0; i < ITERATIONS; i++) { 43 | byte[] key = getRandomBytes(KEY_LENGTH); 44 | byte[] value = getRandomBytes(VALUE_LENGTH); 45 | 46 | long writeTime = saveKeyValuePairAndGetTime(key, value); 47 | availableKeys.add(key); 48 | writeTimes[i] = writeTime; 49 | } 50 | long totalTime = (System.nanoTime() - startTime) / 1000; // Convert nanoseconds to microseconds 51 | logger.info("Total time for writes: " + totalTime + " mus"); 52 | 53 | double averageWriteTime = 0; 54 | for (long writeTime : writeTimes) { 55 | averageWriteTime += writeTime; 56 | } 57 | averageWriteTime /= ITERATIONS; 58 | 59 | logger.info("Average write latency: " + averageWriteTime + " mus"); 60 | 61 | // Calculate p90 write latency 
62 | // Sort write times 63 | Arrays.sort(writeTimes); 64 | long p90WriteTime = writeTimes[(int) (ITERATIONS * 0.9)]; 65 | logger.info("p90 write latency: " + p90WriteTime + " mus"); 66 | 67 | // Benchmark reads 68 | logger.info("\nStarting reads..."); 69 | 70 | long[] readTimes = new long[ITERATIONS]; 71 | 72 | startTime = System.nanoTime(); 73 | for (int i = 0; i < ITERATIONS; i++) { 74 | // Get a random key from the list of available keys 75 | byte[] key = availableKeys.get(new Random().nextInt(availableKeys.size())); 76 | 77 | long readTime = getKeyValuePairAndGetTime(key); 78 | readTimes[i] = readTime; 79 | } 80 | totalTime = (System.nanoTime() - startTime) / 1000; // Convert nanoseconds to microseconds 81 | logger.info("Total time for reads: " + totalTime + " mus"); 82 | 83 | double averageReadTime = 0; 84 | for (long readTime : readTimes) { 85 | averageReadTime += readTime; 86 | } 87 | averageReadTime /= ITERATIONS; 88 | 89 | logger.info("Average read latency: " + averageReadTime + " mus"); 90 | 91 | // Calculate p90 read latency 92 | // Sort read times 93 | Arrays.sort(readTimes); 94 | long p90ReadTime = readTimes[(int) (ITERATIONS * 0.9)]; 95 | logger.info("p90 read latency: " + p90ReadTime + " mus"); 96 | 97 | 98 | // Benchmark reads and writes 99 | logger.info("\nStarting reads and writes..."); 100 | 101 | 102 | startTime = System.nanoTime(); 103 | for (int i = 0; i < ITERATIONS; i++) { 104 | byte[] writeKey = getRandomBytes(KEY_LENGTH); 105 | byte[] writeValue = getRandomBytes(VALUE_LENGTH); 106 | 107 | long writeTime = saveKeyValuePairAndGetTime(writeKey, writeValue); 108 | 109 | availableKeys.add(writeKey); 110 | 111 | byte[] readKey = availableKeys.get(new Random().nextInt(availableKeys.size())); 112 | 113 | long readTime = getKeyValuePairAndGetTime(readKey); 114 | 115 | writeTimes[i] = writeTime; 116 | readTimes[i] = readTime; 117 | } 118 | totalTime = (System.nanoTime() - startTime) / 1000; // Convert nanoseconds to microseconds 119 | 
logger.info("Total time for reads and writes: " + totalTime + " mus"); 120 | 121 | averageReadTime = 0; 122 | for (long readTime : readTimes) { 123 | averageReadTime += readTime; 124 | } 125 | averageReadTime /= ITERATIONS; 126 | 127 | averageWriteTime = 0; 128 | for (long writeTime : writeTimes) { 129 | averageWriteTime += writeTime; 130 | } 131 | averageWriteTime /= ITERATIONS; 132 | 133 | logger.info("Average read latency: " + averageReadTime + " mus"); 134 | logger.info("Average write latency: " + averageWriteTime + " mus"); 135 | 136 | // Calculate p90 read latency 137 | // Sort read times 138 | Arrays.sort(readTimes); 139 | 140 | // Calculate p90 write latency 141 | // Sort write times 142 | Arrays.sort(writeTimes); 143 | 144 | p90ReadTime = readTimes[(int) (ITERATIONS * 0.9)]; 145 | logger.info("p90 read latency: " + p90ReadTime + " mus"); 146 | 147 | p90WriteTime = writeTimes[(int) (ITERATIONS * 0.9)]; 148 | logger.info("p90 write latency: " + p90WriteTime + " mus"); 149 | } 150 | 151 | private static byte[] getRandomBytes(int length) { 152 | byte[] bytes = new byte[length]; 153 | new Random().nextBytes(bytes); 154 | return bytes; 155 | } 156 | 157 | private static long saveKeyValuePairAndGetTime(byte[] key, byte[] value) throws IOException { 158 | long startTime = System.nanoTime(); 159 | fireflyDB.set(key, value); 160 | return (System.nanoTime() - startTime) / 1000; // Convert nanoseconds to microseconds 161 | } 162 | 163 | private static long getKeyValuePairAndGetTime(byte[] key) throws IOException { 164 | long startTime = System.nanoTime(); 165 | fireflyDB.get(key); 166 | return (System.nanoTime() - startTime) / 1000; // Convert nanoseconds to microseconds 167 | } 168 | } 169 | -------------------------------------------------------------------------------- /benchmarks/hashmap/.env: -------------------------------------------------------------------------------- 1 | ITERATIONS=1000000 2 | KEY_LENGTH=8 3 | VALUE_LENGTH=100 4 | 
-------------------------------------------------------------------------------- /benchmarks/hashmap/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.10-slim 2 | 3 | WORKDIR /app 4 | 5 | COPY hashmap.py /app/ 6 | 7 | CMD ["python", "hashmap.py"] 8 | -------------------------------------------------------------------------------- /benchmarks/hashmap/build_image.sh: -------------------------------------------------------------------------------- 1 | docker build -t fireflydb-benchmark-hashmap . 2 | -------------------------------------------------------------------------------- /benchmarks/hashmap/delete_image.sh: -------------------------------------------------------------------------------- 1 | docker rmi fireflydb-benchmark-hashmap 2 | -------------------------------------------------------------------------------- /benchmarks/hashmap/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | fireflydb-benchmark-hashmap: 4 | image: fireflydb-benchmark-hashmap 5 | env_file: 6 | - ./.env 7 | deploy: 8 | resources: 9 | limits: 10 | cpus: '1' 11 | memory: '1G' 12 | -------------------------------------------------------------------------------- /benchmarks/hashmap/hashmap.py: -------------------------------------------------------------------------------- 1 | import os 2 | import random 3 | from datetime import datetime 4 | 5 | db = {} 6 | startTime = datetime.now() 7 | 8 | # read from environment variables 9 | ITERATIONS = int(os.environ.get("ITERATIONS")) 10 | KEY_LENGTH = int(os.environ.get("KEY_LENGTH")) 11 | VALUE_LENGTH = int(os.environ.get("VALUE_LENGTH")) 12 | 13 | print("Starting HashMap benchmark") 14 | print("Iterations: " + str(ITERATIONS)) 15 | print("Key length: " + str(KEY_LENGTH)) 16 | print("Value length: " + str(VALUE_LENGTH)) 17 | 18 | 19 | def get_random_bytes_of_length(length): 20 | return os.urandom(length) 
21 | 22 | 23 | def save_kv_pair_and_get_time_mus(k, v): 24 | start_time = datetime.now() 25 | db[k] = v 26 | return (datetime.now() - start_time).microseconds 27 | 28 | 29 | def get_kv_pair_and_get_time_mus(k): 30 | start_time = datetime.now() 31 | db[k] 32 | return (datetime.now() - start_time).microseconds 33 | 34 | 35 | # Benchmark writes 36 | print() 37 | print("Starting writes") 38 | 39 | times = [] 40 | keys = [] 41 | 42 | for i in range(ITERATIONS): 43 | key = get_random_bytes_of_length(KEY_LENGTH) 44 | value = get_random_bytes_of_length(VALUE_LENGTH) 45 | keys.append(key) 46 | times.append(save_kv_pair_and_get_time_mus(key, value)) 47 | 48 | print("Average time to save a key-value pair: " + str(sum(times) / len(times)) + " mus") 49 | print("Total time: " + str(datetime.now() - startTime)) 50 | print("P90 latency: " + str(sorted(times)[int(len(times) * 0.9)]) + " mus") 51 | 52 | # Benchmark reads 53 | print() 54 | print("Starting reads") 55 | times = [] 56 | 57 | for i in range(ITERATIONS): 58 | key = random.choice(keys) 59 | 60 | times.append(get_kv_pair_and_get_time_mus(key)) 61 | 62 | print("Average time to read a key-value pair: " + str(sum(times) / len(times)) + " mus") 63 | print("Total time: " + str(datetime.now() - startTime)) 64 | print("P90 latency: " + str(sorted(times)[int(len(times) * 0.9)]) + " mus") 65 | 66 | # Benchmark reads and writes 67 | print() 68 | print("Starting reads and writes") 69 | read_times = [] 70 | write_times = [] 71 | 72 | for i in range(ITERATIONS): 73 | key = get_random_bytes_of_length(KEY_LENGTH) 74 | value = get_random_bytes_of_length(VALUE_LENGTH) 75 | 76 | write_times.append(save_kv_pair_and_get_time_mus(key, value)) 77 | keys.append(key) 78 | 79 | key = random.choice(keys) 80 | read_times.append(get_kv_pair_and_get_time_mus(key)) 81 | 82 | print("Average time to write a key-value pair: " + str(sum(write_times) / len(write_times)) + " mus") 83 | print("Average time to read a key-value pair: " + str(sum(read_times) / 
len(read_times)) + " mus") 84 | print("Total time: " + str(datetime.now() - startTime)) 85 | print("P90 latency to write: " + str(sorted(write_times)[int(len(write_times) * 0.9)]) + " mus") 86 | print("P90 latency to read: " + str(sorted(read_times)[int(len(read_times) * 0.9)]) + " mus") 87 | 88 | print() 89 | print("Time to run: " + str(datetime.now() - startTime)) 90 | -------------------------------------------------------------------------------- /benchmarks/hashmap/run_image_and_clean.sh: -------------------------------------------------------------------------------- 1 | docker run --env-file .env --rm fireflydb-benchmark-hashmap 2 | -------------------------------------------------------------------------------- /benchmarks/leveldb/.env: -------------------------------------------------------------------------------- 1 | ITERATIONS=100000 2 | KEY_LENGTH=8 3 | VALUE_LENGTH=100 4 | -------------------------------------------------------------------------------- /benchmarks/leveldb/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.10-slim 2 | 3 | # Install required system dependencies 4 | RUN apt-get update \ 5 | && apt-get install -y libleveldb-dev build-essential \ 6 | && apt-get clean \ 7 | && rm -rf /var/lib/apt/lists/* 8 | 9 | RUN pip install plyvel 10 | 11 | WORKDIR /app 12 | 13 | COPY leveldb.py /app/ 14 | 15 | CMD ["python", "leveldb.py"] 16 | -------------------------------------------------------------------------------- /benchmarks/leveldb/build_image.sh: -------------------------------------------------------------------------------- 1 | docker build -t fireflydb-benchmark-leveldb . 
2 | -------------------------------------------------------------------------------- /benchmarks/leveldb/delete_image.sh: -------------------------------------------------------------------------------- 1 | docker rmi fireflydb-benchmark-leveldb 2 | -------------------------------------------------------------------------------- /benchmarks/leveldb/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | fireflydb-benchmark-leveldb: 4 | image: fireflydb-benchmark-leveldb 5 | env_file: 6 | - ./.env 7 | deploy: 8 | resources: 9 | limits: 10 | cpus: '1' 11 | memory: '1G' 12 | -------------------------------------------------------------------------------- /benchmarks/leveldb/leveldb.py: -------------------------------------------------------------------------------- 1 | import os 2 | import plyvel 3 | import random 4 | from datetime import datetime 5 | 6 | db = plyvel.DB('/tmp/testdb/', create_if_missing=True) 7 | startTime = datetime.now() 8 | 9 | # read from environment variables 10 | ITERATIONS = int(os.environ.get("ITERATIONS")) 11 | KEY_LENGTH = int(os.environ.get("KEY_LENGTH")) 12 | VALUE_LENGTH = int(os.environ.get("VALUE_LENGTH")) 13 | 14 | print("Starting LevelDB benchmark") 15 | print("Iterations: " + str(ITERATIONS)) 16 | print("Key length: " + str(KEY_LENGTH)) 17 | print("Value length: " + str(VALUE_LENGTH)) 18 | 19 | 20 | def get_random_bytes_of_length(length): 21 | return os.urandom(length) 22 | 23 | 24 | def save_kv_pair_and_get_time_mus(k, v): 25 | start_time = datetime.now() 26 | db.put(k, v, sync=True) 27 | return (datetime.now() - start_time).microseconds 28 | 29 | 30 | def get_kv_pair_and_get_time_mus(k): 31 | start_time = datetime.now() 32 | db.get(k) 33 | return (datetime.now() - start_time).microseconds 34 | 35 | 36 | # Benchmark writes 37 | print("\nStarting writes...") 38 | 39 | times = [] 40 | keys = [] 41 | 42 | for i in range(ITERATIONS): 43 | key = 
get_random_bytes_of_length(KEY_LENGTH) 44 | value = get_random_bytes_of_length(VALUE_LENGTH) 45 | keys.append(key) 46 | times.append(save_kv_pair_and_get_time_mus(key, value)) 47 | 48 | print("\nWrite Test Results:") 49 | print(" Average write latency: " + str(sum(times) / len(times)) + " mus") 50 | print(" P90 write latency: " + str(sorted(times)[int(len(times) * 0.9)]) + " mus") 51 | print(" Total time: " + str(datetime.now() - startTime)) 52 | 53 | # Benchmark reads 54 | print("\nStarting reads...") 55 | times = [] 56 | 57 | for i in range(ITERATIONS): 58 | key = random.choice(keys) 59 | 60 | times.append(get_kv_pair_and_get_time_mus(key)) 61 | 62 | print("\nRead Test Results:") 63 | print(" Average read latency: " + str(sum(times) / len(times)) + " mus") 64 | print(" P90 read latency: " + str(sorted(times)[int(len(times) * 0.9)]) + " mus") 65 | print(" Total time: " + str(datetime.now() - startTime)) 66 | 67 | # Benchmark reads and writes 68 | print("\nStarting reads and writes...") 69 | read_times = [] 70 | write_times = [] 71 | 72 | for i in range(ITERATIONS): 73 | key = get_random_bytes_of_length(KEY_LENGTH) 74 | value = get_random_bytes_of_length(VALUE_LENGTH) 75 | 76 | write_times.append(save_kv_pair_and_get_time_mus(key, value)) 77 | keys.append(key) 78 | 79 | key = random.choice(keys) 80 | read_times.append(get_kv_pair_and_get_time_mus(key)) 81 | 82 | print("\nRead and Write Test Results:") 83 | print(" Average write latency: " + str(sum(write_times) / len(write_times)) + " mus") 84 | print(" Average read latency: " + str(sum(read_times) / len(read_times)) + " mus") 85 | print(" P90 write latency: " + str(sorted(write_times)[int(len(write_times) * 0.9)]) + " mus") 86 | print(" P90 read latency: " + str(sorted(read_times)[int(len(read_times) * 0.9)]) + " mus") 87 | print(" Total time: " + str(datetime.now() - startTime)) 88 | 89 | print("\n Total time to run tests: " + str(datetime.now() - startTime)) 90 | 
-------------------------------------------------------------------------------- /benchmarks/rocksdb/.env: -------------------------------------------------------------------------------- 1 | ITERATIONS=100000 2 | KEY_LENGTH=8 3 | VALUE_LENGTH=100 4 | -------------------------------------------------------------------------------- /benchmarks/rocksdb/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:bullseye-slim 2 | 3 | WORKDIR /app 4 | 5 | RUN apt-get update && \ 6 | apt-get install -y --no-install-recommends \ 7 | g++ \ 8 | cmake \ 9 | libsnappy-dev \ 10 | zlib1g-dev \ 11 | libbz2-dev \ 12 | liblz4-dev \ 13 | libgflags-dev \ 14 | libpthread-stubs0-dev \ 15 | librocksdb-dev \ 16 | && apt-get clean \ 17 | && rm -rf /var/lib/apt/lists/* 18 | 19 | 20 | COPY ./rocksdb_test.cpp /app/ 21 | 22 | RUN g++ -std=c++17 -o rocksdb_test rocksdb_test.cpp -lrocksdb -lpthread -lsnappy -lgflags -lz -lbz2 -llz4 23 | 24 | CMD ["./rocksdb_test"] 25 | -------------------------------------------------------------------------------- /benchmarks/rocksdb/build_image.sh: -------------------------------------------------------------------------------- 1 | docker build -t fireflydb-benchmark-rocksdb . 
2 | -------------------------------------------------------------------------------- /benchmarks/rocksdb/delete_image.sh: -------------------------------------------------------------------------------- 1 | docker rmi fireflydb-benchmark-rocksdb 2 | -------------------------------------------------------------------------------- /benchmarks/rocksdb/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | fireflydb-benchmark-rocksdb: 4 | image: fireflydb-benchmark-rocksdb 5 | env_file: 6 | - ./.env 7 | deploy: 8 | resources: 9 | limits: 10 | cpus: '1' 11 | memory: '1G' 12 | -------------------------------------------------------------------------------- /benchmarks/rocksdb/rocksdb_test.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #include "rocksdb/db.h" 10 | 11 | const std::string db_path = "/tmp/rockstestdb/"; 12 | 13 | const int ITERATIONS = std::atoi(std::getenv("ITERATIONS")); 14 | const int KEY_LENGTH = std::atoi(std::getenv("KEY_LENGTH")); 15 | const int VALUE_LENGTH = std::atoi(std::getenv("VALUE_LENGTH")); 16 | 17 | std::string get_random_bytes_of_length(int length) { 18 | std::string random_bytes; 19 | for (int i = 0; i < length; ++i) { 20 | random_bytes.push_back(static_cast(std::rand() % 256)); 21 | } 22 | return random_bytes; 23 | } 24 | 25 | double save_kv_pair_and_get_time_ms(rocksdb::DB* db, const std::string& key, const std::string& value) { 26 | auto start_time = std::chrono::high_resolution_clock::now(); 27 | rocksdb::WriteOptions write_options; 28 | write_options.sync = true; 29 | rocksdb::Status status = db->Put(write_options, key, value); 30 | auto end_time = std::chrono::high_resolution_clock::now(); 31 | if (!status.ok()) { 32 | std::cerr << "Error during write: " << status.ToString() << std::endl; 33 | } 34 | return 
std::chrono::duration_cast(end_time - start_time).count(); 35 | } 36 | 37 | double get_kv_pair_and_get_time_ms(rocksdb::DB* db, const std::string& key) { 38 | auto start_time = std::chrono::high_resolution_clock::now(); 39 | std::string value; 40 | rocksdb::Status status = db->Get(rocksdb::ReadOptions(), key, &value); 41 | auto end_time = std::chrono::high_resolution_clock::now(); 42 | if (!status.ok()) { 43 | std::cerr << "Error during read: " << status.ToString() << std::endl; 44 | } 45 | return std::chrono::duration_cast(end_time - start_time).count(); 46 | } 47 | 48 | int main() { 49 | auto start_time = std::chrono::high_resolution_clock::now(); 50 | 51 | std::cout << "Starting RocksDB benchmark..." << std::endl; 52 | std::cout << "Iterations: " << ITERATIONS << std::endl; 53 | std::cout << "Key Length: " << KEY_LENGTH << std::endl; 54 | std::cout << "Value Length: " << VALUE_LENGTH << std::endl; 55 | 56 | // Open RocksDB 57 | rocksdb::DB* db; 58 | rocksdb::Options options; 59 | options.create_if_missing = true; 60 | rocksdb::Status status = rocksdb::DB::Open(options, db_path, &db); 61 | if (!status.ok()) { 62 | std::cerr << "Error opening RocksDB: " << status.ToString() << std::endl; 63 | return 1; 64 | } 65 | 66 | // Seed the random number generator 67 | std::srand(std::time(0)); 68 | 69 | // Benchmark writes 70 | std::cout << "Starting writes..." 
<< std::endl; 71 | std::vector keys; 72 | std::vector write_times; 73 | 74 | for (int i = 0; i < ITERATIONS; ++i) { 75 | std::string key = get_random_bytes_of_length(KEY_LENGTH); 76 | std::string value = get_random_bytes_of_length(VALUE_LENGTH); 77 | keys.push_back(key); 78 | write_times.push_back(save_kv_pair_and_get_time_ms(db, key, value)); 79 | } 80 | 81 | std::cout << "\nWrite Test Results:" << std::endl; 82 | std::cout << " Average write latency: " << (std::accumulate(write_times.begin(), write_times.end(), 0.0) / write_times.size()) << " mus" << std::endl; 83 | std::sort(write_times.begin(), write_times.end()); 84 | std::cout << " P90 write latency: " << write_times[static_cast(write_times.size() * 0.9)] << " mus" << std::endl; 85 | 86 | // Benchmark reads 87 | std::cout << "\nStarting reads..." << std::endl; 88 | std::vector read_times; 89 | 90 | for (int i = 0; i < ITERATIONS; ++i) { 91 | std::string key = keys[std::rand() % keys.size()]; 92 | read_times.push_back(get_kv_pair_and_get_time_ms(db, key)); 93 | } 94 | 95 | std::cout << "\nRead Test Results:" << std::endl; 96 | std::cout << " Average read latency: " << (std::accumulate(read_times.begin(), read_times.end(), 0.0) / read_times.size()) << " mus" << std::endl; 97 | std::sort(read_times.begin(), read_times.end()); 98 | std::cout << " P90 read latency: " << read_times[static_cast(read_times.size() * 0.9)] << " mus" << std::endl; 99 | 100 | // Benchmark reads and writes 101 | std::cout << "\nStarting reads and writes..." 
<< std::endl; 102 | std::vector combined_read_times; 103 | std::vector combined_write_times; 104 | 105 | for (int i = 0; i < ITERATIONS; ++i) { 106 | // Write 107 | std::string key = get_random_bytes_of_length(KEY_LENGTH); 108 | std::string value = get_random_bytes_of_length(VALUE_LENGTH); 109 | combined_write_times.push_back(save_kv_pair_and_get_time_ms(db, key, value)); 110 | keys.push_back(key); 111 | 112 | // Read 113 | key = keys[std::rand() % keys.size()]; 114 | combined_read_times.push_back(get_kv_pair_and_get_time_ms(db, key)); 115 | } 116 | 117 | // Print combined read and write benchmark results 118 | std::cout << "\nRead and Write Test Results:" << std::endl; 119 | std::cout << " Average write latency: " << (std::accumulate(combined_write_times.begin(), combined_write_times.end(), 0.0) / combined_write_times.size()) << " mus" << std::endl; 120 | std::cout << " Average read latency: " << (std::accumulate(combined_read_times.begin(), combined_read_times.end(), 0.0) / combined_read_times.size()) << " mus" << std::endl; 121 | std::sort(combined_write_times.begin(), combined_write_times.end()); 122 | std::sort(combined_read_times.begin(), combined_read_times.end()); 123 | std::cout << " P90 write latency: " << combined_write_times[static_cast(combined_write_times.size() * 0.9)] << " mus" << std::endl; 124 | std::cout << " P90 read latency: " << combined_read_times[static_cast(combined_read_times.size() * 0.9)] << " mus" << std::endl; 125 | 126 | // Close RocksDB 127 | delete db; 128 | 129 | auto end_time = std::chrono::high_resolution_clock::now(); 130 | std::cout << "\nTotal time: " << std::chrono::duration_cast(end_time - start_time).count() << " seconds" << std::endl; 131 | 132 | return 0; 133 | } 134 | -------------------------------------------------------------------------------- /docs/read-test.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/godcrampy/fireflydb/a3ddffc3d5b2722b870af6b1bf21c35bf50cfa89/docs/read-test.png -------------------------------------------------------------------------------- /docs/rw-test.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godcrampy/fireflydb/a3ddffc3d5b2722b870af6b1bf21c35bf50cfa89/docs/rw-test.png -------------------------------------------------------------------------------- /docs/write-test.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godcrampy/fireflydb/a3ddffc3d5b2722b870af6b1bf21c35bf50cfa89/docs/write-test.png -------------------------------------------------------------------------------- /pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 5 | 4.0.0 6 | 7 | com.sahilbondre 8 | fireflydb 9 | 0.1.1 10 | 11 | ${project.artifactId} 12 | FireflyDB is a fast, thread-safe, JVM-based key-value storage engine with microsecond latency. 
13 | https://github.com/godcrampy/fireflydb 14 | 15 | 16 | 17 | The Apache License, Version 2.0 18 | https://raw.githubusercontent.com/godcrampy/fireflydb/master/LICENSE 19 | 20 | 21 | 22 | 23 | 24 | Sahil Bondre 25 | sahil.bondre+fireflydb@gmail.com 26 | https://sahilbondre.com 27 | 28 | 29 | 30 | 31 | scm:git:git://github.com/godcrampy/fireflydb.git 32 | scm:git:ssh://github.com:godcrampy/fireflydb.git 33 | https://github.com/godcrampy/fireflydb/ 34 | 35 | 36 | 37 | 17 38 | 17 39 | UTF-8 40 | 5.10.1 41 | 42 | 43 | 44 | 45 | 46 | org.junit.jupiter 47 | junit-jupiter-api 48 | ${junit.jupiter.version} 49 | test 50 | 51 | 52 | org.junit.jupiter 53 | junit-jupiter-engine 54 | ${junit.jupiter.version} 55 | test 56 | 57 | 58 | com.esotericsoftware 59 | kryo 60 | 5.5.0 61 | 62 | 63 | 64 | 65 | 66 | 67 | org.apache.maven.plugins 68 | maven-source-plugin 69 | 3.2.1 70 | 71 | 72 | attach-sources 73 | 74 | jar-no-fork 75 | 76 | 77 | 78 | 79 | 80 | org.apache.maven.plugins 81 | maven-javadoc-plugin 82 | 3.4.1 83 | 84 | 85 | attach-javadocs 86 | 87 | jar 88 | 89 | 90 | 91 | 92 | 93 | org.sonatype.central 94 | central-publishing-maven-plugin 95 | 0.3.0 96 | true 97 | 98 | central 99 | true 100 | 101 | 102 | 103 | 104 | org.apache.maven.plugins 105 | maven-surefire-plugin 106 | 3.2.3 107 | 108 | 109 | **/PerformanceTest.java 110 | 111 | 112 | 113 | 114 | 115 | 116 | -------------------------------------------------------------------------------- /src/main/java/com/sahilbondre/firefly/FireflyDB.java: -------------------------------------------------------------------------------- 1 | package com.sahilbondre.firefly; 2 | 3 | import com.sahilbondre.firefly.filetable.FilePointer; 4 | import com.sahilbondre.firefly.filetable.PersistableFileTable; 5 | import com.sahilbondre.firefly.filetable.SerializedPersistableFileTable; 6 | import com.sahilbondre.firefly.log.FileChannelRandomAccessLog; 7 | import com.sahilbondre.firefly.log.RandomAccessLog; 8 | import 
com.sahilbondre.firefly.model.Segment; 9 | 10 | import java.io.IOException; 11 | import java.nio.file.*; 12 | import java.nio.file.attribute.BasicFileAttributes; 13 | import java.util.ArrayList; 14 | import java.util.HashMap; 15 | import java.util.List; 16 | import java.util.Map; 17 | 18 | public class FireflyDB { 19 | private static final Map instances = new HashMap<>(); 20 | private static final String NOT_STARTED_ERROR_MESSAGE = "FireflyDB is not started."; 21 | // 4 GB 22 | private static final long MAX_LOG_SIZE = 4 * 1024 * 1024 * 1024L; 23 | 24 | private final String folderPath; 25 | 26 | private final String fileTablePath; 27 | private final Map logMap = new HashMap<>(); 28 | private RandomAccessLog activeLog; 29 | private boolean isStarted = false; 30 | private PersistableFileTable fileTable; 31 | 32 | private FireflyDB(String folderPath) { 33 | this.folderPath = folderPath; 34 | this.fileTablePath = folderPath + "/map.kryo"; 35 | } 36 | 37 | public static synchronized FireflyDB getInstance(String folderPath) { 38 | instances.computeIfAbsent(folderPath, FireflyDB::new); 39 | return instances.get(folderPath); 40 | } 41 | 42 | private static boolean isNumeric(String str) { 43 | return str.matches("\\d+"); 44 | } 45 | 46 | public String getFolderPath() { 47 | return folderPath; 48 | } 49 | 50 | public boolean isStarted() { 51 | return isStarted; 52 | } 53 | 54 | public synchronized void start() throws IOException { 55 | if (!isStarted) { 56 | isStarted = true; 57 | compaction(); 58 | } 59 | } 60 | 61 | public synchronized void stop() throws IOException { 62 | if (isStarted) { 63 | // Save file-table to disk 64 | fileTable.saveToDisk(fileTablePath); 65 | // Close all RandomAccessLog 66 | for (RandomAccessLog log : logMap.values()) { 67 | log.close(); 68 | } 69 | } 70 | isStarted = false; 71 | } 72 | 73 | public synchronized void set(byte[] key, byte[] value) throws IOException { 74 | if (!isStarted) { 75 | throw new 
IllegalStateException(NOT_STARTED_ERROR_MESSAGE); 76 | } 77 | 78 | // Append to active log 79 | Segment segment = Segment.fromKeyValuePair(key, value); 80 | FilePointer filePointer = activeLog.append(segment.getBytes()); 81 | fileTable.put(key, filePointer); 82 | 83 | // Check if compaction is needed 84 | if (activeLog.size() > MAX_LOG_SIZE) { 85 | moveToNewActiveLog(); 86 | } 87 | } 88 | 89 | private void moveToNewActiveLog() throws IOException { 90 | // Create a new log 91 | int nextActiveLogId = activeLog == null ? 1 : activeLog.getLogId() + 1; 92 | RandomAccessLog nextActiveLog = new FileChannelRandomAccessLog(folderPath + "/" + nextActiveLogId + ".log"); 93 | // Update logMap 94 | logMap.put(nextActiveLogId, nextActiveLog); 95 | activeLog = nextActiveLog; 96 | } 97 | 98 | public byte[] get(byte[] key) throws IOException { 99 | if (!isStarted) { 100 | throw new IllegalStateException(NOT_STARTED_ERROR_MESSAGE); 101 | } 102 | 103 | // Get file-pointer from file-table 104 | FilePointer filePointer = fileTable.get(key); 105 | if (filePointer == null) { 106 | throw new IllegalArgumentException("Key not found."); 107 | } 108 | 109 | // Read from log 110 | String filename = Paths.get(filePointer.getFileName()).getFileName().toString(); 111 | Integer logId = Integer.parseInt(filename.substring(0, filename.length() - 4)); 112 | RandomAccessLog log = logMap.get(logId); 113 | Segment segment = log.readSegment(filePointer.getOffset()); 114 | return segment.getValue(); 115 | } 116 | 117 | public synchronized void compaction() throws IOException { 118 | if (!isStarted) { 119 | throw new IllegalStateException(NOT_STARTED_ERROR_MESSAGE); 120 | } 121 | 122 | closeAllLogMapsIfOpen(); 123 | 124 | // Iterate over all log files in descending order 125 | List logs = getRandomAccessLogsFromDir(folderPath); 126 | 127 | if (!logs.isEmpty()) { 128 | // Set the last log as active log 129 | activeLog = logs.get(0); 130 | } 131 | 132 | this.fileTable = 
SerializedPersistableFileTable.fromEmpty(); 133 | 134 | // Create a new log 135 | moveToNewActiveLog(); 136 | // Iterate over all logs 137 | for (RandomAccessLog log : logs) { 138 | // Iterate over all segments in the log 139 | long offset = 0; 140 | 141 | while (offset < log.size()) { 142 | Segment segment = log.readSegment(offset); 143 | offset += segment.getBytes().length; 144 | // Append only if the key is not seen before 145 | if (fileTable.get(segment.getKey()) != null) { 146 | continue; 147 | } 148 | // Append to new log 149 | FilePointer filePointer = activeLog.append(segment.getBytes()); 150 | fileTable.put(segment.getKey(), filePointer); 151 | } 152 | 153 | orphanizeLog(log); 154 | } 155 | 156 | // update logmap 157 | logMap.clear(); 158 | logMap.put(activeLog.getLogId(), activeLog); 159 | // save file-table 160 | fileTable.saveToDisk(fileTablePath); 161 | } 162 | 163 | private void closeAllLogMapsIfOpen() { 164 | for (RandomAccessLog log : logMap.values()) { 165 | try { 166 | log.close(); 167 | } catch (IOException ignored) { 168 | // Ignore 169 | } 170 | } 171 | } 172 | 173 | private void orphanizeLog(RandomAccessLog log) throws IOException { 174 | log.close(); 175 | // rename all stale logs and add underscore before file name 176 | Path oldPath = Paths.get(log.getFilePath()); 177 | Path dir = oldPath.getParent(); 178 | Path newPath = Paths.get(dir.toString(), "_" + oldPath.getFileName().toString()); 179 | Files.move(oldPath, newPath, StandardCopyOption.REPLACE_EXISTING); 180 | } 181 | 182 | private List getRandomAccessLogsFromDir(String dir) throws IOException { 183 | List logs = new ArrayList<>(); 184 | Files.walkFileTree(Paths.get(dir), new SimpleFileVisitor<>() { 185 | @Override 186 | public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { 187 | 188 | String fileName = file.getFileName().toString(); 189 | if (fileName.endsWith(".log")) { 190 | String fileNameWithoutExtension = fileName.substring(0, 
fileName.length() - 4); 191 | if (isNumeric(fileNameWithoutExtension)) { 192 | // Create a RandomAccessLog for each file 193 | RandomAccessLog log = new FileChannelRandomAccessLog(file.toString()); 194 | // Add it to the logMap 195 | logs.add(log); 196 | } 197 | } 198 | return FileVisitResult.CONTINUE; 199 | } 200 | }); 201 | 202 | 203 | // Sort the logs in descending order 204 | logs.sort((o1, o2) -> { 205 | int o1Id = Integer.parseInt(o1.getFilePath().substring(o1.getFilePath().lastIndexOf("/") + 1, o1.getFilePath().length() - 4)); 206 | int o2Id = Integer.parseInt(o2.getFilePath().substring(o2.getFilePath().lastIndexOf("/") + 1, o2.getFilePath().length() - 4)); 207 | return Integer.compare(o2Id, o1Id); 208 | }); 209 | return logs; 210 | } 211 | } 212 | -------------------------------------------------------------------------------- /src/main/java/com/sahilbondre/firefly/filetable/FilePointer.java: -------------------------------------------------------------------------------- 1 | package com.sahilbondre.firefly.filetable; 2 | 3 | import java.util.Objects; 4 | 5 | public class FilePointer { 6 | private String fileName; 7 | private long offset; 8 | 9 | public FilePointer(String fileName, long offset) { 10 | this.fileName = fileName; 11 | this.offset = offset; 12 | } 13 | 14 | public FilePointer() { 15 | } 16 | 17 | public String getFileName() { 18 | return fileName; 19 | } 20 | 21 | public void setFileName(String fileName) { 22 | this.fileName = fileName; 23 | } 24 | 25 | public long getOffset() { 26 | return offset; 27 | } 28 | 29 | public void setOffset(long offset) { 30 | this.offset = offset; 31 | } 32 | 33 | @Override 34 | public boolean equals(Object o) { 35 | if (this == o) return true; 36 | if (o == null || getClass() != o.getClass()) return false; 37 | FilePointer that = (FilePointer) o; 38 | return offset == that.offset && fileName.equals(that.fileName); 39 | } 40 | 41 | @Override 42 | public int hashCode() { 43 | return Objects.hash(fileName, offset); 
44 | } 45 | } 46 | -------------------------------------------------------------------------------- /src/main/java/com/sahilbondre/firefly/filetable/InvalidFileTableException.java: -------------------------------------------------------------------------------- 1 | package com.sahilbondre.firefly.filetable; 2 | 3 | public class InvalidFileTableException extends RuntimeException { 4 | public InvalidFileTableException(String message) { 5 | super(message); 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /src/main/java/com/sahilbondre/firefly/filetable/PersistableFileTable.java: -------------------------------------------------------------------------------- 1 | package com.sahilbondre.firefly.filetable; 2 | 3 | import java.io.FileNotFoundException; 4 | 5 | public interface PersistableFileTable { 6 | void put(byte[] key, FilePointer value); 7 | 8 | FilePointer get(byte[] key); 9 | 10 | void saveToDisk(String filePath) throws FileNotFoundException; 11 | } 12 | -------------------------------------------------------------------------------- /src/main/java/com/sahilbondre/firefly/filetable/SerializedPersistableFileTable.java: -------------------------------------------------------------------------------- 1 | package com.sahilbondre.firefly.filetable; 2 | 3 | import com.esotericsoftware.kryo.Kryo; 4 | import com.esotericsoftware.kryo.KryoException; 5 | import com.esotericsoftware.kryo.io.Input; 6 | import com.esotericsoftware.kryo.io.Output; 7 | 8 | import java.io.FileInputStream; 9 | import java.io.FileNotFoundException; 10 | import java.io.FileOutputStream; 11 | import java.io.Serializable; 12 | import java.util.HashMap; 13 | import java.util.Map; 14 | 15 | public class SerializedPersistableFileTable implements PersistableFileTable, Serializable { 16 | 17 | private static final Kryo kryo = new Kryo(); 18 | 19 | private final Map table; 20 | 21 | public SerializedPersistableFileTable() { 22 | 
kryo.register(SerializedPersistableFileTable.class); 23 | kryo.register(HashMap.class); 24 | kryo.register(FilePointer.class); 25 | this.table = new HashMap<>(); 26 | } 27 | 28 | public static SerializedPersistableFileTable fromEmpty() { 29 | return new SerializedPersistableFileTable(); 30 | } 31 | 32 | public static SerializedPersistableFileTable fromFile(String filePath) throws FileNotFoundException, KryoException { 33 | kryo.register(SerializedPersistableFileTable.class); 34 | kryo.register(HashMap.class); 35 | kryo.register(FilePointer.class); 36 | try (Input input = new Input(new FileInputStream(filePath))) { 37 | return kryo.readObject(input, SerializedPersistableFileTable.class); 38 | } catch (KryoException e) { 39 | throw new InvalidFileTableException("Failed to load FileTable from disk: " + e.getMessage()); 40 | } 41 | } 42 | 43 | @Override 44 | public void put(byte[] key, FilePointer value) { 45 | if (key != null && value != null) { 46 | table.put(new String(key), value); 47 | } 48 | } 49 | 50 | @Override 51 | public FilePointer get(byte[] key) { 52 | if (key != null) { 53 | return table.get(new String(key)); 54 | } 55 | return null; 56 | } 57 | 58 | @Override 59 | public void saveToDisk(String filePath) throws FileNotFoundException { 60 | Output output = new Output(new FileOutputStream(filePath)); 61 | kryo.writeObject(output, this); 62 | output.close(); 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /src/main/java/com/sahilbondre/firefly/log/FileChannelRandomAccessLog.java: -------------------------------------------------------------------------------- 1 | package com.sahilbondre.firefly.log; 2 | 3 | import com.sahilbondre.firefly.filetable.FilePointer; 4 | import com.sahilbondre.firefly.model.Segment; 5 | 6 | import java.io.IOException; 7 | import java.io.RandomAccessFile; 8 | import java.nio.ByteBuffer; 9 | import java.nio.channels.FileChannel; 10 | import java.nio.channels.FileLock; 11 | import 
java.nio.file.Paths;

/**
 * RandomAccessLog backed by a RandomAccessFile/FileChannel. The underlying
 * file is exclusively locked for the lifetime of this object.
 */
public class FileChannelRandomAccessLog implements RandomAccessLog {

    private final String filePath;
    private final RandomAccessFile randomAccessFile;
    private final FileChannel fileChannel;
    private final FileLock fileLock;

    public FileChannelRandomAccessLog(String filePath) throws IOException {
        this.filePath = filePath;
        this.randomAccessFile = new RandomAccessFile(filePath, "rw");
        this.fileChannel = randomAccessFile.getChannel();
        this.fileLock = fileChannel.lock();
    }

    @Override
    public long size() throws IOException {
        return fileChannel.size();
    }

    @Override
    public String getFilePath() {
        return filePath;
    }

    /**
     * Appends the message at the end of the file and returns a pointer to the
     * position where it was written.
     */
    @Override
    public FilePointer append(byte[] message) throws IOException {
        fileChannel.position(fileChannel.size());
        ByteBuffer buffer = ByteBuffer.wrap(message);
        fileChannel.write(buffer);
        return new FilePointer(filePath, fileChannel.size() - message.length);
    }

    /**
     * Reads {@code length} raw bytes starting at {@code offset}.
     *
     * @throws InvalidRangeException if the range falls outside the file
     */
    @Override
    public byte[] read(long offset, long length) throws IOException, InvalidRangeException {
        long fileSize = fileChannel.size();

        if (offset < 0 || offset >= fileSize || length <= 0 || offset + length > fileSize) {
            throw new InvalidRangeException("Invalid offset or length");
        }

        fileChannel.position(offset);
        ByteBuffer buffer = ByteBuffer.allocate((int) length);
        fileChannel.read(buffer);
        return buffer.array();
    }

    /**
     * Reads and CRC-validates one segment starting at the given offset.
     * Segment layout (see Segment): 2-byte CRC, 2-byte key size, 4-byte value
     * size, key bytes, value bytes.
     */
    @Override
    public Segment readSegment(long offset) throws IOException, InvalidRangeException {
        long fileSize = fileChannel.size();

        if (offset < 0 || offset >= fileSize) {
            throw new InvalidRangeException("Invalid offset");
        }

        // FIX: the key size lives right after the CRC, i.e. at
        // offset + CRC_LENGTH. The old code read at offset + KEY_SIZE_LENGTH,
        // which only worked because both constants happen to equal 2.
        byte[] keySizeBytes = new byte[Segment.KEY_SIZE_LENGTH];
        fileChannel.read(ByteBuffer.wrap(keySizeBytes), offset + Segment.CRC_LENGTH);

        // FIX: the value size lives after CRC + key size (offset + 4). The old
        // code read at offset + 6 and decoded only two of the four bytes, so
        // any value >= 64 KiB had its size truncated.
        byte[] valueSizeBytes = new byte[Segment.VALUE_SIZE_LENGTH];
        fileChannel.read(ByteBuffer.wrap(valueSizeBytes),
            offset + Segment.CRC_LENGTH + Segment.KEY_SIZE_LENGTH);

        // Total Size = header + key + value
        int totalSize = Segment.CRC_LENGTH + Segment.KEY_SIZE_LENGTH +
            Segment.VALUE_SIZE_LENGTH + byteArrayToInt(keySizeBytes)
            + ByteBuffer.wrap(valueSizeBytes).getInt();

        // Read entire segment
        byte[] segmentBytes = new byte[totalSize];
        fileChannel.read(ByteBuffer.wrap(segmentBytes), offset);

        Segment segment = Segment.fromByteArray(segmentBytes);

        // Validate CRC
        if (!segment.isSegmentValid()) {
            throw new InvalidRangeException("Segment is invalid");
        }

        return segment;
    }

    /** Log id, i.e. the numeric part of the "<id>.log" file name. */
    @Override
    public Integer getLogId() {
        String fileNameWithoutPath = Paths.get(filePath).getFileName().toString();
        return Integer.parseInt(fileNameWithoutPath.substring(0, fileNameWithoutPath.length() - 4));
    }

    /** Big-endian decode of a 2-byte unsigned size. */
    private int byteArrayToInt(byte[] bytes) {
        // FIX: mask the high byte as well; without the mask a byte >= 0x80
        // sign-extends and yields a negative size.
        return ((bytes[0] & 0xFF) << 8) | (bytes[1] & 0xFF);
    }

    public void close() throws IOException {
        fileLock.release();
        fileChannel.close();
        randomAccessFile.close();
    }
}
--------------------------------------------------------------------------------
/src/main/java/com/sahilbondre/firefly/log/InvalidRangeException.java:
--------------------------------------------------------------------------------
package com.sahilbondre.firefly.log;

/** Thrown when a read range falls outside the log or a segment is corrupt. */
public class InvalidRangeException extends IllegalArgumentException {
    public InvalidRangeException(String message) {
        super(message);
    }
}
--------------------------------------------------------------------------------
/src/main/java/com/sahilbondre/firefly/log/RandomAccessLog.java:
--------------------------------------------------------------------------------
package com.sahilbondre.firefly.log;

import
com.sahilbondre.firefly.filetable.FilePointer;
import com.sahilbondre.firefly.model.Segment;

import java.io.IOException;

/**
 * Append-only log with random-access reads, addressed by byte offset.
 */
public interface RandomAccessLog {
    /** Current size of the log in bytes. */
    long size() throws IOException;

    String getFilePath();

    /** Appends the raw message and returns a pointer to where it was written. */
    FilePointer append(byte[] message) throws IOException;

    /** Reads {@code length} raw bytes starting at {@code offset}. */
    byte[] read(long offset, long length) throws IOException, InvalidRangeException;

    /** Reads and validates one segment starting at {@code offset}. */
    Segment readSegment(long offset) throws IOException, InvalidRangeException;

    void close() throws IOException;

    /** Numeric id parsed from the "<id>.log" file name. */
    Integer getLogId();
}
--------------------------------------------------------------------------------
/src/main/java/com/sahilbondre/firefly/model/Segment.java:
--------------------------------------------------------------------------------
package com.sahilbondre.firefly.model;

/**
 * One immutable record of the log file.
 *
 * Binary layout:
 *   2 bytes: CRC-16 of everything after the CRC field
 *   2 bytes: key size (big-endian)
 *   4 bytes: value size (big-endian)
 *   n bytes: key
 *   m bytes: value
 *
 * Two big decisions here to save on performance:
 *   1. byte[] is used instead of ByteBuffer.
 *   2. The byte[] is trusted to be immutable, so it is never copied.
 *
 * Note: the value size is four bytes because a 32-bit signed int stores the
 * size. Int is 32-bit signed, so the maximum value size is 2^31 - 1 bytes,
 * i.e. 2,147,483,647 bytes or about 2.14 GB.
 */
public class Segment {

    public static final int CRC_LENGTH = 2;
    public static final int KEY_SIZE_LENGTH = 2;
    public static final int VALUE_SIZE_LENGTH = 4;

    // Derived field offsets within a segment.
    private static final int KEY_SIZE_OFFSET = CRC_LENGTH;
    private static final int VALUE_SIZE_OFFSET = CRC_LENGTH + KEY_SIZE_LENGTH;
    private static final int HEADER_LENGTH = CRC_LENGTH + KEY_SIZE_LENGTH + VALUE_SIZE_LENGTH;

    private final byte[] bytes;

    private Segment(byte[] bytes) {
        this.bytes = bytes;
    }

    /** Wraps an already-encoded segment; the array is not copied. */
    public static Segment fromByteArray(byte[] data) {
        return new Segment(data);
    }

    /** Encodes a key/value pair into a new segment, including its CRC. */
    public static Segment fromKeyValuePair(byte[] key, byte[] value) {
        int keySize = key.length;
        int valueSize = value.length;
        byte[] segment = new byte[HEADER_LENGTH + keySize + valueSize];

        // Key size, big-endian.
        segment[KEY_SIZE_OFFSET] = (byte) ((keySize >> 8) & 0xFF);
        segment[KEY_SIZE_OFFSET + 1] = (byte) (keySize & 0xFF);

        // Value size, big-endian.
        segment[VALUE_SIZE_OFFSET] = (byte) ((valueSize >> 24) & 0xFF);
        segment[VALUE_SIZE_OFFSET + 1] = (byte) ((valueSize >> 16) & 0xFF);
        segment[VALUE_SIZE_OFFSET + 2] = (byte) ((valueSize >> 8) & 0xFF);
        segment[VALUE_SIZE_OFFSET + 3] = (byte) (valueSize & 0xFF);

        System.arraycopy(key, 0, segment, HEADER_LENGTH, keySize);
        System.arraycopy(value, 0, segment, HEADER_LENGTH + keySize, valueSize);

        // The CRC covers everything after the CRC field itself.
        byte[] crc = new Segment(segment).crc16();
        segment[0] = crc[0];
        segment[1] = crc[1];

        return new Segment(segment);
    }

    public byte[] getBytes() {
        return bytes;
    }

    public byte[] getKey() {
        return extractBytes(HEADER_LENGTH, getKeySize());
    }

    public byte[] getValue() {
        return extractBytes(HEADER_LENGTH + getKeySize(), getValueSize());
    }

    public int getKeySize() {
        return ((bytes[KEY_SIZE_OFFSET] & 0xff) << 8) | (bytes[KEY_SIZE_OFFSET + 1] & 0xff);
    }

    public int getValueSize() {
        return ((bytes[VALUE_SIZE_OFFSET] & 0xff) << 24)
            | ((bytes[VALUE_SIZE_OFFSET + 1] & 0xff) << 16)
            | ((bytes[VALUE_SIZE_OFFSET + 2] & 0xff) << 8)
            | (bytes[VALUE_SIZE_OFFSET + 3] & 0xff);
    }

    public byte[] getCrc() {
        return extractBytes(0, CRC_LENGTH);
    }

    /** True when the stored CRC matches the CRC computed over the payload. */
    public boolean isChecksumValid() {
        byte[] crc = crc16();
        return crc[0] == bytes[0] && crc[1] == bytes[1];
    }

    /** Checksum plus structural sanity: sizes must match the array length. */
    public boolean isSegmentValid() {
        return isChecksumValid() && getKeySize() > 0 && getValueSize() >= 0
            && bytes.length == HEADER_LENGTH + getKeySize() + getValueSize();
    }

    private byte[] extractBytes(int offset, int length) {
        byte[] result = new byte[length];
        System.arraycopy(bytes, offset, result, 0, length);
        return result;
    }

    /** CRC-16 (polynomial 0x1021) over everything after the CRC field. */
    private byte[] crc16(byte[] segment) {
        int crc = 0xFFFF; // Initial CRC value
        int polynomial = 0x1021; // CRC-16 polynomial

        for (int index = CRC_LENGTH; index < segment.length; index++) {
            byte b = segment[index];
            crc ^= (b & 0xFF) << 8;

            for (int i = 0; i < 8; i++) {
                if ((crc & 0x8000) != 0) {
                    crc = (crc << 1) ^ polynomial;
                } else {
                    crc <<= 1;
                }
            }
        }

        return new byte[]{(byte) ((crc >> 8) & 0xFF), (byte) (crc & 0xFF)};
    }

    private byte[] crc16() {
        return crc16(bytes);
    }
}
--------------------------------------------------------------------------------
/src/test/java/com/sahilbondre/firefly/CompactionTest.java:
--------------------------------------------------------------------------------
package com.sahilbondre.firefly;

import com.sahilbondre.firefly.log.FileChannelRandomAccessLog;
import com.sahilbondre.firefly.log.RandomAccessLog;
import com.sahilbondre.firefly.model.Segment;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import
org.junit.jupiter.api.Test;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;

import static com.sahilbondre.firefly.TestUtils.deleteFolderContentsIfExists;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;

/**
 * Verifies that compaction keeps only the newest value for each key and
 * renames all old log files with a leading underscore.
 */
class CompactionTest {

    private static final String TEST_FOLDER = "src/test/resources/test_folder_compaction";
    private static final String TEST_LOG_FILE_1 = "1.log";
    private static final String TEST_LOG_FILE_2 = "2.log";
    private static final String TEST_LOG_FILE_3 = "3.log";

    private FireflyDB fireflyDB;

    @BeforeEach
    void setUp() throws IOException {
        deleteFolderContentsIfExists(TEST_FOLDER);
        // Create a test folder containing three pre-populated log files.
        Files.createDirectories(Paths.get(TEST_FOLDER));
        Files.createFile(Paths.get(TEST_FOLDER, TEST_LOG_FILE_1));
        Files.createFile(Paths.get(TEST_FOLDER, TEST_LOG_FILE_2));
        Files.createFile(Paths.get(TEST_FOLDER, TEST_LOG_FILE_3));

        RandomAccessLog log1 = new FileChannelRandomAccessLog(TEST_FOLDER + "/" + TEST_LOG_FILE_1);
        RandomAccessLog log2 = new FileChannelRandomAccessLog(TEST_FOLDER + "/" + TEST_LOG_FILE_2);
        RandomAccessLog log3 = new FileChannelRandomAccessLog(TEST_FOLDER + "/" + TEST_LOG_FILE_3);

        // key1 and key2 are overwritten in later logs; the newest value wins.
        appendPair(log1, "key1", "value1");
        appendPair(log1, "key2", "value2");
        appendPair(log1, "key3", "value3");

        appendPair(log2, "key4", "value4");
        appendPair(log2, "key1", "value5");
        appendPair(log2, "key2", "value6");

        appendPair(log3, "key7", "value7");
        appendPair(log3, "key8", "value8");
        appendPair(log3, "key1", "value9");

        log1.close();
        log2.close();
        log3.close();

        fireflyDB = FireflyDB.getInstance(TEST_FOLDER);
    }

    /** Encodes a key/value pair as a segment and appends it to the log. */
    private static void appendPair(RandomAccessLog log, String key, String value) throws IOException {
        log.append(Segment.fromKeyValuePair(key.getBytes(), value.getBytes()).getBytes());
    }

    @AfterEach
    void tearDown() throws IOException {
        fireflyDB.stop();
        deleteFolderContentsIfExists(TEST_FOLDER);
    }

    @Test
    void givenMultipleLogFiles_whenCompaction_thenAllFilesRenamedCorrectly() throws IOException {
        // Given
        // A started FireflyDB instance over the three pre-populated logs
        fireflyDB.start();

        // When
        // Compaction is triggered
        fireflyDB.compaction();

        // Then
        // Old logs are orphaned and every key resolves to its newest value
        assertTrue(Files.exists(Paths.get(TEST_FOLDER, "_1.log")));
        assertTrue(Files.exists(Paths.get(TEST_FOLDER, "_2.log")));
        assertTrue(Files.exists(Paths.get(TEST_FOLDER, "_3.log")));
        assertValueIs("key1", "value9");
        assertValueIs("key2", "value6");
        assertValueIs("key3", "value3");
        assertValueIs("key4", "value4");
        assertValueIs("key7", "value7");
        assertValueIs("key8", "value8");
    }

    /** Asserts that the database returns the expected value for the key. */
    private void assertValueIs(String key, String expectedValue) throws IOException {
        assertEquals(expectedValue, new String(fireflyDB.get(key.getBytes())));
    }
}
--------------------------------------------------------------------------------
/src/test/java/com/sahilbondre/firefly/FireflyDBStaticTest.java:
--------------------------------------------------------------------------------
package com.sahilbondre.firefly;

import org.junit.jupiter.api.Test;

import java.lang.reflect.Method;
import java.lang.reflect.Modifier;

import static
org.junit.jupiter.api.Assertions.*; 9 | 10 | class FireflyDBStaticTest { 11 | 12 | private static final String FOLDER_A = "/path/to/folderA"; 13 | private static final String FOLDER_B = "/path/to/folderB"; 14 | 15 | @Test 16 | void givenSameFolder_whenGetInstance_thenSameObjectReferenced() { 17 | // Given 18 | // Two instances with the same folder should reference the same object 19 | 20 | // When 21 | FireflyDB dbA1 = FireflyDB.getInstance(FOLDER_A); 22 | FireflyDB dbA2 = FireflyDB.getInstance(FOLDER_A); 23 | 24 | // Then 25 | assertSame(dbA1, dbA2); 26 | assertEquals(FOLDER_A, dbA1.getFolderPath()); 27 | assertEquals(FOLDER_A, dbA1.getFolderPath()); 28 | } 29 | 30 | @Test 31 | void givenDifferentFolders_whenGetInstance_thenDifferentObjectsReferenced() { 32 | // Given 33 | // Two instances with different folders should reference different objects 34 | 35 | // When 36 | FireflyDB dbA = FireflyDB.getInstance(FOLDER_A); 37 | FireflyDB dbB = FireflyDB.getInstance(FOLDER_B); 38 | 39 | // Then 40 | assertNotSame(dbA, dbB); 41 | assertEquals(FOLDER_A, dbA.getFolderPath()); 42 | assertEquals(FOLDER_B, dbB.getFolderPath()); 43 | } 44 | 45 | @Test 46 | void givenGetInstanceMethod_whenCheckSynchronizedModifier_thenTrue() throws NoSuchMethodException { 47 | // Given 48 | Method getInstanceMethod = FireflyDB.class.getDeclaredMethod("getInstance", String.class); 49 | 50 | // When/Then 51 | assertTrue(Modifier.isSynchronized(getInstanceMethod.getModifiers())); 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /src/test/java/com/sahilbondre/firefly/FireflyDBTest.java: -------------------------------------------------------------------------------- 1 | package com.sahilbondre.firefly; 2 | 3 | import org.junit.jupiter.api.AfterEach; 4 | import org.junit.jupiter.api.BeforeEach; 5 | import org.junit.jupiter.api.Test; 6 | 7 | import java.io.IOException; 8 | import java.nio.file.Files; 9 | import java.nio.file.Paths; 10 | 11 | import 
static com.sahilbondre.firefly.TestUtils.deleteFolderContentsIfExists; 12 | import static org.junit.jupiter.api.Assertions.*; 13 | 14 | class FireflyDBTest { 15 | 16 | private static final String TEST_FOLDER = "src/test/resources/test_folder_simple"; 17 | private static final String TEST_LOG_FILE_1 = "1.log"; 18 | private static final String TEST_LOG_FILE_2 = "2.log"; 19 | private static final String TEST_LOG_FILE_3 = "3.log"; 20 | 21 | private FireflyDB fireflyDB; 22 | 23 | @BeforeEach 24 | void setUp() throws IOException { 25 | deleteFolderContentsIfExists(TEST_FOLDER); 26 | // Create a test folder and log files 27 | Files.createDirectories(Paths.get(TEST_FOLDER)); 28 | Files.createFile(Paths.get(TEST_FOLDER, TEST_LOG_FILE_1)); 29 | Files.createFile(Paths.get(TEST_FOLDER, TEST_LOG_FILE_2)); 30 | Files.createFile(Paths.get(TEST_FOLDER, TEST_LOG_FILE_3)); 31 | 32 | fireflyDB = FireflyDB.getInstance(TEST_FOLDER); 33 | } 34 | 35 | @AfterEach 36 | void tearDown() throws IOException { 37 | fireflyDB.stop(); 38 | deleteFolderContentsIfExists(TEST_FOLDER); 39 | } 40 | 41 | @Test 42 | void givenFolderPath_whenStarted_thenInstanceCreatedAndMarkedAsStarted() throws IOException { 43 | // Given 44 | // A FireflyDB instance with a folder path 45 | 46 | // When 47 | fireflyDB.start(); 48 | 49 | // Then 50 | assertNotNull(fireflyDB); 51 | assertEquals(TEST_FOLDER, fireflyDB.getFolderPath()); 52 | assertTrue(fireflyDB.isStarted()); 53 | } 54 | 55 | @Test 56 | void givenStartedInstance_whenStop_thenLogsClosed() throws IOException { 57 | // Given 58 | // A started FireflyDB instance 59 | fireflyDB.start(); 60 | assertTrue(fireflyDB.isStarted()); 61 | 62 | // When 63 | fireflyDB.stop(); 64 | 65 | // Then 66 | assertFalse(fireflyDB.isStarted()); 67 | } 68 | 69 | @Test 70 | void givenStartedInstance_whenSetAndGet_thenValuesAreCorrect() throws IOException { 71 | 72 | // Given 73 | fireflyDB.start(); 74 | assertTrue(fireflyDB.isStarted()); 75 | 76 | // Set a value 77 | byte[] key = 
"testKey".getBytes(); 78 | byte[] value = "testValue".getBytes(); 79 | fireflyDB.set(key, value); 80 | 81 | // Get the value 82 | byte[] retrievedValue = fireflyDB.get(key); 83 | assertArrayEquals(value, retrievedValue); 84 | } 85 | 86 | @Test 87 | void givenUnstartedInstance_whenSet_thenExceptionThrown() { 88 | // Given 89 | byte[] key = "testKey".getBytes(); 90 | byte[] value = "testValue".getBytes(); 91 | 92 | // When/Then 93 | // Attempt to set a value without starting the instance 94 | assertThrows(IllegalStateException.class, () -> fireflyDB.set(key, value)); 95 | } 96 | 97 | @Test 98 | void givenUnstartedInstance_whenGet_thenExceptionThrown() { 99 | // Given 100 | byte[] key = "testKey".getBytes(); 101 | 102 | // When/Then 103 | // Attempt to get a value without starting the instance 104 | assertThrows(IllegalStateException.class, () -> fireflyDB.get(key)); 105 | } 106 | 107 | @Test 108 | void givenNonexistentKey_whenGet_thenExceptionThrown() throws IOException { 109 | // Given 110 | fireflyDB.start(); 111 | assertTrue(fireflyDB.isStarted()); 112 | byte[] key = "nonexistentKey".getBytes(); 113 | 114 | // When/Then 115 | // Attempt to get a nonexistent key 116 | assertThrows(IllegalArgumentException.class, () -> fireflyDB.get(key)); 117 | } 118 | 119 | @Test 120 | void givenStartedInstance_whenSetMultipleTimes_thenValuesAreCorrect() throws IOException { 121 | // Given 122 | fireflyDB.start(); 123 | assertTrue(fireflyDB.isStarted()); 124 | 125 | // Set a value 126 | byte[] key = "testKey".getBytes(); 127 | byte[] value = "testValue".getBytes(); 128 | fireflyDB.set(key, value); 129 | 130 | // Set another value 131 | byte[] key2 = "testKey2".getBytes(); 132 | byte[] value2 = "testValue2".getBytes(); 133 | fireflyDB.set(key2, value2); 134 | 135 | // Get the values 136 | byte[] retrievedValue = fireflyDB.get(key); 137 | byte[] retrievedValue2 = fireflyDB.get(key2); 138 | assertArrayEquals(value, retrievedValue); 139 | assertArrayEquals(value2, retrievedValue2); 
140 | } 141 | 142 | @Test 143 | void givenStartedInstance_whenSetSameKeyMultipleTimes_thenValueIsCorrect() throws IOException { 144 | // Given 145 | fireflyDB.start(); 146 | assertTrue(fireflyDB.isStarted()); 147 | 148 | // When 149 | // Set a value 150 | byte[] key = "testKey".getBytes(); 151 | byte[] value = "testValue".getBytes(); 152 | fireflyDB.set(key, value); 153 | 154 | // Set another value 155 | byte[] value2 = "testValue2".getBytes(); 156 | fireflyDB.set(key, value2); 157 | 158 | // Get the values 159 | byte[] retrievedValue = fireflyDB.get(key); 160 | assertArrayEquals(value2, retrievedValue); 161 | } 162 | 163 | @Test 164 | void givenStartedInstance_whenSetAndRestart_thenValueIsCorrect() throws IOException { 165 | // Given 166 | fireflyDB.start(); 167 | assertTrue(fireflyDB.isStarted()); 168 | byte[] key = "testKey".getBytes(); 169 | byte[] value = "testValue".getBytes(); 170 | fireflyDB.set(key, value); 171 | fireflyDB.stop(); 172 | 173 | // When 174 | // Restart the instance 175 | fireflyDB = FireflyDB.getInstance(TEST_FOLDER); 176 | fireflyDB.start(); 177 | 178 | // Get the values 179 | byte[] retrievedValue = fireflyDB.get(key); 180 | assertArrayEquals(value, retrievedValue); 181 | } 182 | } 183 | -------------------------------------------------------------------------------- /src/test/java/com/sahilbondre/firefly/PerformanceTest.java: -------------------------------------------------------------------------------- 1 | package com.sahilbondre.firefly; 2 | 3 | import org.junit.jupiter.api.AfterEach; 4 | import org.junit.jupiter.api.BeforeEach; 5 | import org.junit.jupiter.api.Test; 6 | 7 | import java.io.IOException; 8 | import java.nio.file.Files; 9 | import java.nio.file.Path; 10 | import java.nio.file.Paths; 11 | import java.util.ArrayList; 12 | import java.util.Arrays; 13 | import java.util.List; 14 | import java.util.Random; 15 | import java.util.logging.Logger; 16 | import java.util.stream.Stream; 17 | 18 | 19 | class PerformanceTest { 20 | 21 
| private static final String TEST_FOLDER = "src/test/resources/test_folder"; 22 | private static final int ITERATIONS = 100000; 23 | private static final int KEY_LENGTH = 8; 24 | private static final int VALUE_LENGTH = 100; 25 | 26 | Logger logger = Logger.getLogger(PerformanceTest.class.getName()); 27 | 28 | private FireflyDB fireflyDB; 29 | 30 | @BeforeEach 31 | public void setUp() throws IOException { 32 | // Create a test folder 33 | Files.createDirectories(Paths.get(TEST_FOLDER)); 34 | 35 | fireflyDB = FireflyDB.getInstance(TEST_FOLDER); 36 | } 37 | 38 | @AfterEach 39 | void tearDown() throws IOException { 40 | fireflyDB.stop(); 41 | // Cleanup: Delete the test folder and its contents 42 | try (Stream pathStream = Files.walk(Paths.get(TEST_FOLDER))) { 43 | pathStream 44 | .sorted((path1, path2) -> -path1.compareTo(path2)) 45 | .forEach(path -> { 46 | try { 47 | Files.delete(path); 48 | } catch (IOException e) { 49 | e.printStackTrace(); 50 | } 51 | }); 52 | } catch (IOException e) { 53 | e.printStackTrace(); 54 | } 55 | } 56 | 57 | @Test 58 | void testPerformance() throws IOException { 59 | fireflyDB.start(); 60 | 61 | // Benchmark writes 62 | logger.info("Starting writes..."); 63 | 64 | long[] writeTimes = new long[ITERATIONS]; 65 | 66 | List availableKeys = new ArrayList<>(); 67 | 68 | long startTime = System.nanoTime(); 69 | for (int i = 0; i < ITERATIONS; i++) { 70 | byte[] key = getRandomBytes(KEY_LENGTH); 71 | byte[] value = getRandomBytes(VALUE_LENGTH); 72 | 73 | long writeTime = saveKeyValuePairAndGetTime(key, value); 74 | availableKeys.add(key); 75 | writeTimes[i] = writeTime; 76 | } 77 | long totalTime = (System.nanoTime() - startTime) / 1000; // Convert nanoseconds to microseconds 78 | logger.info("Total time for writes: " + totalTime + " mus"); 79 | 80 | double averageWriteTime = 0; 81 | for (long writeTime : writeTimes) { 82 | averageWriteTime += writeTime; 83 | } 84 | averageWriteTime /= ITERATIONS; 85 | 86 | logger.info("Average write latency: 
" + averageWriteTime + " mus"); 87 | 88 | // Calculate p90 write latency 89 | // Sort write times 90 | Arrays.sort(writeTimes); 91 | long p90WriteTime = writeTimes[(int) (ITERATIONS * 0.9)]; 92 | logger.info("p90 write latency: " + p90WriteTime + " mus"); 93 | 94 | // Benchmark reads 95 | logger.info("\nStarting reads..."); 96 | 97 | long[] readTimes = new long[ITERATIONS]; 98 | 99 | startTime = System.nanoTime(); 100 | for (int i = 0; i < ITERATIONS; i++) { 101 | // Get a random key from the list of available keys 102 | byte[] key = availableKeys.get(new Random().nextInt(availableKeys.size())); 103 | 104 | long readTime = getKeyValuePairAndGetTime(key); 105 | readTimes[i] = readTime; 106 | } 107 | totalTime = (System.nanoTime() - startTime) / 1000; // Convert nanoseconds to microseconds 108 | logger.info("Total time for reads: " + totalTime + " mus"); 109 | 110 | double averageReadTime = 0; 111 | for (long readTime : readTimes) { 112 | averageReadTime += readTime; 113 | } 114 | averageReadTime /= ITERATIONS; 115 | 116 | logger.info("Average read latency: " + averageReadTime + " mus"); 117 | 118 | // Calculate p90 read latency 119 | // Sort read times 120 | Arrays.sort(readTimes); 121 | long p90ReadTime = readTimes[(int) (ITERATIONS * 0.9)]; 122 | logger.info("p90 read latency: " + p90ReadTime + " mus"); 123 | 124 | 125 | // Benchmark reads and writes 126 | logger.info("\nStarting reads and writes..."); 127 | 128 | 129 | startTime = System.nanoTime(); 130 | for (int i = 0; i < ITERATIONS; i++) { 131 | byte[] writeKey = getRandomBytes(KEY_LENGTH); 132 | byte[] writeValue = getRandomBytes(VALUE_LENGTH); 133 | 134 | long writeTime = saveKeyValuePairAndGetTime(writeKey, writeValue); 135 | 136 | availableKeys.add(writeKey); 137 | 138 | byte[] readKey = availableKeys.get(new Random().nextInt(availableKeys.size())); 139 | 140 | long readTime = getKeyValuePairAndGetTime(readKey); 141 | 142 | writeTimes[i] = writeTime; 143 | readTimes[i] = readTime; 144 | } 145 | totalTime 
= (System.nanoTime() - startTime) / 1000; // Convert nanoseconds to microseconds 146 | logger.info("Total time for reads and writes: " + totalTime + " mus"); 147 | 148 | averageReadTime = 0; 149 | for (long readTime : readTimes) { 150 | averageReadTime += readTime; 151 | } 152 | averageReadTime /= ITERATIONS; 153 | 154 | averageWriteTime = 0; 155 | for (long writeTime : writeTimes) { 156 | averageWriteTime += writeTime; 157 | } 158 | averageWriteTime /= ITERATIONS; 159 | 160 | logger.info("Average read latency: " + averageReadTime + " mus"); 161 | logger.info("Average write latency: " + averageWriteTime + " mus"); 162 | 163 | // Calculate p90 read latency 164 | // Sort read times 165 | Arrays.sort(readTimes); 166 | 167 | // Calculate p90 write latency 168 | // Sort write times 169 | Arrays.sort(writeTimes); 170 | 171 | p90ReadTime = readTimes[(int) (ITERATIONS * 0.9)]; 172 | logger.info("p90 read latency: " + p90ReadTime + " mus"); 173 | 174 | p90WriteTime = writeTimes[(int) (ITERATIONS * 0.9)]; 175 | logger.info("p90 write latency: " + p90WriteTime + " mus"); 176 | } 177 | 178 | private byte[] getRandomBytes(int length) { 179 | byte[] bytes = new byte[length]; 180 | new Random().nextBytes(bytes); 181 | return bytes; 182 | } 183 | 184 | private long saveKeyValuePairAndGetTime(byte[] key, byte[] value) throws IOException { 185 | long startTime = System.nanoTime(); 186 | fireflyDB.set(key, value); 187 | return (System.nanoTime() - startTime) / 1000; // Convert nanoseconds to microseconds 188 | } 189 | 190 | private long getKeyValuePairAndGetTime(byte[] key) throws IOException { 191 | long startTime = System.nanoTime(); 192 | fireflyDB.get(key); 193 | return (System.nanoTime() - startTime) / 1000; // Convert nanoseconds to microseconds 194 | } 195 | } 196 | -------------------------------------------------------------------------------- /src/test/java/com/sahilbondre/firefly/TestUtils.java: 
--------------------------------------------------------------------------------
package com.sahilbondre.firefly;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Comparator;
import java.util.stream.Stream;

/**
 * Shared helpers for the test suite.
 */
public class TestUtils {

    private TestUtils() {
        // Utility class; not instantiable.
    }

    /**
     * Recursively deletes everything inside the given folder, leaving the folder
     * itself in place. A no-op when the folder does not exist.
     *
     * @param folderPath path of the folder whose contents should be removed
     * @throws IOException if the directory tree cannot be walked
     */
    public static void deleteFolderContentsIfExists(String folderPath) throws IOException {
        Path path = Paths.get(folderPath);
        if (!Files.exists(path)) {
            return;
        }
        // Files.walk yields parents before their children; sort in reverse order so
        // files are deleted before the directories that contain them. The stream
        // holds open directory handles, so close it with try-with-resources.
        try (Stream<Path> files = Files.walk(path)) {
            files
                .sorted(Comparator.reverseOrder())
                // Keep the root folder itself: this method only clears contents.
                .filter(p -> !p.equals(path))
                .forEach(p -> {
                    try {
                        Files.delete(p);
                    } catch (IOException ignored) {
                        // Best-effort cleanup; a leftover file must not fail a test run.
                    }
                });
        }
    }
}
--------------------------------------------------------------------------------
/src/test/java/com/sahilbondre/firefly/filetable/SerializedPersistableFileTableTest.java:
--------------------------------------------------------------------------------
package com.sahilbondre.firefly.filetable;

import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;

import static org.junit.jupiter.api.Assertions.*;

/**
 * Tests for {@link SerializedPersistableFileTable}: in-memory put/get behavior
 * and round-tripping the table through its on-disk serialized form.
 */
class SerializedPersistableFileTableTest {

    private static final String TEST_FILE_PATH = "src/test/resources/map";
    private SerializedPersistableFileTable fileTable;

    @BeforeEach
    void setUp() throws IOException {
        Files.deleteIfExists(Paths.get(TEST_FILE_PATH));
        fileTable = SerializedPersistableFileTable.fromEmpty();
    }

    @AfterEach
    void tearDown() throws IOException {
        Files.deleteIfExists(Paths.get(TEST_FILE_PATH));
    }

    @Test
    void given_KeyValue_When_PuttingAndGet_Then_RetrievedValueMatches() {
        // Given
        byte[] key = "testKey".getBytes();
        FilePointer expectedValue = new FilePointer("test.txt", 42);

        // When
        fileTable.put(key, new FilePointer("test.txt", 42));
        FilePointer retrievedValue = fileTable.get(key);

        // Then
        assertEquals(expectedValue, retrievedValue);
    }

    @Test
    void given_NullKey_When_PuttingAndGet_Then_RetrievedValueIsNull() {
        // Given
        FilePointer value = new FilePointer("test.txt", 42);

        // When
        fileTable.put(null, value);
        FilePointer retrievedValue = fileTable.get(null);

        // Then
        assertNull(retrievedValue);
    }

    @Test
    void given_NullValue_When_PuttingAndGet_Then_RetrievedValueIsNull() {
        // Given
        byte[] key = "testKey".getBytes();

        // When
        fileTable.put(key, null);
        FilePointer retrievedValue = fileTable.get(key);

        // Then
        assertNull(retrievedValue);
    }

    @Test
    void given_KeyValue_When_SavingToDiskAndLoadingFromFile_Then_RetrievedValueMatches() throws FileNotFoundException {
        // Given
        byte[] key = "testKey".getBytes();
        FilePointer value = new FilePointer("test.txt", 42);

        // When
        fileTable.put(key, value);
        fileTable.saveToDisk(TEST_FILE_PATH);
        SerializedPersistableFileTable loadedFileTable = SerializedPersistableFileTable.fromFile(TEST_FILE_PATH);
        FilePointer retrievedValue = loadedFileTable.get(key);

        // Then
        assertEquals(value, retrievedValue);
    }

    @Test
    void given_NonexistentFile_When_LoadingFromFile_Then_FileNotFoundExceptionIsThrown() {
        // When
        // Then
        assertThrows(FileNotFoundException.class,
            () -> SerializedPersistableFileTable.fromFile(TEST_FILE_PATH));
    }

    @Test
    void given_CorruptedFile_When_LoadingFromFile_Then_InvalidFileTableExceptionIsThrown() throws IOException {
        // Given
        // Create a corrupted file by writing invalid data
        Path filePath = Paths.get(TEST_FILE_PATH);
        Files.write(filePath, List.of("Invalid Data"));

        // Then
        assertThrows(InvalidFileTableException.class,
            () -> SerializedPersistableFileTable.fromFile(TEST_FILE_PATH));
    }
}
--------------------------------------------------------------------------------
/src/test/java/com/sahilbondre/firefly/log/FileChannelRandomAccessLogTest.java:
--------------------------------------------------------------------------------
package com.sahilbondre.firefly.log;

import com.sahilbondre.firefly.filetable.FilePointer;
import com.sahilbondre.firefly.model.Segment;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

import java.io.IOException;
import java.nio.channels.ClosedChannelException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

import static org.junit.jupiter.api.Assertions.*;

/**
 * Tests for {@link FileChannelRandomAccessLog}: append/read semantics, range
 * validation, segment round-trips, and behavior after close.
 */
class FileChannelRandomAccessLogTest {

    private static final String TEST_FILE_NAME = "src/test/resources/test.log";
    private static final Path TEST_FILE_PATH = Paths.get(TEST_FILE_NAME);
    private FileChannelRandomAccessLog randomAccessLog;

    @BeforeEach
    void setUp() throws IOException {
        Files.deleteIfExists(TEST_FILE_PATH);
        Files.createFile(TEST_FILE_PATH);
        randomAccessLog = new FileChannelRandomAccessLog(TEST_FILE_NAME);
    }

    @AfterEach
    void tearDown() throws IOException {
        try {
            randomAccessLog.close();
        } catch (ClosedChannelException e) {
            // Ignore: some tests close the log themselves.
        }
        Files.deleteIfExists(TEST_FILE_PATH);
    }

    @Test
    void givenEmptyLog_whenGetSize_thenReturnsZero() throws IOException {
        // Given
        // An empty log

        // When
        long size = randomAccessLog.size();

        // Then
        assertEquals(0, size);
    }

    @Test
    void givenLogWithContent_whenGetSize_thenReturnsCorrectSize() throws IOException {
        // Given
        // A log with content

        // When
        randomAccessLog.append("Hello".getBytes());
        randomAccessLog.append("World".getBytes());

        // Then
        assertEquals(10, randomAccessLog.size());
    }

    @Test
    void givenLog_whenGetFilePath_thenReturnsCorrectPath() {
        // Given
        // A log instance

        // When
        String filePath = randomAccessLog.getFilePath();

        // Then
        assertEquals(TEST_FILE_NAME, filePath);
    }

    @Test
    void givenLogWithContent_whenAppend_thenAppendsCorrectly() throws IOException {
        // Given
        // A log with existing content

        // When
        randomAccessLog.append("Hello".getBytes());
        randomAccessLog.append("World".getBytes());
        byte[] result = randomAccessLog.read(0, randomAccessLog.size());

        // Then
        assertArrayEquals("HelloWorld".getBytes(), result);
    }

    @Test
    void givenLogWithContent_whenReadSubset_thenReturnsSubset() throws IOException, InvalidRangeException {
        // Given
        // A log with existing content

        // When
        randomAccessLog.append("The quick brown fox".getBytes());
        byte[] result = randomAccessLog.read(4, 5);

        // Then
        assertArrayEquals("quick".getBytes(), result);
    }

    @Test
    void givenInvalidRange_whenRead_thenThrowsInvalidRangeException() throws IOException {
        // Given
        randomAccessLog.append("Hello".getBytes());
        // An invalid range for reading

        // When/Then
        assertThrows(InvalidRangeException.class, () -> randomAccessLog.read(0, -1));
        assertThrows(InvalidRangeException.class, () -> randomAccessLog.read(-1, 5));
        assertThrows(InvalidRangeException.class, () -> randomAccessLog.read(15, 10));
        assertThrows(InvalidRangeException.class, () -> randomAccessLog.read(2, 10));
        assertThrows(InvalidRangeException.class, () -> randomAccessLog.read(0, 6));
    }

    @Test
    void givenLog_whenClose_thenFileIsNotAccessible() throws IOException {
        // Given
        // An open log

        // When
        randomAccessLog.close();

        // Then
        assertTrue(Files.exists(TEST_FILE_PATH));
        assertThrows(IOException.class, () -> randomAccessLog.append("NewContent".getBytes()));
    }

    @Test
    void givenLogWithContent_whenReadSegment_thenReturnsCorrectSegment() throws IOException, InvalidRangeException {
        // Given
        // A log with existing content
        Segment firstSegment = Segment.fromKeyValuePair("Hello".getBytes(), "World".getBytes());
        Segment secondSegment = Segment.fromKeyValuePair("Foo".getBytes(), "Bar".getBytes());
        FilePointer firstFilePointer = randomAccessLog.append(firstSegment.getBytes());
        FilePointer secondFilePointer = randomAccessLog.append(secondSegment.getBytes());

        // When
        Segment firstReadSegment = randomAccessLog.readSegment(firstFilePointer.getOffset());
        Segment secondReadSegment = randomAccessLog.readSegment(secondFilePointer.getOffset());

        // Then
        assertArrayEquals(firstSegment.getBytes(), firstReadSegment.getBytes());
        assertArrayEquals(secondSegment.getBytes(), secondReadSegment.getBytes());
        assertEquals("Hello", new String(firstReadSegment.getKey()));
        assertEquals("World", new String(firstReadSegment.getValue()));
        assertEquals("Foo", new String(secondReadSegment.getKey()));
        assertEquals("Bar", new String(secondReadSegment.getValue()));
    }

    @Test
    void givenLogWithContent_whenReadSegmentWithInvalidOffset_thenThrowsInvalidRangeException() throws IOException {
        // Given
        // A log with existing content
        Segment firstSegment = Segment.fromKeyValuePair("Hello".getBytes(), "World".getBytes());
        Segment secondSegment = Segment.fromKeyValuePair("Foo".getBytes(), "Bar".getBytes());
        randomAccessLog.append(firstSegment.getBytes());
        randomAccessLog.append(secondSegment.getBytes());

        // When/Then
        assertThrows(InvalidRangeException.class, () -> randomAccessLog.readSegment(-1));
        assertThrows(InvalidRangeException.class, () -> randomAccessLog.readSegment(100));
    }

    @Test
    void givenEmptyLog_whenReadSegment_thenThrowsInvalidRangeException() {
        // Given
        // An empty log

        // When/Then
        assertThrows(InvalidRangeException.class, () -> randomAccessLog.readSegment(0));
    }

    @Test
    void givenLogWithContent_whenAppend_thenReturnsCorrectFilePointer() throws IOException {
        // Given
        // A log with existing content

        // When
        FilePointer fp1 = randomAccessLog.append("Hello".getBytes());
        FilePointer fp2 = randomAccessLog.append("World".getBytes());

        // Then
        assertEquals(TEST_FILE_NAME, fp1.getFileName());
        assertEquals(0, fp1.getOffset());
        assertEquals(TEST_FILE_NAME, fp2.getFileName());
        assertEquals(5, fp2.getOffset());
    }
}
--------------------------------------------------------------------------------
/src/test/java/com/sahilbondre/firefly/model/SegmentTest.java:
--------------------------------------------------------------------------------
package com.sahilbondre.firefly.model;

import org.junit.jupiter.api.Test;

import static org.junit.jupiter.api.Assertions.*;

/**
 * Tests for {@link Segment}: construction from raw bytes and from key/value
 * pairs, plus checksum/size validation against deliberately corrupted data.
 * Segment layout: 2-byte CRC, 2-byte key size, 4-byte value size, key, value.
 */
class SegmentTest {

    @Test
    void givenByteArray_whenCreatingSegment_thenAccessorsReturnCorrectValues() {
        // Given
        byte[] testData = new byte[]{
            (byte) -83, (byte) 64,
            0x00, 0x05, // Key Size
            0x00, 0x00, 0x00, 0x05, // Value Size
            0x48, 0x65, 0x6C, 0x6C, 0x6F, // Key: "Hello"
            0x57, 0x6F, 0x72, 0x6C, 0x64 // Value: "World"
        };

        // When
        Segment segment = Segment.fromByteArray(testData);

        // Then
        assertArrayEquals(testData, segment.getBytes());
        assertArrayEquals("Hello".getBytes(), segment.getKey());
        assertArrayEquals("World".getBytes(), segment.getValue());
        assertEquals(5, segment.getKeySize());
        assertEquals(5, segment.getValueSize());
        assertEquals(-83, segment.getCrc()[0]);
        assertEquals(64, segment.getCrc()[1]);
        assertTrue(segment.isSegmentValid());
        assertTrue(segment.isChecksumValid());
    }

    @Test
    void givenCorruptedKeySizeSegment_whenCheckingChecksum_thenIsChecksumValidReturnsFalse() {
        // Given
        byte[] testData = new byte[]{
            (byte) -83, (byte) 64,
            0x01, 0x45, // Key Size (Bit Flipped)
            0x00, 0x00, 0x00, 0x05, // Value Size
            0x48, 0x65, 0x6C, 0x6C, 0x6F, // Key: "Hello"
            0x57, 0x6F, 0x72, 0x6C, 0x64 // Value: "World"
        };

        // When
        Segment corruptedSegment = Segment.fromByteArray(testData);

        // Then
        assertFalse(corruptedSegment.isChecksumValid());
        assertFalse(corruptedSegment.isSegmentValid());
    }

    @Test
    void givenCorruptedValueSizeSegment_whenCheckingChecksum_thenIsChecksumValidReturnsFalse() {
        // Given
        byte[] testData = new byte[]{
            (byte) -83, (byte) 64,
            0x00, 0x05, // Key Size
            0x00, 0x00, 0x01, 0x05, // Value Size (Bit Flipped)
            0x48, 0x65, 0x6C, 0x6C, 0x6F, // Key: "Hello"
            0x57, 0x6F, 0x72, 0x6C, 0x64 // Value: "World"
        };

        // When
        Segment corruptedSegment = Segment.fromByteArray(testData);

        // Then
        assertFalse(corruptedSegment.isChecksumValid());
        assertFalse(corruptedSegment.isSegmentValid());
    }

    @Test
    void givenCorruptedKeySegment_whenCheckingChecksum_thenIsChecksumValidReturnsFalse() {
        // Given
        byte[] testData = new byte[]{
            (byte) -83, (byte) 64,
            0x00, 0x05, // Key Size
            0x00, 0x00, 0x00, 0x05, // Value Size
            0x48, 0x65, 0x6C, 0x6C, 0x6E, // Key: "Hello" (Bit Flipped)
            0x57, 0x6F, 0x72, 0x6C, 0x64 // Value: "World"
        };

        // When
        Segment corruptedSegment = Segment.fromByteArray(testData);

        // Then
        assertFalse(corruptedSegment.isChecksumValid());
        assertFalse(corruptedSegment.isSegmentValid());
    }

    @Test
    void givenCorruptedValueSegment_whenCheckingChecksum_thenIsChecksumValidReturnsFalse() {
        // Given
        byte[] testData = new byte[]{
            (byte) -83, (byte) 64,
            0x00, 0x05, // Key Size
            0x00, 0x00, 0x00, 0x05, // Value Size
            0x48, 0x65, 0x6C, 0x6C, 0x6F, // Key: "Hello"
            0x57, 0x6F, 0x62, 0x6C, 0x65 // Value: "World" (Bit Flipped)
        };

        // When
        Segment corruptedSegment = Segment.fromByteArray(testData);

        // Then
        assertFalse(corruptedSegment.isChecksumValid());
        assertFalse(corruptedSegment.isSegmentValid());
    }

    @Test
    void givenIncorrectValueLengthSegment_whenCheckingSegmentValid_thenIsSegmentValidReturnsFalse() {
        // Given
        byte[] testData = new byte[]{
            (byte) -43, (byte) -70,
            0x00, 0x05, // Key Size
            0x00, 0x00, 0x00, 0x06, // Value Size (Incorrect)
            0x48, 0x65, 0x6C, 0x6C, 0x6F, // Key: "Hello"
            0x57, 0x6F, 0x72, 0x6C, 0x64 // Value: "World"
        };

        // When
        Segment corruptedSegment = Segment.fromByteArray(testData);

        // Then
        assertTrue(corruptedSegment.isChecksumValid());
        assertFalse(corruptedSegment.isSegmentValid());
    }

    @Test
    void givenKeyValuePair_whenCreatingSegment_thenAccessorsReturnCorrectValues() {
        // Given
        byte[] key = "Hello".getBytes();
        byte[] value = "World".getBytes();
        byte[] expectedSegment = new byte[]{
            (byte) -83, (byte) 64,
            0x00, 0x05, // Key Size
            0x00, 0x00, 0x00, 0x05, // Value Size
            0x48, 0x65, 0x6C, 0x6C, 0x6F, // Key: "Hello"
            0x57, 0x6F, 0x72, 0x6C, 0x64 // Value: "World"
        };

        // When
        Segment segment = Segment.fromKeyValuePair(key, value);

        // Then
        assertArrayEquals("Hello".getBytes(), segment.getKey());
        assertArrayEquals("World".getBytes(), segment.getValue());
        assertEquals(5, segment.getKeySize());
        assertEquals(5, segment.getValueSize());
        assertEquals(-83, segment.getCrc()[0]);
        assertEquals(64, segment.getCrc()[1]);
        assertTrue(segment.isSegmentValid());
        assertTrue(segment.isChecksumValid());
        assertArrayEquals(expectedSegment, segment.getBytes());
    }

    @Test
    void givenKeyAndValue_whenCreatingSegment_thenSegmentIsCreatedWithCorrectSizes() {
        // Given
        byte[] key = "Hello".getBytes();
        byte[] value = "World".getBytes();

        // When
        Segment segment = Segment.fromKeyValuePair(key, value);

        // Then
        assertArrayEquals(key, segment.getKey());
        assertArrayEquals(value, segment.getValue());
        assertEquals(key.length, segment.getKeySize());
        assertEquals(value.length, segment.getValueSize());
    }
}
--------------------------------------------------------------------------------
/src/test/resources/.empty:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/godcrampy/fireflydb/a3ddffc3d5b2722b870af6b1bf21c35bf50cfa89/src/test/resources/.empty
--------------------------------------------------------------------------------