├── .circleci
└── config.yml
├── .codespellrc
├── .gitignore
├── Changelog.md
├── Dockerfile
├── LICENSE.txt
├── README.md
├── build.sbt
├── client
├── .gitignore
├── .python-version
├── Dockerfile
├── copy-all-script
├── copy-some-script
├── fossildb-client
├── interactive
│ ├── client.py
│ ├── client.tcss
│ ├── db_connection.py
│ ├── modal.tcss
│ └── record_explorer.py
├── pyproject.toml
├── update_api.sh
└── uv.lock
├── docker-compose.yaml
├── fossildb
├── icon.svg
├── logo.svg
├── project
├── assembly.sbt
├── build.properties
├── buildinfo.sbt
├── plugins.sbt
└── scalapb.sbt
└── src
├── main
├── protobuf
│ └── fossildbapi.proto
├── resources
│ ├── .gitignore
│ └── logback.xml
└── scala
│ └── com
│ └── scalableminds
│ └── fossildb
│ ├── FossilDB.scala
│ ├── FossilDBGrpcImpl.scala
│ ├── FossilDBServer.scala
│ └── db
│ ├── RocksDBStore.scala
│ ├── StoreManager.scala
│ └── VersionedKeyValueStore.scala
└── test
├── resources
├── .gitignore
└── logback-test.xml
└── scala
├── .gitignore
└── com
└── scalableminds
└── fossildb
├── FossilDBSuite.scala
├── RocksOptionsSuite.scala
└── TestHelpers.scala
/.circleci/config.yml:
--------------------------------------------------------------------------------
# CircleCI pipeline: build the FossilDB server and client docker images,
# smoke-test them via docker-compose, push to Dockerhub, and (on master)
# release the executable JAR on GitHub.
version: 2
jobs:
  build_and_push:
    machine:
      image: ubuntu-2004:202111-02
    environment:
      # Tag of the sbt build image used via docker-compose
      SBT_VERSION_TAG: master__7830403826
      USER_UID: 1000
      USER_GID: 1000
      # Where sbt assembly places fossildb.jar and the version file
      TARGET_DIR: target/scala-2.13
    steps:
      - checkout

      - run:
          name: Build server
          command: docker-compose run -T -e CI=$CI sbt sbt assembly

      - run:
          name: Get FossilDB version
          command: docker-compose run -T sbt java -jar $TARGET_DIR/fossildb.jar --version > $TARGET_DIR/version

      - run:
          name: Build server docker image
          command: |
            docker build \
              -t scalableminds/fossildb:${CIRCLE_BRANCH}__${CIRCLE_BUILD_NUM} \
              -t scalableminds/fossildb:${CIRCLE_BRANCH} \
              .
      - run:
          name: Build client docker image
          command: |
            docker build \
              -f client/Dockerfile \
              -t scalableminds/fossildb-client:${CIRCLE_BRANCH}__${CIRCLE_BUILD_NUM} \
              -t scalableminds/fossildb-client:${CIRCLE_BRANCH} \
              .

      - run:
          name: Smoke test
          command: |
            FOSSILDB_TAG=${CIRCLE_BRANCH}__${CIRCLE_BUILD_NUM} \
            docker-compose up -d fossildb
            sleep 1
            FOSSILDB_TAG=${CIRCLE_BRANCH}__${CIRCLE_BUILD_NUM} \
            FOSSILDB_CLIENT_TAG=${CIRCLE_BRANCH}__${CIRCLE_BUILD_NUM} \
            docker-compose run fossildb-health-check
            docker-compose down

      - run:
          name: Push to Dockerhub
          command: |
            echo $DOCKER_PASS | docker login -u $DOCKER_USER --password-stdin
            docker push scalableminds/fossildb:${CIRCLE_BRANCH}__${CIRCLE_BUILD_NUM}
            docker push scalableminds/fossildb:${CIRCLE_BRANCH}
            docker push scalableminds/fossildb-client:${CIRCLE_BRANCH}__${CIRCLE_BUILD_NUM}
            docker push scalableminds/fossildb-client:${CIRCLE_BRANCH}
            # Master builds additionally get a semantic version tag, read from the built JAR
            if [ "${CIRCLE_BRANCH}" == "master" ]; then
              VERSION=$(cat $TARGET_DIR/version)
              docker tag scalableminds/fossildb:${CIRCLE_BRANCH}__${CIRCLE_BUILD_NUM} scalableminds/fossildb:${VERSION}
              docker push scalableminds/fossildb:${VERSION}
              docker tag scalableminds/fossildb-client:${CIRCLE_BRANCH}__${CIRCLE_BUILD_NUM} scalableminds/fossildb-client:${VERSION}
              docker push scalableminds/fossildb-client:${VERSION}
            fi
            docker logout

      - run:
          name: Show version
          command: cat $TARGET_DIR/version

      - run:
          name: Release JAR on Github
          command: |
            if [ "${CIRCLE_BRANCH}" == "master" ]; then
              wget https://github.com/cli/cli/releases/download/v2.18.1/gh_2.18.1_linux_amd64.deb
              sudo apt install ./gh_2.18.1_linux_amd64.deb
              TAG_NAME="$(cat $TARGET_DIR/version | tr -d [:space:])"
              gh release create $TAG_NAME -t "$TAG_NAME $(git log -1 --pretty=%B)" -R scalableminds/fossildb --target $(git rev-parse HEAD) -n "Executable JAR of __FossilDB__" $TARGET_DIR/fossildb.jar
            fi

workflows:
  version: 2
  circleci_build:
    jobs:
      - build_and_push:
          context:
            - DockerHub

--------------------------------------------------------------------------------
/.codespellrc:
--------------------------------------------------------------------------------
1 | [codespell]
2 | # Ref: https://github.com/codespell-project/codespell#using-a-config-file
3 | skip = *.svg,*.sublime-workspace,*.log,.codespellrc,./target
4 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | data
2 | backup
3 |
4 |
5 | *.class
6 | *.log
7 |
8 | # sbt specific
9 | .cache/
10 | .history/
11 | .lib/
12 | dist/*
13 | target/
14 | lib_managed/
15 | src_managed/
16 | project/boot/
17 | project/plugins/project/
18 | .bsp
19 |
20 | # Scala-IDE specific
21 | .scala_dependencies
22 | .worksheet
23 | .idea
24 |
--------------------------------------------------------------------------------
/Changelog.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 |
3 | ## Added
4 | - New API endpoints `DeleteAllByPrefix` and `PutMultipleVersions`. [#47](https://github.com/scalableminds/fossildb/pull/47)
5 | - New API endpoints `GetMultipleKeysByListWithMultipleVersions` and `PutMultipleKeysWithMultipleVersions` for reading and writing multiple keys/versions in one request. [#48](https://github.com/scalableminds/fossildb/pull/48)
6 | - `ListKeys` now supports optional `prefix` field
7 | - New API endpoint `GetMultipleKeysByList`. [#52](https://github.com/scalableminds/fossildb/pull/52)
8 |
9 | ## Breaking Changes
10 |
11 | - The `GetMultipleKeys` call now takes a `startAfterKey` instead of a `key` for pagination. The returned list will only start *after* this key. [#38](https://github.com/scalableminds/fossildb/pull/38)
12 | - Now needs Java 11+
13 |
14 | ## Fixes
15 |
16 | - Fixed a bug where the pagination for `GetMultipleKeys` could lead to an endless loop if some keys are prefixes of others. [#38](https://github.com/scalableminds/fossildb/pull/38)
17 | - Empty entries are now removed in the response of `GetMultipleKeysByListWithMultipleVersions`. Those could previously occur if no versions matched the requested range. [#51](https://github.com/scalableminds/fossildb/pull/51)
18 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
FROM eclipse-temurin:21-jammy

# gosu lets the launcher script drop root privileges at runtime
RUN apt-get update && apt-get install -y --no-install-recommends gosu && rm -rf /var/lib/apt/lists/*
RUN mkdir -p /fossildb
WORKDIR /fossildb

# Expects the assembled JAR from `sbt assembly` plus the `fossildb` launcher script
COPY target/scala-2.13/fossildb.jar .
COPY fossildb .

RUN groupadd -r fossildb \
  && useradd -r -g fossildb fossildb \
  && ln -s /fossildb/fossildb /usr/local/bin \
  && chmod 777 . \
  && chown -R fossildb .

# grpc_health_probe is used by the HEALTHCHECK below (gRPC has no plain HTTP ping)
RUN GRPC_HEALTH_PROBE_VERSION=v0.4.20 && \
    wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \
    chmod +x /bin/grpc_health_probe

# Default FossilDB gRPC port
EXPOSE 7155

HEALTHCHECK \
  --interval=2s --timeout=5s --retries=30 \
  CMD /bin/grpc_health_probe -addr=:7155

CMD [ "fossildb" ]

--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | Copyright (c) 2017 scalable minds UG (haftungsbeschränkt) & Co. KG
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining
4 | a copy of this software and associated documentation files (the
5 | "Software"), to deal in the Software without restriction, including
6 | without limitation the rights to use, copy, modify, merge, publish,
7 | distribute, sublicense, and/or sell copies of the Software, and to
8 | permit persons to whom the Software is furnished to do so, subject to
9 | the following conditions:
10 |
11 | The above copyright notice and this permission notice shall be
12 | included in all copies or substantial portions of the Software.
13 |
14 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
16 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
18 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
19 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
20 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
21 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# FossilDB
2 |
3 | Versioned Key-Value Store with RocksDB backend and gRPC API
4 |
5 | [](https://circleci.com/gh/scalableminds/fossildb)
6 |
7 | ## Installation & Usage
8 | You can download the [executable jar](https://github.com/scalableminds/fossildb/releases/latest),
9 | ```
10 | java -jar fossildb.jar -c default
11 | ```
12 | or use a [docker image](https://hub.docker.com/r/scalableminds/fossildb/tags) and run
13 | ```
14 | docker run scalableminds/fossildb:master fossildb -c default
15 | ```
16 |
17 | For further options, see `help`:
18 | ```
19 | -p, --port port to listen on. Default: 7155
20 | -d, --dataDir database directory. Default: data
21 | -b, --backupDir backup directory. Default: backup
22 | -c, --columnFamilies ,...
23 | column families of the database (created if there is no db yet)
24 | ```
25 |
26 | ## API
27 | FossilDB can be used via its [gRPC API](src/main/protobuf/fossildbapi.proto).
28 |
--------------------------------------------------------------------------------
/build.sbt:
--------------------------------------------------------------------------------
import sbt._

name := "fossildb"

// Version scheme: on CI master builds, take the latest git tag (assumed to be a
// dotted integer version) and bump its last component; otherwise mark the build
// as DEV-<branch>.
def getVersionFromGit: String = {
  // Runs a command and returns the first line of its stdout (null if no output).
  def run(cmd: String): String = new java.io.BufferedReader(new java.io.InputStreamReader(java.lang.Runtime.getRuntime.exec(cmd).getInputStream)).readLine()
  def getBranch = run("git rev-parse --abbrev-ref HEAD")

  if (sys.env.contains("CI") && getBranch == "master") {
    val oldVersion = run("git describe --tags --abbrev=0").split('.').toList.map(_.toInt)
    (oldVersion.init :+ (oldVersion.last + 1)).mkString(".")
  } else {
    "DEV-" + getBranch
  }
}

ThisBuild / scalacOptions ++= Seq(
  "-feature",
  "-deprecation"
)

version := getVersionFromGit

scalaVersion := "2.13.15"

libraryDependencies ++= Seq(
  "ch.qos.logback" % "logback-classic" % "1.5.6",
  "com.typesafe.scala-logging" %% "scala-logging" % "3.9.5",
  "org.scalatest" % "scalatest_2.13" % "3.2.19" % "test",
  "io.grpc" % "grpc-netty" % scalapb.compiler.Version.grpcJavaVersion,
  "io.grpc" % "grpc-services" % scalapb.compiler.Version.grpcJavaVersion,
  "com.thesamet.scalapb" %% "scalapb-runtime-grpc" % scalapb.compiler.Version.scalapbVersion,
  "org.rocksdb" % "rocksdbjni" % "9.4.0",
  "com.github.scopt" %% "scopt" % "4.1.0"
)

// ScalaPB writes generated sources here; register the directory so they compile.
Compile / managedSourceDirectories += target.value / "protobuf-generated"

Compile / PB.targets := Seq(
  scalapb.gen() -> (target.value / "protobuf-generated")
)

Compile / mainClass := Some("com.scalableminds.fossildb.FossilDB")

assembly / assemblyMergeStrategy := {
  case x if x.endsWith("io.netty.versions.properties") => MergeStrategy.first
  // compare https://stackoverflow.com/questions/54834125/sbt-assembly-deduplicate-module-info-class
  case x if x.endsWith("module-info.class") => MergeStrategy.concat
  case x =>
    val oldStrategy = (assembly / assemblyMergeStrategy).value
    oldStrategy(x)
}

assembly / assemblyJarName := "fossildb.jar"


// BuildInfo exposes version, build time and git commit hash to the application.
lazy val buildInfoSettings = Seq(
  buildInfoKeys := Seq[BuildInfoKey](version,
    "commitHash" -> new java.lang.Object() {
      override def toString: String = {
        try {
          val extracted = new java.io.InputStreamReader(java.lang.Runtime.getRuntime.exec("git rev-parse HEAD").getInputStream)
          val str = new java.io.BufferedReader(extracted).readLine()
          if (str == null) {
            "get git hash failed"
          } else str
        } catch {
          // NonFatal instead of Throwable: let fatal VM errors (OOM, interrupts) propagate
          case scala.util.control.NonFatal(_) => "get git hash failed"
        }
      }
    }.toString()
  ),
  buildInfoPackage := "fossildb",
  buildInfoOptions := Seq(BuildInfoOption.BuildTime, BuildInfoOption.ToJson)
)

lazy val root = (project in file(".")).
  enablePlugins(BuildInfoPlugin).
  settings(
    buildInfoSettings
  )

--------------------------------------------------------------------------------
/client/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__
2 | *.pyc
3 |
4 | fossildbapi_pb2_grpc.py
5 | fossildbapi_pb2.py
6 |
7 | env
8 | venv
9 | .venv
10 |
11 | *.bin
--------------------------------------------------------------------------------
/client/.python-version:
--------------------------------------------------------------------------------
1 | 3.10
2 |
--------------------------------------------------------------------------------
/client/Dockerfile:
--------------------------------------------------------------------------------
FROM python:3.10

# The client generates its gRPC stubs from the server's proto definition,
# so both the proto sources and the client directory are needed.
COPY src/main/protobuf /fossildb/src/main/protobuf
COPY client /fossildb/client

WORKDIR /fossildb/client

RUN pip3 install argparse grpcio-tools grpcio-health-checking
# Generates fossildbapi_pb2*.py from the proto file
RUN ./update_api.sh

ENTRYPOINT ["/fossildb/client/fossildb-client"]

--------------------------------------------------------------------------------
/client/copy-all-script:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import argparse
4 | import grpc
5 | import sys
6 |
7 | import fossildbapi_pb2 as proto
8 | import fossildbapi_pb2_grpc as proto_rpc
9 |
10 | MAX_MESSAGE_LENGTH = 1073741824
11 |
def main():
    # Copies all keys (all versions) of the listed collections from a source
    # FossilDB instance to a destination instance, paging through the key space.
    verbose = True

    # NOTE(review): collections, batch size and ports are hard-coded; adjust before use.
    collections = ['skeletons', 'volumes', 'volumeData', 'skeletonUpdates']

    listKeysBatchSize = 300

    srcPort = 2000
    dstPort = 7155

    srcChannel = grpc.insecure_channel('localhost:{}'.format(srcPort), options=[('grpc.max_send_message_length', MAX_MESSAGE_LENGTH), (
        'grpc.max_receive_message_length', MAX_MESSAGE_LENGTH)])
    srcStub = proto_rpc.FossilDBStub(srcChannel)

    dstChannel = grpc.insecure_channel('localhost:{}'.format(dstPort), options=[('grpc.max_send_message_length', MAX_MESSAGE_LENGTH), (
        'grpc.max_receive_message_length', MAX_MESSAGE_LENGTH)])
    dstStub = proto_rpc.FossilDBStub(dstChannel)

    # Abort early if either side is unreachable
    testHealth(srcStub, 'source fossildb at {}'.format(srcPort))
    testHealth(dstStub, 'destination fossildb at {}'.format(dstPort))

    putCount = 0

    for collection in collections:
        print('copying collection ' + collection)
        lastKey = None  # pagination cursor; ListKeys returns keys strictly after it
        while True:
            listKeysReply = srcStub.ListKeys(proto.ListKeysRequest(collection=collection, limit=listKeysBatchSize, startAfterKey=lastKey))
            assertSuccess(listKeysReply)
            if len(listKeysReply.keys) == 0:
                break
            if verbose:
                print(' copying key batch ', listKeysReply.keys)
            for key in listKeysReply.keys:
                if verbose:
                    print(' copying key ', key)
                getMultipleVersionsReply = srcStub.GetMultipleVersions(proto.GetMultipleVersionsRequest(collection=collection, key=key))
                assertSuccess(getMultipleVersionsReply)
                for versionValueTuple in zip(getMultipleVersionsReply.versions, getMultipleVersionsReply.values):
                    if verbose:
                        print(' copying version ', versionValueTuple[0])
                    putReply = dstStub.Put(proto.PutRequest(collection=collection, key=key, version=versionValueTuple[0], value=versionValueTuple[1]))
                    assertSuccess(putReply)
                    putCount += 1
                    if (verbose and putCount % 10 == 0) or putCount % 10000 == 0:
                        print("total put count:", putCount)

            lastKey = listKeysReply.keys[-1]
    print("Done. total put count:", putCount)
61 |
def testHealth(stub, label):
    """Ping the given FossilDB stub once; terminate the process if unreachable."""
    try:
        assertSuccess(stub.Health(proto.HealthRequest()))
        print('successfully connected to ' + label)
    except Exception as err:
        print('failed to connect to ' + label + ': ' + str(err))
        sys.exit(1)
70 |
def assertSuccess(reply):
    """Raise an Exception when a FossilDB reply reports failure."""
    if reply.success:
        return
    raise Exception("reply.success failed: " + reply.errorMessage)
74 |
75 | if __name__ == '__main__':
76 | main()
77 |
--------------------------------------------------------------------------------
/client/copy-some-script:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import json
4 | import grpc
5 | import sys
6 |
7 | import fossildbapi_pb2 as proto
8 | import fossildbapi_pb2_grpc as proto_rpc
9 |
10 | MAX_MESSAGE_LENGTH = 1073741824
11 |
def main():
    # Copies selected tracings (ids listed in tracingReferences.json) from a
    # source FossilDB to a destination FossilDB, including all versions.
    verbose = True

    # Maps the tracing `typ` field to the collections that hold its data.
    collectionsByTyp = {
        'skeleton': ['skeletons', 'skeletonUpdates'],
        'volume': ['volumes', 'volumeData']
    }

    # NOTE(review): ports are hard-coded; adjust before use.
    srcPort = 2000
    dstPort = 7155

    # Expected format: a list of objects with 'id' and 'typ' fields.
    tracingReferences = json.load(open('tracingReferences.json'))

    srcChannel = grpc.insecure_channel('localhost:{}'.format(srcPort), options=[('grpc.max_send_message_length', MAX_MESSAGE_LENGTH), (
        'grpc.max_receive_message_length', MAX_MESSAGE_LENGTH)])
    srcStub = proto_rpc.FossilDBStub(srcChannel)

    dstChannel = grpc.insecure_channel('localhost:{}'.format(dstPort), options=[('grpc.max_send_message_length', MAX_MESSAGE_LENGTH), (
        'grpc.max_receive_message_length', MAX_MESSAGE_LENGTH)])
    dstStub = proto_rpc.FossilDBStub(dstChannel)

    # Abort early if either side is unreachable
    testHealth(srcStub, 'source fossildb at {}'.format(srcPort))
    testHealth(dstStub, 'destination fossildb at {}'.format(dstPort))

    putCount = 0

    for tracingReference in tracingReferences:
        key = tracingReference['id']
        if verbose:
            print(' copying key ', key)
        for collection in collectionsByTyp[tracingReference['typ']]:
            getMultipleVersionsReply = srcStub.GetMultipleVersions(proto.GetMultipleVersionsRequest(collection=collection, key=key))
            assertSuccess(getMultipleVersionsReply)
            if len(getMultipleVersionsReply.versions) == 0:
                print('[warn] no data for', key, 'in', collection)
            for versionValueTuple in zip(getMultipleVersionsReply.versions, getMultipleVersionsReply.values):
                if verbose:
                    print(' copying version ', versionValueTuple[0])
                putReply = dstStub.Put(proto.PutRequest(collection=collection, key=key, version=versionValueTuple[0], value=versionValueTuple[1]))
                assertSuccess(putReply)
                putCount += 1
                if (verbose and putCount % 10 == 0) or putCount % 10000 == 0:
                    print("total put count:", putCount)
    print("Done. total put count:", putCount)

def testHealth(stub, label):
    # Fails fast (exit 1) if the given FossilDB instance is unreachable.
    try:
        reply = stub.Health(proto.HealthRequest())
        assertSuccess(reply)
        print('successfully connected to ' + label)
    except Exception as e:
        print('failed to connect to ' + label + ': ' + str(e))
        sys.exit(1)

def assertSuccess(reply):
    # FossilDB replies carry an application-level success flag next to gRPC status.
    if not reply.success:
        raise Exception("reply.success failed: " + reply.errorMessage)

if __name__ == '__main__':
    main()
72 |
--------------------------------------------------------------------------------
/client/fossildb-client:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import argparse
4 | import grpc
5 | import sys
6 |
7 | import fossildbapi_pb2 as proto
8 | import fossildbapi_pb2_grpc as proto_rpc
9 |
10 | from grpc_health.v1 import health_pb2
11 | from grpc_health.v1 import health_pb2_grpc
12 |
13 |
def parse_args(commands):
    """Parse command-line arguments for the FossilDB client.

    commands: mapping of valid command names; used for the help text and to
    validate the `command` positional.

    Returns the parsed argparse.Namespace; exits with status 20 on an
    unknown command.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'address', metavar='address', default='localhost', nargs='?',
        help='address of the fossildb server (default: %(default)s)')
    parser.add_argument(
        'port', metavar='port', type=int, default=7155, nargs='?',
        help='port of the fossildb server (default: %(default)s)')
    parser.add_argument(
        'command', metavar='command',
        help='command to execute, one of {}'.format(list(commands.keys())))

    args = parser.parse_args()
    if args.command not in commands:
        print("command {} is not available".format(args.command))
        parser.print_help()
        # was: exit(20) — the bare `exit` builtin is an interactive helper from
        # the site module and may be absent; sys.exit is the proper API.
        sys.exit(20)

    return args
33 |
34 |
def health(channel):
    # Queries the standard gRPC health service on the given channel and exits
    # the process with status 1 unless the server reports SERVING.
    try :
        healthStub = health_pb2_grpc.HealthStub(channel)
        reply = healthStub.Check(health_pb2.HealthCheckRequest(service=''))
        # NOTE(review): _HEALTHCHECKRESPONSE_SERVINGSTATUS is a private
        # attribute of grpcio-health-checking; may break on library upgrades.
        STATUSMAP = health_pb2._HEALTHCHECKRESPONSE_SERVINGSTATUS.values_by_name
        SERVING = STATUSMAP["SERVING"].number
        if reply.status != SERVING:
            raise Exception(reply.status)
    except Exception as e:
        print('Health check unsuccessful. FossilDB offline?')
        print(e)
        sys.exit(1)
    return reply
48 |
49 |
def main():
    # Maps command names to callables taking a gRPC channel and returning a reply.
    commands = {
        'backup': lambda channel:
            proto_rpc.FossilDBStub(channel).Backup(proto.BackupRequest()),
        'restore': lambda channel:
            proto_rpc.FossilDBStub(channel).RestoreFromBackup(proto.RestoreFromBackupRequest()),
        'health': health
    }

    args = parse_args(commands)
    full_address = '{}:{}'.format(args.address, args.port)

    print('Connecting to FossilDB at', full_address)
    channel = grpc.insecure_channel(full_address)

    reply = commands[args.command](channel)
    print(reply)
    # Propagate an application-level failure as a non-zero exit code
    if hasattr(reply, 'success') and not reply.success:
        sys.exit(1)


if __name__ == '__main__':
    main()
73 |
--------------------------------------------------------------------------------
/client/interactive/client.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import asyncio
3 | import logging
4 | import random
5 |
6 | from db_connection import connect, getMultipleKeys, listKeys, listVersions
7 | from record_explorer import RecordExplorer
8 | from rich.text import Text
9 | from textual import on, work
10 | from textual.app import App, ComposeResult
11 | from textual.binding import Binding
12 | from textual.containers import Horizontal, Vertical
13 | from textual.reactive import reactive
14 | from textual.suggester import SuggestFromList
15 | from textual.widget import Widget
16 | from textual.widgets import (
17 | Button,
18 | DataTable,
19 | Footer,
20 | Header,
21 | Input,
22 | Static,
23 | TabbedContent,
24 | TabPane,
25 | )
26 |
27 | logging.basicConfig(level=logging.DEBUG)
28 |
29 |
class ListKeysWidget(Widget):
    """Table of keys in the current collection, side by side with a
    KeyInfoWidget showing details of the highlighted key."""

    def __init__(self, stub, **kwargs):
        super().__init__(**kwargs)
        # gRPC FossilDB stub, passed on to child widgets
        self.stub = stub

    def compose(self) -> ComposeResult:
        with Horizontal(id="horizontal-list"):
            yield DataTable(id="list-keys-table")
            info_widget = KeyInfoWidget(id="key-info")
            info_widget.key = "No key selected"
            info_widget.stub = self.stub
            yield info_widget

    @on(DataTable.CellHighlighted)
    def on_data_table_row_highlighted(self, event: DataTable.CellHighlighted) -> None:
        selected = event.coordinate
        key_coordinate = (selected[0], 0)
        key = self.query_one(DataTable).get_cell_at(key_coordinate)
        # Last row always contains meta information, not a real key
        if key_coordinate[0] != len(self.query_one(DataTable).rows) - 1:
            self.query_one(KeyInfoWidget).update_key(key)
            self.refresh()
        # removed a dead trailing `pass` statement
53 |
54 |
class FileNameHint(Widget):
    # Reactive filename shown in the download hint; changing it re-renders.
    filename = reactive("none")

    def render(self) -> str:
        return f"Press the button above to download the value for the selected key to {self.filename}"
60 |
61 |
class KeyInfoWidget(Widget):
    """Detail panel for the currently selected key: lists its versions and
    offers opening a RecordExplorer tab for it.

    NOTE(review): `stub` and `collection` are assigned externally (by
    ListKeysWidget.compose and RecordBrowser.refresh_data) before use.
    """

    # Class-level defaults; instances overwrite these attributes.
    key = ""
    key_save_filename = ""
    versions = []

    def update_key(self, key: str) -> None:
        # Refreshes the panel for the given key; empty string clears selection.
        self.key = key

        key_info_label = self.query_one("#key-info-label")

        if key == "":
            key_info_label.update("No key selected")
            self.query_one("#explore-button").disabled = True
            return

        key_info_text = Text("Key: ", style="bold magenta")
        key_info_text.append(Text(key, style="bold white"))
        self.query_one("#explore-button").disabled = False

        # Pagination placeholder row, not a real key
        if key == "More keys on the next page...":
            return

        try:
            # was: listVersions(stub, ...) — relied on a module-level `stub`;
            # use the instance attribute assigned by ListKeysWidget instead.
            self.versions = listVersions(self.stub, self.collection, key)
            self.versions.sort()
            num_versions = (
                f"{len(self.versions)} versions"
                if len(self.versions) > 1
                else "1 version"
            )
            key_info_text.append(Text(f"\n{num_versions}: "))
            versions_to_list = self.versions
            if len(self.versions) > 500:
                versions_to_list = self.versions[-500:]
                key_info_text.append(
                    Text("Showing only last 500 versions. ", style="italic")
                )
            key_info_text.append(
                Text(",".join(map(str, versions_to_list)), style="bold white")
            )
            self.sanitized_key_name = RecordExplorer.sanitize_filename(
                f"{self.collection}_{key}_{self.versions[-1]}"
            )
            self.app.query_one(RecordBrowser).update_table_version_number(
                key, len(self.versions)
            )
        except Exception as e:
            key_info_text.append(Text("\nCould not load versions: " + str(e)))
        key_info_label.update(key_info_text)

    def explore_key(self) -> None:
        # Add new Explorer tab (or re-activate an existing one for this key)
        tabbed_content = self.app.query_one(TabbedContent)
        tab_id = f"record_explorer_tab_{self.sanitized_key_name}"

        record_explorer = RecordExplorer(
            stub=self.stub,
            key=self.key,
            collection=self.collection,
            id=f"record-explorer-{self.sanitized_key_name}",
        )

        if not tabbed_content.query(f"#{tab_id}"):
            tabbed_content.add_pane(
                TabPane(
                    "Record Explorer " + self.sanitized_key_name,
                    record_explorer,
                    id=tab_id,
                )
            )
        # Set the active tab
        tabbed_content.active = tab_id

        def focus_explorer():
            record_explorer.acquire_focus()

        # By default focus is on the tabbed content, so shortcuts of the record explorer can not be used
        # Use a timer to wait for the explorer to be fully built
        self.set_timer(0.3, focus_explorer)

    def on_button_pressed(self, event: Button.Pressed) -> None:
        event.stop()
        if event.button.id == "explore-button":
            self.explore_key()

    def compose(self) -> ComposeResult:
        with Vertical():
            yield Static(id="key-info-label")
            yield Button(label="Explore record (e)", id="explore-button", disabled=True)
151 |
152 |
153 | class RecordBrowser(Static):
154 |
155 | BINDINGS = [
156 | ("q", "quit", "Quit the client"),
157 | ("r", "refresh", "Refresh the data"),
158 | Binding(
159 | "pagedown",
160 | "show_next",
161 | f"Show next page of keys",
162 | priority=True,
163 | show=True,
164 | ),
165 | Binding(
166 | "j",
167 | "show_next",
168 | f"Show next page of keys",
169 | show=True,
170 | ),
171 | Binding(
172 | "pageup",
173 | "show_prev",
174 | f"Show previous page of keys",
175 | priority=True,
176 | show=True,
177 | ),
178 | Binding(
179 | "k",
180 | "show_prev",
181 | f"Show previous page of keys",
182 | show=True,
183 | ),
184 | Binding("down", "next_key", "Select the next key", priority=True, show=False),
185 | Binding("up", "prev_key", "Select the previous key", priority=True, show=False),
186 | Binding("c", "go_to_collection_selection", "Select collection", show=True),
187 | Binding("e", "explore_key", "Explore the selected key", show=True),
188 | ]
189 |
190 | more_keys_available = False
191 | prefix = ""
192 | collection = "volumeData"
193 |
194 | performance_mode = True
195 |
196 | # found_keys stores all found keys of the current collection / prefix
197 | found_keys = []
198 |
199 | # the offset of the current key list
200 | query_offset = 0
201 |
202 | knownCollections = [
203 | "skeletons",
204 | "skeletonTreeBodies",
205 | "volumes",
206 | "volumeData",
207 | "volumeSegmentIndex",
208 | "editableMappingsInfo",
209 | "editableMappingsAgglomerateToGraph",
210 | "editableMappingsSegmentToAgglomerate",
211 | "annotations",
212 | "annotationUpdates",
213 | ]
214 |
    def __init__(
        self,
        stub,
        collection: str,
        prefix: str,
        key_list_limit: int,
        performance_mode: bool,
        **kwargs,
    ):
        # stub: gRPC FossilDB stub
        # key_list_limit: page size for key listing
        # performance_mode: when True, skip per-key version counts and key-count estimation
        super().__init__(**kwargs)
        self.stub = stub
        self.collection = collection
        self.prefix = prefix
        self.key_list_limit = key_list_limit
        self.performance_mode = performance_mode
230 |
    def compose(self) -> ComposeResult:
        # Collection selector, prefix filter, and the key list with detail panel.
        with Vertical():
            yield Input(
                placeholder="Select collection:",
                id="collection",
                value=self.collection,
                suggester=SuggestFromList(self.knownCollections),
            )
            yield Input(
                placeholder="Find keys with prefix: (leave empty to list all keys)",
                value=self.prefix,
                id="prefix",
            )
            yield ListKeysWidget(id="list-keys", stub=self.stub)
245 |
    def reset_local_keys(self) -> None:
        # Clears the key selection and pagination state.
        self.query_one(KeyInfoWidget).update_key("")
        self.query_offset = 0
        self.found_keys = []
250 |
    @on(Input.Submitted)
    def on_input_submitted(self, event: Input.Submitted) -> None:
        # Collection or prefix changed: reset pagination and reload the table.
        if event.input.id == "collection":
            self.collection = event.input.value
        if event.input.id == "prefix":
            self.prefix = event.input.value
        self.reset_local_keys()
        self.refresh_data()
259 |
    @work
    async def load_version_number(self, key: str, key_index: int) -> None:
        # Fills the "#versions" cell for one key, in a background worker.
        table = self.query_one(DataTable)
        # Stagger requests randomly; having all updates at once slows down the app
        await asyncio.sleep(
            random.random() * 2
        )
        try:
            versions = listVersions(self.stub, self.collection, key)
            numVersions = len(versions)
            table.update_cell_at((key_index, 1), str(numVersions))
        except Exception as e:
            table.update_cell_at((key_index, 1), "Could not load versions: " + str(e))
272 |
    def update_table_version_number(self, key: str, version_number: int) -> None:
        # Updates the "#versions" cell of the row displaying `key`, if present.
        table = self.query_one(DataTable)
        for row_index, row_key in enumerate(table.rows.keys()):
            content = table.get_row(row_key)
            if content[0] == key:
                table.update_cell_at((row_index, 1), str(version_number))
                break
280 |
    def fetch_keys(self, query_after_key: str, limit: int) -> list:
        # Returns up to limit+1 keys after query_after_key; the caller uses the
        # extra key to detect whether another page exists.
        if self.prefix != "":
            found_keys = getMultipleKeys(
                self.stub,
                self.collection,
                self.prefix,
                query_after_key,
                limit + 1,  # +1 to check if there are more keys
            )
        else:
            found_keys = listKeys(
                self.stub, self.collection, query_after_key, limit + 1
            )
        return found_keys
295 |
    def refresh_data(self) -> None:
        """Refresh the data in the table: fetch one page of keys and render it."""
        table = self.query_one(DataTable)
        self.query_one(KeyInfoWidget).collection = self.collection
        table.clear(columns=True)
        table.add_column("key")
        table.add_column("#versions")

        # NOTE(review): `app` is a module-level global here, not self.app — confirm
        app.sub_title = f"Collection: {self.collection}"

        # Query offset is the index of the key that will be the first key in the new list
        if self.query_offset != 0:
            query_after_key = self.found_keys[self.query_offset - 1]
        else:
            query_after_key = ""

        try:
            result_keys = self.fetch_keys(query_after_key, self.key_list_limit)
            self.more_keys_available = False
            if len(result_keys) > self.key_list_limit:
                self.more_keys_available = True
                # drop the extra "+1" probe key fetched only for page detection
                result_keys.pop()

            if self.query_offset == 0:
                # First query, replace the list
                self.found_keys = result_keys
            elif self.query_offset < len(self.found_keys):
                # Querying keys that we already know, update the list
                for i in range(len(result_keys)):
                    self.found_keys[self.query_offset + i] = result_keys[i]
            else:
                self.found_keys.extend(result_keys)

            if self.more_keys_available:
                if not self.performance_mode:
                    self.estimate_key_count()

            for i, key in enumerate(result_keys):
                label = Text(str(i + self.query_offset), style="#B0FC38 italic")
                table.add_row(key, "", label=label)
                # Asynchronously fetch the number of versions for each key
                if not self.performance_mode:
                    self.load_version_number(key, i)
            if self.more_keys_available:
                table.add_row(
                    f"Found more than {self.key_list_limit} keys, more on the next page...",
                    "",
                    label=Text("...", style="#B0FC38 italic"),
                )
            else:
                self.more_keys_row_key = None
                table.add_row(
                    f"Found {len(self.found_keys)} keys",
                    "",
                    label=Text("EOF", style="#B0FC38 italic"),
                )
            table.focus()
        except Exception as e:
            # "No store for column family" is FossilDB's unknown-collection error
            if "No store for column family" in str(e):
                table.add_row("Collection not found: " + self.collection)
            else:
                table.add_row("Could not load keys: " + str(e))
358 |
    @work(exclusive=True)
    async def estimate_key_count(self) -> None:
        """Estimate the number of keys in the collection.

        Pages through the remaining keys in batches of KEY_LIST_LIMIT,
        updating the trailing "more keys" row with a running count after each
        batch. Runs as an exclusive Textual worker, so a newer estimate
        cancels a still-running older one.
        """

        def update_count(count, more_available=False):
            # Write the running count into the trailing "more..." status row.
            if self.more_keys_available:
                # This note is only shown if there are more keys available
                table = self.query_one(DataTable)
                more_keys_coords = (self.key_list_limit, 0)
                if more_available:
                    # Budget exhausted: the count is only a lower bound.
                    table.update_cell_at(
                        more_keys_coords,
                        f"Found at least {count} keys, more on the next page...",
                    )
                else:
                    table.update_cell_at(
                        more_keys_coords,
                        f"Found {count} keys, more on the next page...",
                    )

        # For huge collections, don't request more than TOTAL_REQUEST_COUNT times, as to not overload the server
        TOTAL_REQUEST_COUNT = 25
        KEY_LIST_LIMIT = 100
        request_count = 0
        count = len(self.found_keys)
        # NOTE(review): assumes found_keys is non-empty; callers invoke this
        # only when more_keys_available is True, so a last key should exist.
        last_key = self.found_keys[-1]
        while request_count < TOTAL_REQUEST_COUNT:
            # fetch_keys asks for KEY_LIST_LIMIT + 1 keys (paging probe).
            keys = self.fetch_keys(last_key, KEY_LIST_LIMIT)
            count += len(keys)
            if len(keys) < KEY_LIST_LIMIT:
                break
            last_key = keys[-1]
            request_count += 1
            update_count(count)
            # Wait here to not send all requests at once
            await asyncio.sleep(0.2)
        update_count(count, more_available=request_count == TOTAL_REQUEST_COUNT)
396 |
397 | def _on_mount(self, event):
398 | # Used when the collection is specified using the -c argument
399 | if self.collection != "":
400 | self.refresh_data()
401 | return super()._on_mount(event)
402 |
403 | def action_quit(self) -> None:
404 | """An action to quit the app."""
405 | self.app.exit()
406 |
407 | def action_refresh(self) -> None:
408 | """An action to refresh the data."""
409 | self.refresh_data()
410 |
411 | def action_show_next(self) -> None:
412 | """An action to show the next key_list_limit keys."""
413 | if self.more_keys_available:
414 | self.query_offset += self.key_list_limit
415 | self.refresh_data()
416 |
417 | def action_show_prev(self) -> None:
418 | """An action to show the previous key_list_limit keys."""
419 | if self.query_offset > 0:
420 | self.query_offset -= self.key_list_limit
421 | self.refresh_data()
422 |
423 | def action_next_key(self) -> None:
424 | """An action to select the next key."""
425 | table = self.query_one(DataTable)
426 | current_row = table.cursor_coordinate.row
427 | if current_row < self.key_list_limit - 1:
428 | table.cursor_coordinate = (current_row + 1, table.cursor_coordinate.column)
429 | else:
430 | self.action_show_next()
431 |
432 | def action_prev_key(self) -> None:
433 | """An action to select the previous key."""
434 | table = self.query_one(DataTable)
435 | current_row = table.cursor_coordinate.row
436 | if current_row > 0:
437 | table.cursor_coordinate = (current_row - 1, table.cursor_coordinate.column)
438 | else:
439 | if self.query_offset > 0:
440 | self.action_show_prev()
441 | table.cursor_coordinate = (
442 | len(table.rows) - 2, # -1 for last row, -1 for the More keys row
443 | table.cursor_coordinate.column,
444 | )
445 |
446 | def action_go_to_collection_selection(self) -> None:
447 | """An action to select the collection."""
448 | self.query_one("#collection").focus()
449 |
450 | def action_explore_key(self) -> None:
451 | """An action to explore the selected key."""
452 | self.query_one(KeyInfoWidget).explore_key()
453 |
454 |
class FossilDBClient(App):
    """A Textual app to manage FossilDB databases."""

    CSS_PATH = "client.tcss"

    title = "FossilDB Client"

    def __init__(self, stub, collection, prefix, count, performance_mode):
        super().__init__()
        # gRPC stub plus browsing configuration taken from the CLI.
        self.stub = stub
        self.collection = collection
        self.prefix = prefix
        # The CLI may hand us a string; normalize to int exactly once, here.
        self.key_list_limit = int(count)
        self.performance_mode = performance_mode

    def compose(self) -> ComposeResult:
        """Lay out header, the tabbed record browser, and footer."""
        yield Header()
        with TabbedContent(id="main-tabs", initial="main-tab"):
            with TabPane(id="main-tab", title="FossilDB Browser"):
                browser = RecordBrowser(
                    id="record-browser",
                    stub=self.stub,
                    collection=self.collection,
                    prefix=self.prefix,
                    key_list_limit=self.key_list_limit,
                    performance_mode=self.performance_mode,
                )
                yield browser
        yield Footer()

    def action_set_version(self, version) -> None:
        """An action to set the version of the record.

        Defined here because we can only access the app from the link.
        It would be better to update the version here directly, but the
        client crashes when doing so.
        """
        self.query_one(RecordExplorer).set_version_in_selector(version)
491 |
492 |
def init_argument_parser() -> argparse.ArgumentParser:
    """Build the CLI parser for the interactive FossilDB client.

    Positional `host` is optional and defaults to localhost:7155.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "host",
        help="fossildb host and ip, e.g. localhost:7155",
        default="localhost:7155",
        nargs="?",
    )
    arg_parser.add_argument(
        "-c", "--collection", help="collection to use", default=""
    )
    arg_parser.add_argument(
        "-p", "--prefix", help="prefix to search for", default=""
    )
    arg_parser.add_argument(
        "-n", "--count", help="number of keys to list", default=40
    )
    # Performance mode is used to speed up the app by not requesting and
    # updating too much. On by default, so you can disable it with the flag.
    arg_parser.add_argument(
        "--no-performance-mode",
        help="Disable performance mode",
        action="store_true",
        default=False,
    )
    return arg_parser
515 |
516 |
if __name__ == "__main__":
    # Parse CLI arguments, connect to FossilDB (connect() exits the process
    # with status 1 if the server is unreachable), then start the Textual UI.
    parser = init_argument_parser()
    args = parser.parse_args()
    stub = connect(args.host)
    # Performance mode is ON by default; --no-performance-mode disables it,
    # hence the negation when passing it on.
    app = FossilDBClient(
        stub, args.collection, args.prefix, args.count, not args.no_performance_mode
    )
    app.run()
525 |
--------------------------------------------------------------------------------
/client/interactive/client.tcss:
--------------------------------------------------------------------------------
1 | DownloadNotification #list-keys {
2 | layout: horizontal;
3 | color: red;
4 | }
5 |
6 | #key-info {
7 | width: 30%;
8 | margin-left: 1;
9 | }
10 |
11 | #key-info-label {
12 | padding: 2 0
13 | }
14 |
15 | Button {
16 | width: 90%;
17 | }
18 |
19 | /* Record Explorer */
20 |
21 | #record-explorer-display {
22 | width: 70%;
23 | overflow: auto;
24 | }
25 |
26 | #hexdump-table {
27 | width: 70%;
28 | overflow: auto;
29 | }
30 |
31 | #record-explorer-info-panel {
32 | width: 30%;
33 | overflow: auto;
34 | }
35 |
36 | #version-selection {
37 |
38 | }
39 |
40 | #select-button {
41 | margin: 1 0 1 1
42 | }
43 |
44 | #version_list {
45 | }
46 |
47 | .fixed64 {
48 | background: #828C51;
49 | }
50 |
51 | .varint {
52 | background: #002626;
53 | }
54 |
55 | .string {
56 | background: #2E0E02;
57 | }
58 |
59 | .protobuf {
60 | background: #56667A
61 | }
--------------------------------------------------------------------------------
/client/interactive/db_connection.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | import fossildbapi_pb2 as proto
4 | import fossildbapi_pb2_grpc as proto_rpc
5 | import grpc
6 |
7 | MAX_MESSAGE_LENGTH = 1073741824
8 |
9 |
def connect(host: str) -> proto_rpc.FossilDBStub:
    """Open an insecure gRPC channel to `host` and return a health-checked stub.

    Exits the process (via testHealth) if the server cannot be reached.
    """
    channel_options = [
        ("grpc.max_send_message_length", MAX_MESSAGE_LENGTH),
        ("grpc.max_receive_message_length", MAX_MESSAGE_LENGTH),
    ]
    channel = grpc.insecure_channel(host, options=channel_options)
    stub = proto_rpc.FossilDBStub(channel)
    testHealth(stub, "destination fossildb at {}".format(host))
    return stub
21 |
22 |
def testHealth(stub, label: str) -> None:
    """Probe the server's Health endpoint; exit(1) if it is unreachable."""
    try:
        assertSuccess(stub.Health(proto.HealthRequest()))
        print("successfully connected to " + label)
    except Exception as e:
        print("failed to connect to " + label + ": " + str(e))
        sys.exit(1)
31 |
32 |
def assertSuccess(reply) -> None:
    """Raise with the server-provided message when a reply signals failure."""
    if reply.success:
        return
    raise Exception("reply.success failed: " + reply.errorMessage)
36 |
37 |
def listKeys(
    stub: proto_rpc.FossilDBStub, collection: str, startAfterKey: str, limit: int
):
    """List up to `limit` keys of `collection`, starting after `startAfterKey`.

    An empty startAfterKey means "start at the beginning" and is omitted
    from the request.
    """
    request_kwargs = {"collection": collection, "limit": limit}
    if startAfterKey != "":
        request_kwargs["startAfterKey"] = startAfterKey
    reply = stub.ListKeys(proto.ListKeysRequest(**request_kwargs))
    assertSuccess(reply)
    return reply.keys
51 |
52 |
def getKey(stub: proto_rpc.FossilDBStub, collection: str, key: str, version: int):
    """Fetch the value of `key` at `version`; raises on server-side failure."""
    request = proto.GetRequest(collection=collection, key=key, version=version)
    reply = stub.Get(request)
    assertSuccess(reply)
    return reply.value
57 |
58 |
def getMultipleKeys(
    stub: proto_rpc.FossilDBStub,
    collection: str,
    prefix: str,
    startAfterKey: str,
    limit: int,
):
    """List keys of `collection` matching `prefix`, starting after `startAfterKey`.

    An empty startAfterKey means "start at the beginning" and is omitted
    from the request.
    """
    request_kwargs = {"collection": collection, "prefix": prefix, "limit": limit}
    if startAfterKey != "":
        request_kwargs["startAfterKey"] = startAfterKey
    reply = stub.GetMultipleKeys(proto.GetMultipleKeysRequest(**request_kwargs))
    assertSuccess(reply)
    return reply.keys
83 |
84 |
def listVersions(stub: proto_rpc.FossilDBStub, collection: str, key: str):
    """Return all stored version numbers of `key` in `collection`."""
    request = proto.ListVersionsRequest(collection=collection, key=key)
    reply = stub.ListVersions(request)
    assertSuccess(reply)
    return reply.versions
89 |
90 |
def deleteVersion(
    stub: proto_rpc.FossilDBStub, collection: str, key: str, version: int
) -> None:
    """Delete a single version of `key`; raises on server-side failure."""
    request = proto.DeleteRequest(collection=collection, key=key, version=version)
    assertSuccess(stub.Delete(request))
98 |
99 |
def main() -> None:
    """Smoke test: connect to a local FossilDB and list a few keys."""
    # BUG FIX: connect() requires a host argument; this was previously
    # called as connect() with no arguments, raising a TypeError.
    stub = connect("localhost:7155")
    print(stub.ListKeys(proto.ListKeysRequest(collection="volumeData", limit=10)))


if __name__ == "__main__":
    main()
107 |
--------------------------------------------------------------------------------
/client/interactive/modal.tcss:
--------------------------------------------------------------------------------
1 | DownloadNotification {
2 | align: center middle;
3 | }
4 |
5 | #dialog {
6 | padding: 0 1;
7 | margin: 5;
8 | width: 100;
9 | height: 11;
10 | border: thick $background 80%;
11 | background: $surface;
12 | }
13 |
14 | #question {
15 | column-span: 2;
16 | height: 1fr;
17 | width: 1fr;
18 | content-align: center middle;
19 | }
20 |
21 | #okay {
22 | margin: 3 0 0 0;
23 | }
24 |
25 | Button {
26 | width: 100%;
27 | }
28 |
--------------------------------------------------------------------------------
/client/interactive/record_explorer.py:
--------------------------------------------------------------------------------
1 | import re
2 | from typing import Generator
3 |
4 | from db_connection import deleteVersion, getKey, listVersions
5 | from protobuf_decoder.protobuf_decoder import Parser
6 | from rich.text import Text
7 | from textual import on
8 | from textual.app import ComposeResult
9 | from textual.binding import Binding
10 | from textual.containers import Horizontal, Vertical
11 | from textual.css.query import NoMatches
12 | from textual.reactive import reactive
13 | from textual.screen import Screen
14 | from textual.widgets import (
15 | Button,
16 | Collapsible,
17 | DataTable,
18 | Input,
19 | Label,
20 | Rule,
21 | Static,
22 | TabbedContent,
23 | )
24 |
25 |
class RecordExplorer(Static):
    """Explorer widget for a single versioned FossilDB record.

    Decodes the record's protobuf wire format for display (falling back to a
    hex dump when the bytes do not parse) and offers an info panel to select,
    download, and delete individual versions of the record.
    """

    BINDINGS = [
        # FIX: this was previously a set literal; bindings should be an
        # ordered list (sets have no defined iteration order).
        Binding("d", "download_data", "Download the selected version", show=True),
        Binding("delete", "delete_data", "Delete the selected version", show=True),
        Binding("k", "next_version", "Next version", show=True),
        Binding("j", "previous_version", "Previous version", show=True),
        Binding("x", "close_tab", "Close tab", show=True),
    ]

    def __init__(self, stub, key: str, collection: str, **kwargs):
        super().__init__(**kwargs)
        self.stub = stub
        self.key = key
        self.collection = collection
        self.parser = Parser()
        # FIX: the value cache is now per instance. It used to be a class
        # attribute shared by ALL RecordExplorer instances and keyed by
        # version only, so two explorers showing different keys/collections
        # could serve each other's cached bytes.
        self.cached_data = {}

        try:
            self.versions = listVersions(stub, self.collection, key)
            self.versions.sort()
            self.selected_version = self.versions[-1]
        except Exception as e:
            # Best effort, matching previous behavior: log and continue.
            # NOTE(review): self.versions/self.selected_version stay unset in
            # this case and later accesses will raise AttributeError.
            print("Could not load versions: " + str(e))

    def get_data(self) -> bytes:
        """Return the bytes of the selected version, fetching at most once."""
        if self.selected_version in self.cached_data:
            return self.cached_data[self.selected_version]
        self.cached_data[self.selected_version] = getKey(
            self.stub, self.collection, self.key, self.selected_version
        )
        return self.cached_data[self.selected_version]

    @staticmethod
    def sanitize_filename(name: str) -> str:
        """Make a string safe for use as a file name (non-word chars -> '_').

        FIX: marked @staticmethod; it takes no self and is called via the class.
        """
        s = str(name).strip().replace(" ", "_")
        return re.sub(r"(?u)[^-\w.]", "_", s)

    def get_filename(self) -> str:
        """File name used when downloading the currently selected version."""
        sanitized_key = RecordExplorer.sanitize_filename(
            f"{self.collection}_{self.key}_{self.selected_version}"
        )
        return f"{sanitized_key}.bin"

    def action_download_data(self) -> None:
        """Write the selected version to disk and show a notification."""
        data = self.get_data()
        with open(self.get_filename(), "wb") as f:
            f.write(data)
        self.app.push_screen(DownloadNotification(filename=self.get_filename()))

    def action_delete_data(self) -> None:
        """Ask for confirmation, then delete the selected version."""

        async def delete_callback(result: bool):
            # Runs after the modal is dismissed; result is the user's answer.
            if result:
                deleteVersion(
                    self.stub, self.collection, self.key, self.selected_version
                )
                self.versions = listVersions(self.stub, self.collection, self.key)
                self.versions.sort()
                if len(self.versions) == 0:
                    # No versions left: close the explorer screen.
                    self.app.pop_screen()
                else:
                    self.selected_version = self.versions[-1]
                    await self.recompose()

        self.app.push_screen(
            DeleteModal(
                stub=self.stub,
                collection=self.collection,
                key=self.key,
                version=self.selected_version,
            ),
            delete_callback,
        )

    def display_record(self) -> Vertical:
        """Render the selected version: parsed wire format, or hex dump fallback."""
        data = self.get_data()
        parsed = self.parser.parse(data.hex()).to_dict()
        if len(parsed["results"]) == 0 and "remain_data" in parsed:
            # Nothing parsed as protobuf: show the raw bytes instead.
            return self.render_hex_dump(parsed["remain_data"])
        return Vertical(*self.render_wire(parsed), id="record-explorer-display")

    def decode_varint(self, value: int) -> list:
        """Offer alternative integer interpretations of a decoded varint.

        Returns a list of {"type", "value"} dicts: the raw unsigned value,
        any differing two's-complement readings (int8/16/32/64), and the
        zigzag ("sint"-style) reading when it differs.
        """

        def interpret_as_twos_complement(val: int, bits: int) -> int:
            """Interprets an unsigned integer as a two's complement signed integer with the given bit width."""
            if val >= (1 << (bits - 1)):
                return val - (1 << bits)
            return val

        def interpret_as_signed_type(val: int) -> int:
            """Custom logic to interpret as a signed type (similar to sint in proto)."""
            if val % 2 == 0:
                return val // 2
            else:
                return -(val // 2) - 1

        result = []
        uint_val = value
        result.append({"type": "uint", "value": str(uint_val)})

        for bits in [8, 16, 32, 64]:
            int_val = interpret_as_twos_complement(uint_val, bits)
            if int_val != uint_val:
                result.append({"type": f"int{bits}", "value": str(int_val)})

        signed_int_val = interpret_as_signed_type(uint_val)
        if signed_int_val != uint_val:
            result.append({"type": "sint", "value": str(signed_int_val)})

        return result

    def render_hex_dump(self, data: str) -> DataTable:
        """Render raw bytes as a classic 16-bytes-per-row hex dump table.

        `data` is a whitespace-separated string of hex byte values, as
        produced by the protobuf parser's "remain_data" field.
        """
        # FIX: removed leftover debug print("render hexdump!").
        table = DataTable(id="hexdump-table", zebra_stripes=True)
        table.add_column("Bytes")
        table.add_column("ASCII")

        hex_str = data.replace(
            "\n", " "
        ).split()  # Convert the data into a list of bytes
        addr = 0

        while hex_str:
            line_data = hex_str[:16]  # Process 16 bytes per line
            hex_bytes = " ".join(f"{b}" for b in line_data)
            # Printable ASCII range is 32..126; everything else becomes '.'.
            ascii_repr = "".join(
                chr(int(b, 16)) if 32 <= int(b, 16) <= 126 else "." for b in line_data
            )

            table.add_row(hex_bytes, ascii_repr, label=f"{addr:07x}")
            hex_str = hex_str[16:]  # Move to the next 16 bytes
            addr += 16
        return table

    def render_wire(self, spec: dict) -> Generator[Collapsible, None, None]:
        """Yield one Collapsible per decoded wire-format field, recursing
        into length-delimited fields that parsed as nested protobuf."""
        for field in spec["results"]:
            field_number = field["field"]
            field_type = field["wire_type"]
            field_data = field["data"]
            if field_type == "length_delimited":
                yield Collapsible(
                    *self.render_wire(field_data),
                    title=f"Field {field_number} (Type protobuf)",
                    classes="protobuf",
                )
            elif field_type == "varint":
                values = self.decode_varint(field_data)
                text = "\n".join([f"({val['type']}) {val['value']}" for val in values])
                yield Collapsible(
                    Static(text),
                    title=f"Field {field_number} (Type {field_type})",
                    classes="varint",
                )
            elif field_type == "fixed64":
                double = field_data["value"]
                integer = field_data["signed_int"]
                yield Collapsible(
                    Static(f"(double) {double}\n(int) {integer}"),
                    title=f"Field {field_number} (Type {field_type})",
                    classes="fixed64",
                )
            elif field_type == "string":
                yield Collapsible(
                    Label(f"{field_data}"),
                    title=f"Field {field_number} (Type {field_type})",
                    classes="string",
                )
            else:
                yield Collapsible(
                    Label(f"{field_data}"),
                    title=f"Field {field_number} (Type {field_type})",
                )

    def render_version_list(self) -> Static:
        """Render the list of available versions, as clickable links when small."""
        versions = list(self.versions)
        if len(versions) >= 500:
            # Do not render links if there are too many versions (slows everything down)
            return Static(
                "Available versions: " + ", ".join(f"{v}" for v in versions),
                id="version_list",
            )
        return Static(
            "Available versions: "
            + ", ".join(f"[@click=app.set_version({v})]{v}[/]" for v in versions),
            id="version_list",
        )

    def render_info_panel(self) -> Vertical:
        """Build the right-hand panel: record info, actions, version selector."""
        info_text = Text("Exploring record/wire for ")
        info_text.append(self.collection, style="bold magenta")
        info_text.append(":")
        info_text.append(self.key, style="bold magenta")
        info_text.append(".\nCurrently viewing version ")
        info_text.append(str(self.selected_version), style="bold blue")

        return Vertical(
            Static(info_text),
            Rule(),
            Button("Download selected version", id="download-button"),
            Button("Delete selected version", id="delete-button"),
            Rule(),
            Label("Select a version to view"),
            Input(value=str(self.selected_version), id="version-selection"),
            Button("Select", id="select-button"),
            self.render_version_list(),
            id="record-explorer-info-panel",
        )

    def compose(self) -> ComposeResult:
        """Record display on the left, info panel on the right."""
        with Horizontal():
            yield self.display_record()
            yield self.render_info_panel()

    @on(Input.Submitted)
    async def on_input_submitted(self, event: Input.Submitted) -> None:
        """Switch versions when the user submits the version input field."""
        if event.input.id == "version-selection":
            try:
                version = int(event.input.value)
                await self.set_version(version)
            except ValueError:
                # Ignore non-numeric input.
                pass

    async def set_version(self, version: int) -> None:
        """Select `version` and re-render, ignoring unknown version numbers."""
        if version in self.versions:
            self.selected_version = version
            await self.recompose()

    def set_version_in_selector(self, version: int) -> None:
        """Called from app, triggered via link on version number."""
        self.query_one(Input).value = str(version)

    async def on_button_pressed(self, event) -> None:
        """Dispatch the info-panel buttons to their actions."""
        if event.button.id.startswith("version_"):
            version = int(event.button.id.split("_")[1])
            self.selected_version = version
            await self.recompose()
        if event.button.id == "download-button":
            self.action_download_data()
        if event.button.id == "delete-button":
            self.action_delete_data()
        if event.button.id == "select-button":
            try:
                version = int(self.query_one(Input).value)
                await self.set_version(version)
            except ValueError:
                pass

    async def action_previous_version(self) -> None:
        """Select the next-older version, if any."""
        current_index = list(self.versions).index(self.selected_version)
        if current_index == 0:
            return
        await self.set_version(self.versions[current_index - 1])

    async def action_next_version(self) -> None:
        """Select the next-newer version, if any."""
        current_index = list(self.versions).index(self.selected_version)
        if current_index == len(self.versions) - 1:
            return
        await self.set_version(self.versions[current_index + 1])

    def action_close_tab(self) -> None:
        """Close this explorer's tab and return to the main browser tab."""
        tabbed_content = self.app.query_one(TabbedContent)
        tabbed_content.active = "main-tab"
        tabbed_content.remove_pane(self.parent.id)

    def acquire_focus(self) -> None:
        """Focus the hex dump table if present, else the first collapsible."""
        try:
            table = self.query_one(DataTable)
            table.focus()
        except NoMatches:
            collapsible = self.query_one(Collapsible)
            collapsible.focus()
295 |
296 |
class DownloadNotification(Screen):
    """Modal screen telling the user where a downloaded version was written."""

    CSS_PATH = "modal.tcss"

    def __init__(self, name=None, id=None, classes=None, filename=None, **kwargs):
        super().__init__(name, id, classes)
        # Path the record version was written to; shown in the dialog text.
        self.filename = filename

    def compose(self) -> ComposeResult:
        note = Static(f"The version has been stored in {self.filename}", id="note")
        okay = Button("Okay", variant="primary", id="okay")
        yield Vertical(note, okay, id="dialog")

    def on_button_pressed(self, event: Button.Pressed) -> None:
        """Close the notification on confirmation."""
        if event.button.id == "okay":
            self.app.pop_screen()
316 |
317 |
class DeleteModal(Screen):
    """Confirmation dialog for deleting one version of a record.

    Dismissed with True when the user confirms, False otherwise; the actual
    deletion is performed by the caller's dismiss callback.
    (Previous docstring was a copy-paste from DownloadNotification.)
    """

    CSS_PATH = "modal.tcss"

    def __init__(
        self,
        name=None,
        id=None,
        classes=None,
        collection=None,
        key=None,
        version=None,
        **kwargs,
    ):
        super().__init__(name, id, classes)
        # Identify the record version the user is asked to confirm deleting.
        self.collection = collection
        self.key = key
        self.version = version

    def compose(self) -> ComposeResult:
        yield Vertical(
            Label(
                f"Are you sure you want to delete version {self.version} of {self.collection}:{self.key}?",
                id="note",
            ),
            Button("Yes", variant="error", id="yes"),
            Button("No", variant="primary", id="no"),
            id="dialog",
        )

    def on_button_pressed(self, event: Button.Pressed) -> None:
        # Resolve the modal with the user's answer.
        if event.button.id == "yes":
            self.dismiss(True)
        if event.button.id == "no":
            self.dismiss(False)
354 |
--------------------------------------------------------------------------------
/client/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "client"
3 | version = "0.1.0"
4 | description = "FossilDB interactive client"
5 | readme = "README.md"
6 | requires-python = ">=3.10"
7 | dependencies = [
8 | "argparse>=1.4.0",
9 | "grpcio-health-checking>=1.70.0",
10 | "grpcio-tools>=1.70.0",
11 | "protobuf-decoder>=0.4.0",
12 | "rich>=13.9.4",
13 | "textual>=2.1.2",
14 | "textual-dev>=1.7.0",
15 | ]
16 |
17 | [dependency-groups]
18 | dev = [
19 | "black>=25.1.0",
20 | "isort>=6.0.1",
21 | ]
22 |
--------------------------------------------------------------------------------
/client/update_api.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
set -Eeuo pipefail

# Regenerate the Python gRPC bindings from the server's proto definition and
# copy them into the interactive client package.

SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )"

# FIX: quote all path expansions so the script also works when the checkout
# path contains spaces.
python3 -m grpc_tools.protoc \
    -I "$SCRIPTPATH/../src/main/protobuf" \
    --python_out="$SCRIPTPATH" \
    --grpc_python_out="$SCRIPTPATH" \
    "$SCRIPTPATH/../src/main/protobuf/fossildbapi.proto"
cp "$SCRIPTPATH"/fossildbapi_pb2* "$SCRIPTPATH/interactive/"
8 |
--------------------------------------------------------------------------------
/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: '2.1'
2 |
3 | services:
4 | fossildb:
5 | image: scalableminds/fossildb:${FOSSILDB_TAG:-master}
6 | entrypoint: fossildb
7 | command:
8 | - -c
9 | - default
10 | ports:
11 | - "7155:7155"
12 | volumes:
13 | - "./data:/fossildb/data"
14 | - "./backup:/fossildb/backup"
15 |
16 | fossildb-health-check:
17 | image: scalableminds/fossildb-client:${FOSSILDB_CLIENT_TAG:-master}
18 | command:
19 | - fossildb
20 | - health
21 | links:
22 | - fossildb
23 |
24 | client:
25 | image: scalableminds/fossildb-client:${FOSSILDB_CLIENT_TAG:-master}
26 | volumes:
27 | - ".:/app"
28 | working_dir: /app
29 | entrypoint: /bin/bash
30 | network_mode: host
31 |
32 | sbt:
33 | image: scalableminds/sbt:${SBT_VERSION_TAG:-master__7830403826}
34 | environment:
35 | - USER_UID
36 | - USER_GID
37 | - TZ
38 | working_dir: /fossildb
39 | user: ${USER_UID:-1000}:${USER_GID:-1000}
40 | volumes:
41 | - ".:/fossildb"
42 |
--------------------------------------------------------------------------------
/fossildb:
--------------------------------------------------------------------------------
#!/bin/bash
set -Eeuo pipefail

# Launcher for the FossilDB server jar. When started as root (e.g. inside
# Docker), fixes ownership of the data/backup directories and drops
# privileges to the fossildb user via gosu.

FOSSILDB_HOME=$(dirname "$(readlink -f "$0")")

if [ "$(id -u)" = '0' ]; then
  for SUBDIR in data backup; do
    DIR=$FOSSILDB_HOME/$SUBDIR
    if [ -d "$DIR" ]; then
      chown -R fossildb "$DIR"
    fi
  done
  # FIX: use "$@" (quoted) instead of $@ so arguments containing spaces are
  # passed through intact; also quote the jar path.
  exec gosu fossildb java -jar "$FOSSILDB_HOME/fossildb.jar" "$@"
else
  exec java -jar "$FOSSILDB_HOME/fossildb.jar" "$@"
fi
17 |
--------------------------------------------------------------------------------
/project/assembly.sbt:
--------------------------------------------------------------------------------
1 | addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.14.6")
2 |
--------------------------------------------------------------------------------
/project/build.properties:
--------------------------------------------------------------------------------
1 | sbt.version = 1.9.6
2 |
--------------------------------------------------------------------------------
/project/buildinfo.sbt:
--------------------------------------------------------------------------------
1 | addSbtPlugin("com.eed3si9n" % "sbt-buildinfo" % "0.9.0")
2 |
--------------------------------------------------------------------------------
/project/plugins.sbt:
--------------------------------------------------------------------------------
1 | logLevel := Level.Warn
2 |
3 |
--------------------------------------------------------------------------------
/project/scalapb.sbt:
--------------------------------------------------------------------------------
1 | addSbtPlugin("com.thesamet" % "sbt-protoc" % "1.0.2")
2 |
3 | libraryDependencies += "com.thesamet.scalapb" %% "compilerplugin" % "0.11.15"
4 |
--------------------------------------------------------------------------------
/src/main/protobuf/fossildbapi.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto2";
2 |
3 | package com.scalableminds.fossildb.proto;
4 |
5 | message VersionedKeyValuePairProto {
6 | required string key = 1;
7 | required uint64 version = 2;
8 | required bytes value = 3;
9 | }
10 |
11 | message KeyVersionsValuesPairProto {
12 | required string key = 1;
13 | repeated VersionValuePairProto versionValuePairs = 2;
14 | }
15 |
16 | message VersionValuePairProto {
17 | required uint64 actualVersion = 1;
18 | required bytes value = 2;
19 | }
20 |
21 | message VersionValueBoxProto {
22 | optional VersionValuePairProto versionValuePair = 1;
23 | optional string errorMessage = 2;
24 | }
25 |
26 | message HealthRequest {}
27 | message HealthReply {
28 | required bool success = 1;
29 | optional string errorMessage = 2;
30 | }
31 |
32 | message GetRequest {
33 | required string collection = 1;
34 | required string key = 2;
35 | optional uint64 version = 3;
36 | optional bool mayBeEmpty = 4;
37 | }
38 |
39 | message GetReply {
40 | required bool success = 1;
41 | optional string errorMessage = 2;
42 | required bytes value = 3;
43 | required uint64 actualVersion = 4;
44 | }
45 |
46 | message PutRequest {
47 | required string collection = 1;
48 | required string key = 2;
49 | optional uint64 version = 3;
50 | required bytes value = 4;
51 | }
52 |
53 | message PutReply {
54 | required bool success = 1;
55 | optional string errorMessage = 2;
56 | }
57 |
58 | message PutMultipleVersionsRequest {
59 | required string collection = 1;
60 | required string key = 2;
61 | repeated uint64 versions = 3;
62 | repeated bytes values = 4;
63 | }
64 |
65 | message PutMultipleVersionsReply {
66 | required bool success = 1;
67 | optional string errorMessage = 2;
68 | }
69 |
// Writes a batch of key/version/value triples to one collection in a single call.
message PutMultipleKeysWithMultipleVersionsRequest {
    required string collection = 1;
    repeated VersionedKeyValuePairProto versionedKeyValuePairs = 2;
}

message PutMultipleKeysWithMultipleVersionsReply {
    required bool success = 1;
    optional string errorMessage = 2;
}

// Deletes exactly one version of one key.
message DeleteRequest {
    required string collection = 1;
    required string key = 2;
    required uint64 version = 3;
}

message DeleteReply {
    required bool success = 1;
    optional string errorMessage = 2;
}

// Deletes every version of every key that starts with the given prefix.
message DeleteAllByPrefixRequest {
    required string collection = 1;
    required string prefix = 2;
}

message DeleteAllByPrefixReply {
    required bool success = 1;
    optional string errorMessage = 2;
}

// Fetches all stored versions of a single key, optionally bounded on both ends.
// Note: field numbers are out of declaration order here; the wire format is unaffected.
message GetMultipleVersionsRequest {
    required string collection = 1;
    required string key = 2;
    optional uint64 newestVersion = 4;
    optional uint64 oldestVersion = 3;
}

// values and versions are parallel lists: values[i] was stored at versions[i].
message GetMultipleVersionsReply {
    required bool success = 1;
    optional string errorMessage = 2;
    repeated bytes values = 3;
    repeated uint64 versions = 4;
}

// Paginated scan over keys (one entry per key), optionally restricted to a prefix
// and to the newest version at or below the given version.
message GetMultipleKeysRequest {
    required string collection = 1;
    optional string startAfterKey = 2;
    optional string prefix = 3;
    optional uint64 version = 4;
    optional uint32 limit = 5;
}

// keys, values, and actualVersions are parallel lists.
message GetMultipleKeysReply {
    required bool success = 1;
    optional string errorMessage = 2;
    repeated string keys = 3;
    repeated bytes values = 4;
    repeated uint64 actualVersions = 5;
}

// Point lookups for an explicit list of keys.
message GetMultipleKeysByListRequest {
    required string collection = 1;
    repeated string keys = 2;
    optional uint64 version = 3; // Applied to all requested keys
}

// One VersionValueBoxProto per requested key, in request order.
message GetMultipleKeysByListReply {
    required bool success = 1;
    optional string errorMessage = 2;
    repeated VersionValueBoxProto versionValueBoxes = 3;
}

// Version-range lookups for an explicit list of keys.
message GetMultipleKeysByListWithMultipleVersionsRequest {
    required string collection = 1;
    repeated string keys = 2;
    optional uint64 newestVersion = 3; // Applied to all requested keys
    optional uint64 oldestVersion = 4; // Applied to all requested keys
}

message GetMultipleKeysByListWithMultipleVersionsReply {
    required bool success = 1;
    optional string errorMessage = 2;
    repeated KeyVersionsValuesPairProto keyVersionsValuesPairs = 3;
}

// Deletes a (possibly bounded) version range of a single key.
// Note: field numbers are out of declaration order here; the wire format is unaffected.
message DeleteMultipleVersionsRequest {
    required string collection = 1;
    required string key = 2;
    optional uint64 newestVersion = 4;
    optional uint64 oldestVersion = 3;
}

message DeleteMultipleVersionsReply {
    required bool success = 1;
    optional string errorMessage = 2;
}

// Paginated listing of distinct keys, optionally restricted to a prefix.
message ListKeysRequest {
    required string collection = 1;
    optional uint32 limit = 2;
    optional string startAfterKey = 3;
    optional string prefix = 4;
}

message ListKeysReply {
    required bool success = 1;
    optional string errorMessage = 2;
    repeated string keys = 3;
}

// Paginated listing of the version numbers stored for one key.
message ListVersionsRequest {
    required string collection = 1;
    required string key = 2;
    optional uint32 limit = 3;
    optional uint32 offset = 4;
}

message ListVersionsReply {
    required bool success = 1;
    optional string errorMessage = 2;
    repeated uint64 versions = 3;
}


message BackupRequest {}

// Reports id, timestamp, and size of the created backup.
message BackupReply {
    required bool success = 1;
    optional string errorMessage = 2;
    required uint32 id = 3;
    required uint64 timestamp = 4;
    required uint64 size = 5;
}

message RestoreFromBackupRequest {}

message RestoreFromBackupReply {
    required bool success = 1;
    optional string errorMessage = 2;
}

message CompactAllDataRequest {}

message CompactAllDataReply {
    required bool success = 1;
    optional string errorMessage = 2;
}

// Copies the entire database into a new data directory, optionally applying a
// new rocksdb options file.
message ExportDBRequest {
    required string newDataDir = 1;
    optional string optionsFile = 2;
}

message ExportDBReply {
    required bool success = 1;
    optional string errorMessage = 2;
}


// Every RPC reports failure via success=false plus errorMessage in the reply
// (rather than via gRPC status codes) — see FossilDBGrpcImpl.withExceptionHandler.
service FossilDB {
    rpc Health (HealthRequest) returns (HealthReply) {}
    rpc Get (GetRequest) returns (GetReply) {}
    rpc GetMultipleVersions (GetMultipleVersionsRequest) returns (GetMultipleVersionsReply) {}
    rpc GetMultipleKeys (GetMultipleKeysRequest) returns (GetMultipleKeysReply) {}
    rpc GetMultipleKeysByList (GetMultipleKeysByListRequest) returns (GetMultipleKeysByListReply) {}
    rpc GetMultipleKeysByListWithMultipleVersions (GetMultipleKeysByListWithMultipleVersionsRequest) returns (GetMultipleKeysByListWithMultipleVersionsReply) {}
    rpc Put (PutRequest) returns (PutReply) {}
    rpc PutMultipleVersions (PutMultipleVersionsRequest) returns (PutMultipleVersionsReply) {}
    rpc PutMultipleKeysWithMultipleVersions (PutMultipleKeysWithMultipleVersionsRequest) returns (PutMultipleKeysWithMultipleVersionsReply) {}
    rpc Delete (DeleteRequest) returns (DeleteReply) {}
    rpc DeleteMultipleVersions (DeleteMultipleVersionsRequest) returns (DeleteMultipleVersionsReply) {}
    rpc DeleteAllByPrefix (DeleteAllByPrefixRequest) returns (DeleteAllByPrefixReply) {}
    rpc ListKeys (ListKeysRequest) returns (ListKeysReply) {}
    rpc ListVersions (ListVersionsRequest) returns (ListVersionsReply) {}
    rpc Backup (BackupRequest) returns (BackupReply) {}
    rpc RestoreFromBackup (RestoreFromBackupRequest) returns (RestoreFromBackupReply) {}
    rpc CompactAllData (CompactAllDataRequest) returns (CompactAllDataReply) {}
    rpc ExportDB (ExportDBRequest) returns (ExportDBReply) {}
}
250 |
--------------------------------------------------------------------------------
/src/main/resources/.gitignore:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/scalableminds/fossildb/8b57bfe1b752dfbf4978752d3b3fb74a44c4dcd2/src/main/resources/.gitignore
--------------------------------------------------------------------------------
/src/main/resources/logback.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | %d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n
5 |
6 |
7 |
8 |
9 |
10 |
11 |
--------------------------------------------------------------------------------
/src/main/scala/com/scalableminds/fossildb/FossilDB.scala:
--------------------------------------------------------------------------------
1 | package com.scalableminds.fossildb
2 |
3 | import java.nio.file.Paths
4 |
5 | import com.scalableminds.fossildb.db.StoreManager
6 | import com.typesafe.scalalogging.LazyLogging
7 | import fossildb.BuildInfo
8 |
9 | import scala.concurrent.ExecutionContext
10 |
/** Fallback values used when the corresponding command-line option is absent. */
object ConfigDefaults {
  val port: Int = 7155
  val dataDir: String = "data"
  val backupDir: String = "backup"
  val columnFamilies: List[String] = List()
  val rocksOptionsFile: Option[String] = None
}

/** Parsed command-line configuration for the FossilDB server process. */
case class Config(port: Int = ConfigDefaults.port,
                  dataDir: String = ConfigDefaults.dataDir,
                  backupDir: String = ConfigDefaults.backupDir,
                  columnFamilies: List[String] = ConfigDefaults.columnFamilies,
                  rocksOptionsFile: Option[String] = ConfigDefaults.rocksOptionsFile)
15 |
/** Process entry point: parses CLI options, opens the stores, and runs the gRPC server. */
object FossilDB extends LazyLogging {

  /** Prints the version for `--version`; otherwise starts the server and blocks
    * until it terminates. On parse failure, exits quietly (scopt presumably
    * prints its own usage message — NOTE(review): confirm desired exit code). */
  def main(args: Array[String]): Unit = {

    if (args.contains("--version"))
      println(BuildInfo.version)
    else {
      parseArguments(args) match {
        case Some(config) =>
          logger.info("Starting FossilDB")
          logger.info("BuildInfo: (" + BuildInfo + ")")
          logger.info("Config: " + config)

          val storeManager = new StoreManager(Paths.get(config.dataDir), Paths.get(config.backupDir), config.columnFamilies, config.rocksOptionsFile)

          val server = new FossilDBServer(storeManager, config.port, ExecutionContext.global)

          server.start()
          server.blockUntilShutdown()
        case None => ()
      }
    }
  }

  /** Builds the scopt parser and parses args into a Config (None on failure).
    * Only --columnFamilies is mandatory; all other options fall back to ConfigDefaults. */
  private def parseArguments(args: Array[String]) = {
    val parser = new scopt.OptionParser[Config]("fossildb") {

      opt[Int]('p', "port").valueName("").action( (x, c) =>
        c.copy(port = x) ).text("port to listen on. Default: " + ConfigDefaults.port)

      opt[String]('d', "dataDir").valueName("").action( (x, c) =>
        c.copy(dataDir = x) ).text("database directory. Default: " + ConfigDefaults.dataDir)

      opt[String]('b', "backupDir").valueName("").action( (x, c) =>
        c.copy(backupDir = x) ).text("backup directory. Default: " + ConfigDefaults.backupDir)

      opt[Seq[String]]('c', "columnFamilies").required().valueName(",...").action( (x, c) =>
        c.copy(columnFamilies = x.toList) ).text("column families of the database (created if there is no db yet)")

      opt[String]('r', "rocksOptionsFile").valueName("").action( (x, c) =>
        c.copy(rocksOptionsFile = Some(x)) ).text("rocksdb options file. Default: " + ConfigDefaults.rocksOptionsFile)
    }

    parser.parse(args, Config())
  }
}
61 |
--------------------------------------------------------------------------------
/src/main/scala/com/scalableminds/fossildb/FossilDBGrpcImpl.scala:
--------------------------------------------------------------------------------
1 | package com.scalableminds.fossildb
2 |
3 | import java.io.{PrintWriter, StringWriter}
4 | import com.google.protobuf.ByteString
5 | import com.scalableminds.fossildb.db.StoreManager
6 | import com.scalableminds.fossildb.proto.fossildbapi._
7 | import scalapb.GeneratedMessage
8 | import com.typesafe.scalalogging.LazyLogging
9 |
10 | import scala.concurrent.Future
11 |
/** gRPC handler implementation for the FossilDB service.
  *
  * All handlers execute synchronously and return an already-completed Future.
  * Exceptions never fail the RPC itself: withExceptionHandler catches them,
  * logs them with the offending request, and maps them to a reply with
  * success=false and the exception rendered into errorMessage.
  */
class FossilDBGrpcImpl(storeManager: StoreManager)
  extends FossilDBGrpc.FossilDB
    with LazyLogging {

  /** Trivial liveness probe; only fails if the handler machinery itself throws. */
  override def health(req: HealthRequest): Future[HealthReply] = withExceptionHandler(req) {
    HealthReply(success = true)
  } { errorMsg => HealthReply(success = false, errorMsg) }

  /** Fetches a single key (version bounded by req.version if set).
    * A missing key throws (-> success=false with stack trace logged) unless
    * req.mayBeEmpty is set, in which case a quiet success=false reply with
    * "No such element" is returned instead. */
  override def get(req: GetRequest): Future[GetReply] = withExceptionHandler(req) {
    val store = storeManager.getStore(req.collection)
    val versionedKeyValuePairOpt = store.withRawRocksIterator{rocksIt => store.get(rocksIt, req.key, req.version)}
    versionedKeyValuePairOpt match {
      case Some(pair) => GetReply(success = true, None, ByteString.copyFrom(pair.value), pair.version)
      case None =>
        if (!req.mayBeEmpty.getOrElse(false)) throw new NoSuchElementException
        GetReply(success = false, Some("No such element"), ByteString.EMPTY, 0)
    }
  } { errorMsg => GetReply(success = false, errorMsg, ByteString.EMPTY, 0) }

  /** Writes one value. When no version is supplied, uses (newest stored
    * version + 1), or 0 for a previously unseen key. */
  override def put(req: PutRequest): Future[PutReply] = withExceptionHandler(req) {
    val store = storeManager.getStore(req.collection)
    val version = store.withRawRocksIterator{rocksIt => req.version.getOrElse(store.get(rocksIt, req.key, None).map(_.version + 1).getOrElse(0L))}
    require(version >= 0, "Version numbers must be non-negative")
    store.put(req.key, version, req.value.toByteArray)
    PutReply(success = true)
  } { errorMsg => PutReply(success = false, errorMsg) }

  /** Writes several versions of one key; req.versions and req.values are
    * parallel lists and must have equal length. */
  override def putMultipleVersions(req: PutMultipleVersionsRequest): Future[PutMultipleVersionsReply] = withExceptionHandler(req) {
    val store = storeManager.getStore(req.collection)
    require(req.versions.length == req.values.length, s"Must supply as many versions as values, got ${req.versions.length} versions vs ${req.values.length} values.")
    require(req.versions.forall(_ >= 0), "Version numbers must be non-negative")
    req.versions.zip(req.values).foreach { case (version, value) =>
      store.put(req.key, version, value.toByteArray)
    }
    PutMultipleVersionsReply(success = true)
  } { errorMsg => PutMultipleVersionsReply(success = false, errorMsg)}

  /** Deletes exactly one version of one key. */
  override def delete(req: DeleteRequest): Future[DeleteReply] = withExceptionHandler(req) {
    val store = storeManager.getStore(req.collection)
    store.delete(req.key, req.version)
    DeleteReply(success = true)
  } { errorMsg => DeleteReply(success = false, errorMsg) }

  /** Returns all versions of one key within the optional [oldest, newest] bounds;
    * values and versions in the reply are parallel lists. */
  override def getMultipleVersions(req: GetMultipleVersionsRequest): Future[GetMultipleVersionsReply] = withExceptionHandler(req) {
    val store = storeManager.getStore(req.collection)
    val (values, versions) = store.withRawRocksIterator{rocksIt => store.getMultipleVersions(rocksIt, req.key, req.oldestVersion, req.newestVersion)}
    GetMultipleVersionsReply(success = true, None, values.map(ByteString.copyFrom), versions)
  } { errorMsg => GetMultipleVersionsReply(success = false, errorMsg) }

  /** Paginated multi-key read: one entry per key, with the version actually found. */
  override def getMultipleKeys(req: GetMultipleKeysRequest): Future[GetMultipleKeysReply] = withExceptionHandler(req) {
    val store = storeManager.getStore(req.collection)
    val (keys, values, versions) = store.withRawRocksIterator{rocksIt => store.getMultipleKeys(rocksIt, req.startAfterKey, req.prefix, req.version, req.limit)}
    GetMultipleKeysReply(success = true, None, keys, values.map(ByteString.copyFrom), versions)
  } { errorMsg => GetMultipleKeysReply(success = false, errorMsg) }

  /** Version-range read for an explicit key list. Keys with no version in range
    * are filtered out of the reply entirely. */
  override def getMultipleKeysByListWithMultipleVersions(req: GetMultipleKeysByListWithMultipleVersionsRequest): Future[GetMultipleKeysByListWithMultipleVersionsReply] = withExceptionHandler(req) {
    val store = storeManager.getStore(req.collection)
    val keyVersionsValuesPairs = req.keys.map { key =>
      val (values, versions) = store.withRawRocksIterator{rocksIt => store.getMultipleVersions(rocksIt, key, req.oldestVersion, req.newestVersion)}
      val versionValuePairs = values.zip(versions).map { case (value, version) =>
        VersionValuePairProto(version, ByteString.copyFrom(value))
      }
      KeyVersionsValuesPairProto(key, versionValuePairs)
    }.filter(_.versionValuePairs.nonEmpty)
    GetMultipleKeysByListWithMultipleVersionsReply(success = true, None, keyVersionsValuesPairs)
  } { errorMsg => GetMultipleKeysByListWithMultipleVersionsReply(success = false, errorMsg) }

  /** Point lookup for an explicit key list. Missing keys yield an empty box so
    * the reply stays parallel to (and in the order of) req.keys. */
  override def getMultipleKeysByList(req: GetMultipleKeysByListRequest): Future[GetMultipleKeysByListReply] = withExceptionHandler(req) {
    val store = storeManager.getStore(req.collection)
    val versionValueBoxes = req.keys.map { key =>
      val versionedKeyValuePairOpt = store.withRawRocksIterator { rocksIt => store.get(rocksIt, key, req.version) }
      versionedKeyValuePairOpt match {
        case Some(pair) => VersionValueBoxProto(Some(VersionValuePairProto(pair.version, ByteString.copyFrom(pair.value))), errorMessage = None)
        case None => VersionValueBoxProto(None, errorMessage = None)
      }
    }
    GetMultipleKeysByListReply(success = true, None, versionValueBoxes)
  } { errorMsg => GetMultipleKeysByListReply(success = false, errorMsg) }

  /** Batch write of key/version/value triples to one collection. */
  override def putMultipleKeysWithMultipleVersions(req: PutMultipleKeysWithMultipleVersionsRequest): Future[PutMultipleKeysWithMultipleVersionsReply] = withExceptionHandler(req) {
    val store = storeManager.getStore(req.collection)
    require(req.versionedKeyValuePairs.forall(_.version >= 0), "Version numbers must be non-negative")
    req.versionedKeyValuePairs.foreach { pair =>
      store.put(pair.key, pair.version, pair.value.toByteArray)
    }
    PutMultipleKeysWithMultipleVersionsReply(success = true, None)
  } { errorMsg => PutMultipleKeysWithMultipleVersionsReply(success = false, errorMsg) }

  /** Deletes a (possibly bounded) version range of one key. */
  override def deleteMultipleVersions(req: DeleteMultipleVersionsRequest): Future[DeleteMultipleVersionsReply] = withExceptionHandler(req) {
    val store = storeManager.getStore(req.collection)
    store.withRawRocksIterator{rocksIt => store.deleteMultipleVersions(rocksIt, req.key, req.oldestVersion, req.newestVersion)}
    DeleteMultipleVersionsReply(success = true)
  } { errorMsg => DeleteMultipleVersionsReply(success = false, errorMsg) }

  /** Deletes all versions of all keys matching the prefix. */
  override def deleteAllByPrefix(req: DeleteAllByPrefixRequest): Future[DeleteAllByPrefixReply] = withExceptionHandler(req) {
    val store = storeManager.getStore(req.collection)
    store.withRawRocksIterator{rocksIt => store.deleteAllByPrefix(rocksIt, req.prefix)}
    DeleteAllByPrefixReply(success = true)
  } { errorMsg => DeleteAllByPrefixReply(success = false, errorMsg)}

  /** Paginated listing of distinct keys. */
  override def listKeys(req: ListKeysRequest): Future[ListKeysReply] = withExceptionHandler(req) {
    val store = storeManager.getStore(req.collection)
    val keys = store.withRawRocksIterator{rocksIt => store.listKeys(rocksIt, req.limit, req.startAfterKey, req.prefix)}
    ListKeysReply(success = true, None, keys)
  } { errorMsg => ListKeysReply(success = false, errorMsg) }

  /** Paginated listing of the version numbers stored for one key. */
  override def listVersions(req: ListVersionsRequest): Future[ListVersionsReply] = withExceptionHandler(req) {
    val store = storeManager.getStore(req.collection)
    val versions = store.withRawRocksIterator{rocksIt => store.listVersions(rocksIt, req.key, req.limit, req.offset)}
    ListVersionsReply(success = true, None, versions)
  } { errorMsg => ListVersionsReply(success = false, errorMsg) }

  /** Creates a backup via the store manager; an absent BackupInfo is treated as an error. */
  override def backup(req: BackupRequest): Future[BackupReply] = withExceptionHandler(req) {
    val backupInfoOpt = storeManager.backup
    backupInfoOpt match {
      case Some(backupInfo) => BackupReply(success = true, None, backupInfo.id, backupInfo.timestamp, backupInfo.size)
      case _ => throw new Exception("Backup did not return valid BackupInfo")
    }
  } { errorMsg => BackupReply(success = false, errorMsg, 0, 0, 0) }

  /** Restores the DB from the latest backup (other operations fail meanwhile). */
  override def restoreFromBackup(req: RestoreFromBackupRequest): Future[RestoreFromBackupReply] = withExceptionHandler(req) {
    storeManager.restoreFromBackup()
    RestoreFromBackupReply(success = true)
  } { errorMsg => RestoreFromBackupReply(success = false, errorMsg) }

  /** Triggers a full compaction of all data. */
  override def compactAllData(req: CompactAllDataRequest): Future[CompactAllDataReply] = withExceptionHandler(req) {
    storeManager.compactAllData()
    CompactAllDataReply(success = true)
  } { errorMsg => CompactAllDataReply(success = false, errorMsg) }

  /** Exports the whole DB to a new data directory. */
  override def exportDB(req: ExportDBRequest): Future[ExportDBReply] = withExceptionHandler(req) {
    storeManager.exportDB(req.newDataDir, req.optionsFile)
    ExportDBReply(success = true)
  } { errorMsg => ExportDBReply(success = false, errorMsg) }

  /** Runs tryBlock synchronously. On any exception, logs it together with the
    * request and maps it to an error reply via onErrorBlock. Always returns an
    * already-successful Future — errors are encoded in the reply, not the Future. */
  private def withExceptionHandler[T, R <: GeneratedMessage](request: R)(tryBlock: => T)(onErrorBlock: Option[String] => T): Future[T] = {
    try {
      logger.debug("received " + requestToString(request))
      Future.successful(tryBlock)
    } catch {
      case e: Exception =>
        log(e, request)
        Future.successful(onErrorBlock(Some(e.toString)))
    }
  }

  /** Logs an exception's stack trace together with the request that caused it. */
  private def log[R <: GeneratedMessage](e: Exception, request: R): Unit = {
    logger.warn(getStackTraceAsString(e) + "\nrequest that caused this error: " + requestToString(request) + "\n")
  }

  // Single-line rendering of a request for logging.
  private def requestToString[R <: GeneratedMessage](request: R) =
    request.getClass.getSimpleName + "(" + request.toString.replaceAll("\n", " ") + ")"

  // Full stack trace as a string, without the trailing newline.
  private def getStackTraceAsString(t: Throwable) = {
    val sw = new StringWriter
    t.printStackTrace(new PrintWriter(sw))
    sw.toString.dropRight(1)
  }
}
171 |
--------------------------------------------------------------------------------
/src/main/scala/com/scalableminds/fossildb/FossilDBServer.scala:
--------------------------------------------------------------------------------
1 | package com.scalableminds.fossildb
2 |
3 | import com.scalableminds.fossildb.db.StoreManager
4 | import com.scalableminds.fossildb.proto.fossildbapi.FossilDBGrpc
5 | import io.grpc.health.v1.HealthCheckResponse
6 | import com.typesafe.scalalogging.LazyLogging
7 | import io.grpc.Server
8 | import io.grpc.netty.NettyServerBuilder
9 | import io.grpc.protobuf.services.HealthStatusManager
10 |
11 | import scala.concurrent.ExecutionContext
12 |
/** Wraps the Netty gRPC server hosting the FossilDB service plus the standard
  * gRPC health service. Lifecycle: start() then blockUntilShutdown(); stop() is
  * also installed as a JVM shutdown hook.
  */
class FossilDBServer(storeManager: StoreManager, port: Int, executionContext: ExecutionContext) extends LazyLogging
{ self =>
  // Both are assigned in start(); null until then (stop/blockUntilShutdown guard on this).
  private[this] var server: Server = null
  private[this] var healthStatusManager: HealthStatusManager = null

  /** Builds and starts the server, marks it SERVING, and registers a shutdown hook. */
  def start(): Unit = {
    healthStatusManager = new HealthStatusManager()
    // Allow arbitrarily large inbound messages.
    server = NettyServerBuilder.forPort(port).maxInboundMessageSize(Int.MaxValue)
      .addService(FossilDBGrpc.bindService(new FossilDBGrpcImpl(storeManager), executionContext))
      .addService(healthStatusManager.getHealthService)
      .build.start
    // Empty service name addresses the overall server health entry.
    healthStatusManager.setStatus("", HealthCheckResponse.ServingStatus.SERVING)
    logger.info("Server started, listening on " + port)
    sys.addShutdownHook {
      logger.info("Shutting down gRPC server since JVM is shutting down")
      self.stop()
      logger.info("Server shut down")
    }
  }

  /** Initiates server shutdown, closes the RocksDB stores, and flips the health
    * status to NOT_SERVING. No-op if start() was never called. */
  def stop(): Unit = {
    if (server != null) {
      server.shutdown()
      storeManager.close
      healthStatusManager.setStatus("", HealthCheckResponse.ServingStatus.NOT_SERVING)
    }
  }

  /** Blocks the calling thread until the server has terminated. */
  def blockUntilShutdown(): Unit = {
    if (server != null) {
      server.awaitTermination()
    }
  }

}
48 |
--------------------------------------------------------------------------------
/src/main/scala/com/scalableminds/fossildb/db/RocksDBStore.scala:
--------------------------------------------------------------------------------
1 | package com.scalableminds.fossildb.db
2 |
3 | import com.typesafe.scalalogging.LazyLogging
4 | import org.rocksdb._
5 |
6 | import java.nio.file.{Files, Path}
7 | import java.util
8 | import scala.collection.mutable
9 | import scala.concurrent.Future
10 | import scala.jdk.CollectionConverters.{BufferHasAsJava, ListHasAsScala, SeqHasAsJava}
11 | import scala.language.postfixOps
12 |
13 | case class BackupInfo(id: Int, timestamp: Long, size: Long)
14 |
15 | case class KeyValuePair[T](key: String, value: T)
16 |
/** Opens the RocksDB instance for the given data directory and column families
  * and exposes per-column-family stores plus maintenance operations
  * (backup, restore, compaction, export).
  */
class RocksDBManager(dataDir: Path, columnFamilies: List[String], optionsFilePathOpt: Option[String]) extends LazyLogging {

  private val (db: RocksDB, columnFamilyHandles) = {
    RocksDB.loadLibrary()
    // Tuned defaults, applied to column families the options file does not cover.
    val columnOptions = new ColumnFamilyOptions()
      .setArenaBlockSize(4L * 1024 * 1024) // 4MB
      .setTargetFileSizeBase(1024L * 1024 * 1024) // 1GB
      .setMaxBytesForLevelBase(10L * 1024 * 1024 * 1024) // 10GB
    val options = new DBOptions()
    val cfListRef: mutable.Buffer[ColumnFamilyDescriptor] = mutable.Buffer()
    // If an options file is supplied, it populates both the DBOptions and the
    // column-family descriptor list; failure to load it is fatal.
    optionsFilePathOpt.foreach { optionsFilePath =>
      try {
        val configOptions = new ConfigOptions()
        org.rocksdb.OptionsUtil.loadOptionsFromFile(configOptions, optionsFilePath, options, cfListRef.asJava)
        logger.info("successfully loaded rocksdb options from " + optionsFilePath)
      } catch {
        case e: Exception =>
          throw new Exception("Failed to load rocksdb options from file " + optionsFilePath, e)
      }
    }
    options.setCreateIfMissing(true).setCreateMissingColumnFamilies(true)
    // Options for column families absent from the options file: prefer the file's
    // default-CF options if present, otherwise the tuned defaults above.
    val defaultColumnFamilyOptions: ColumnFamilyOptions = cfListRef.find(_.getName sameElements RocksDB.DEFAULT_COLUMN_FAMILY).map(_.getOptions).getOrElse(columnOptions)
    // Requested column families (plus RocksDB's mandatory default CF) that the
    // options file did not already describe.
    val newColumnFamilyDescriptors = (columnFamilies.map(_.getBytes) :+ RocksDB.DEFAULT_COLUMN_FAMILY).diff(cfListRef.toList.map(_.getName)).map(new ColumnFamilyDescriptor(_, defaultColumnFamilyOptions))
    val columnFamilyDescriptors = cfListRef.toList ::: newColumnFamilyDescriptors
    logger.info("Opening RocksDB at " + dataDir.toAbsolutePath)
    val columnFamilyHandles = new util.ArrayList[ColumnFamilyHandle]
    val db = RocksDB.open(
      options,
      dataDir.toAbsolutePath.toString,
      columnFamilyDescriptors.asJava,
      columnFamilyHandles)
    // NOTE(review): this zip pairs the requested family names positionally with the
    // returned handles, i.e. it assumes the requested families come first in
    // columnFamilyDescriptors. That holds when no options file is given (cfListRef
    // is empty) — confirm it also holds for options files that reorder/describe CFs.
    (db, columnFamilies.zip(columnFamilyHandles.asScala).toMap)
  }

  /** Returns a RocksDBStore bound to the given column family, or None if unknown. */
  def getStoreForColumnFamily(columnFamily: String): Option[RocksDBStore] = {
    columnFamilyHandles.get(columnFamily).map(new RocksDBStore(db, _))
  }

  /** Creates a new backup in backupDir (creating the directory if needed) and
    * purges all but the most recent backup. Returns info on the kept backup. */
  def backup(backupDir: Path): Option[BackupInfo] = {
    if (!Files.exists(backupDir) || !Files.isDirectory(backupDir))
      Files.createDirectories(backupDir)

    RocksDB.loadLibrary()
    val backupEngine = BackupEngine.open(Env.getDefault, new BackupEngineOptions(backupDir.toString))
    backupEngine.createNewBackup(db)
    backupEngine.purgeOldBackups(1)
    backupEngine.getBackupInfo.asScala.headOption.map(info => BackupInfo(info.backupId, info.timestamp, info.size))
  }

  /** Closes the DB and overwrites dataDir with the latest backup. The caller is
    * responsible for reopening afterwards (see StoreManager.reInitialize). */
  def restoreFromBackup(backupDir: Path): Unit = {
    logger.info("Restoring from backup. RocksDB temporarily unavailable")
    close()
    RocksDB.loadLibrary()
    val backupEngine = BackupEngine.open(Env.getDefault, new BackupEngineOptions(backupDir.toString))
    backupEngine.restoreDbFromLatestBackup(dataDir.toString, dataDir.toString, new RestoreOptions(true))
    logger.info("Restoring from backup complete. Reopening RocksDB")
  }

  /** Triggers a full-range compaction of the whole database. */
  def compactAllData(): Unit = {
    logger.info("Compacting all data")
    RocksDB.loadLibrary()
    db.compactRange()
    logger.info("All data has been compacted to last level containing data")
  }

  /** Copies every key/value of every column family into a freshly created DB at
    * newDataDir (optionally with a new options file), then compacts the new DB. */
  def exportToNewDB(newDataDir: Path, newOptionsFilePathOpt: Option[String]): Unit = {
    RocksDB.loadLibrary()
    logger.info(s"Exporting to new DB at ${newDataDir.toString} with options file $newOptionsFilePathOpt")
    val newManager = new RocksDBManager(newDataDir, columnFamilies, newOptionsFilePathOpt)
    newManager.columnFamilyHandles.foreach { case (name, handle) =>
      val store = getStoreForColumnFamily(name).get
      store.withRawRocksIterator { rocksIt =>
        // Empty start key + no prefix: scan the entire column family.
        val dataIterator = RocksDBStore.scan(rocksIt, "", None)
        dataIterator.foreach(el => newManager.db.put(handle, el.key.getBytes, el.value))
      }
    }
    logger.info("Writing data completed. Start compaction")
    newManager.db.compactRange()
    logger.info("Compaction finished")
  }

  /** Closes the RocksDB handle. */
  def close(): Future[Unit] = {
    logger.info("Closing RocksDB handle")
    Future.successful(db.close())
  }
}
103 |
/** Iterator over the raw keys under a RocksIterator's current position,
  * optionally stopping once keys no longer start with the given prefix.
  * Wraps but does not own the underlying iterator.
  */
class RocksDBKeyIterator(it: RocksIterator, prefix: Option[String]) extends Iterator[String] with LazyLogging {

  // Decodes the raw key bytes to a String, one char per byte.
  private def decodeCurrentKey: String = new String(it.key().map(_.toChar))

  override def hasNext: Boolean =
    it.isValid && prefix.forall(p => it.key().startsWith(p))

  override def next(): String = {
    val current = decodeCurrentKey
    it.next()
    current
  }

  /** Returns the key at the current position without advancing the iterator. */
  def peek: String = decodeCurrentKey

}
119 |
/** Iterator over (key, value) pairs under a RocksIterator's current position,
  * optionally stopping once keys no longer start with the given prefix.
  */
class RocksDBIterator(it: RocksIterator, prefix: Option[String]) extends Iterator[KeyValuePair[Array[Byte]]] {

  override def hasNext: Boolean =
    it.isValid && prefix.forall(p => it.key().startsWith(p))

  override def next(): KeyValuePair[Array[Byte]] = {
    // Capture the current entry before advancing the underlying iterator.
    val pair = KeyValuePair(new String(it.key().map(_.toChar)), it.value())
    it.next()
    pair
  }

}
131 |
/** Thin wrapper around a RocksDB handle scoped to one column family,
  * offering plain get/put/delete plus scoped raw-iterator access.
  */
class RocksDBStore(db: RocksDB, handle: ColumnFamilyHandle) extends LazyLogging {

  /** Runs `block` with a freshly created raw iterator over this column family;
    * the iterator is closed afterwards even if `block` throws. */
  def withRawRocksIterator[T](block: RocksIterator => T): T = {
    val iterator = db.newIterator(handle)
    try block(iterator)
    finally iterator.close()
  }

  /** Reads the raw value stored under `key` in this column family. */
  def get(key: String): Array[Byte] = db.get(handle, key.getBytes())

  /** Stores `value` under `key` in this column family. */
  def put(key: String, value: Array[Byte]): Unit = db.put(handle, key.getBytes(), value)

  /** Removes the entry stored under `key` in this column family. */
  def delete(key: String): Unit = db.delete(handle, key.getBytes())

}
156 |
object RocksDBStore {

  /** Positions the iterator at `key` (or, if absent, the lexicographically next
    * key) and returns a key/value iterator, optionally bounded by `prefix`. */
  def scan(rocksIt: RocksIterator, key: String, prefix: Option[String]): RocksDBIterator = {
    rocksIt.seek(key.getBytes())
    new RocksDBIterator(rocksIt, prefix)
  }

  /** Like scan, but yields keys only (the returned iterator never touches values). */
  def scanKeysOnly(rocksIt: RocksIterator, key: String, prefix: Option[String]): RocksDBKeyIterator = {
    rocksIt.seek(key.getBytes())
    new RocksDBKeyIterator(rocksIt, prefix)
  }

}
170 |
--------------------------------------------------------------------------------
/src/main/scala/com/scalableminds/fossildb/db/StoreManager.scala:
--------------------------------------------------------------------------------
1 | package com.scalableminds.fossildb.db
2 |
3 | import java.nio.file.{Path, Paths}
4 | import java.util.concurrent.atomic.AtomicBoolean
5 | import scala.concurrent.Future
6 |
/** Owns the RocksDBManager and one VersionedKeyValueStore per column family,
  * and guards the mutually exclusive maintenance operations
  * (backup, restore-from-backup, compaction, export).
  */
class StoreManager(dataDir: Path, backupDir: Path, columnFamilies: List[String], rocksdbOptionsFile: Option[String]) {

  // Reassigned by reInitialize(); None only before the first initialization below.
  private var rocksDBManager: Option[RocksDBManager] = None
  private var stores: Option[Map[String, VersionedKeyValueStore]] = None

  reInitialize()

  /** Closes any open DB handle, then (re)opens RocksDB and rebuilds the
    * column-family -> store map. Called at construction time and again after
    * restore-from-backup. */
  private def reInitialize(): Unit = {
    rocksDBManager.map(_.close())
    rocksDBManager = Some(new RocksDBManager(dataDir, columnFamilies, rocksdbOptionsFile))
    stores = Some(columnFamilies.map { cf =>
      val store: VersionedKeyValueStore = new VersionedKeyValueStore(rocksDBManager.get.getStoreForColumnFamily(cf).get)
      cf -> store
    }.toMap)
  }

  /** Looks up the store for a column family.
    * Throws NoSuchElementException for unknown column families, and a plain
    * Exception while a restore-from-backup is in progress. */
  def getStore(columnFamily: String): VersionedKeyValueStore = {
    failDuringRestore()
    try {
      val existingStores = stores.get
      existingStores(columnFamily)
    } catch {
      // Any failure here (stores unset, key missing) is reported uniformly.
      case _: Exception => throw new NoSuchElementException("No store for column family " + columnFamily)
    }
  }


  // Flags serializing the exclusive maintenance operations against each other.
  private val backupInProgress = new AtomicBoolean(false)
  private val restoreInProgress = new AtomicBoolean(false)

  private def failDuringRestore(): Unit = if (restoreInProgress.get) throw new Exception("Unavailable during restore-from-backup operation")
  private def failDuringBackup(): Unit = if (backupInProgress.get) throw new Exception("Unavailable during backup")


  /** Creates a backup. Only one backup may run at a time, and none during a restore. */
  def backup: Option[BackupInfo] = {
    failDuringRestore()
    if (backupInProgress.compareAndSet(false, true)) {
      try {
        rocksDBManager.get.backup(backupDir)
      } finally {
        backupInProgress.set(false)
      }
    } else {
      throw new Exception("Backup already in progress")
    }
  }

  /** Restores the data dir from the latest backup and reinitializes the stores.
    * getStore fails while this runs (see failDuringRestore). */
  def restoreFromBackup(): Unit = {
    failDuringBackup()
    if (restoreInProgress.compareAndSet(false, true)) {
      try {
        rocksDBManager.get.restoreFromBackup(backupDir)
      } finally {
        // Reopen even if the restore itself failed, so the service comes back up.
        reInitialize()
        restoreInProgress.set(false)
      }
    } else {
      throw new Exception("Restore-from-backup already in progress")
    }
  }

  /** Full compaction; refused while a backup or restore is running. */
  def compactAllData(): Unit = {
    failDuringBackup()
    failDuringRestore()
    rocksDBManager.get.compactAllData()
  }

  /** Exports the whole DB to a new data directory; refused during restore. */
  def exportDB(newDataDir: String, newOptionsFilePathOpt: Option[String]): Unit = {
    failDuringRestore()
    rocksDBManager.get.exportToNewDB(Paths.get(newDataDir), newOptionsFilePathOpt)
  }

  /** Closes the underlying RocksDB handle, if one is open. */
  def close: Option[Future[Unit]] = {
    rocksDBManager.map(_.close())
  }
}
83 |
--------------------------------------------------------------------------------
/src/main/scala/com/scalableminds/fossildb/db/VersionedKeyValueStore.scala:
--------------------------------------------------------------------------------
1 | package com.scalableminds.fossildb.db
2 |
3 | import org.rocksdb.RocksIterator
4 |
5 | import scala.annotation.tailrec
6 | import scala.util.Try
7 |
8 |
/** A plain store key paired with a version number. Its string form embeds the
  * bitwise-inverted version in hex so that lexicographic ordering of composite
  * keys sorts higher versions first within a key.
  */
case class VersionedKey(key: String, version: Long) {
  override def toString: String = VersionedKey.asString(key, version)
}

object VersionedKey {

  /** Separator between the key, the inverted-version hex block, and the plain version. */
  val versionSeparator: Char = '@'

  /** Serializes key and version as `key@INVERTED_HEX@version`. */
  def asString(key: String, version: Long): String = {
    val invertedHex = (~version).toHexString.toUpperCase
    s"$key$versionSeparator$invertedHex$versionSeparator$version"
  }

  /** Parses a composite key string back into a VersionedKey; None if the final
    * separator-delimited part is not a valid Long.
    * NOTE(review): the key part is taken up to the FIRST separator, so plain
    * keys containing '@' would be truncated — presumably keys never contain it. */
  def apply(key: String): Option[VersionedKey] = {
    val parts = key.split(versionSeparator)
    (parts.headOption, parts.lastOption) match {
      case (Some(plainKey), Some(versionString)) =>
        Try(versionString.toLong).toOption.map(VersionedKey(plainKey, _))
      case _ => None
    }
  }

}
31 |
/** A value of type T tagged with the versioned key it is stored under. */
case class VersionedKeyValuePair[T](versionedKey: VersionedKey, value: T) {

  // Convenience accessor for the plain key part.
  def key: String = versionedKey.key

  // Convenience accessor for the version part.
  def version: Long = versionedKey.version

}
39 |
40 |
/** Filters a raw scan over composite-key entries down to at most one entry per
  * distinct key: the first entry encountered that satisfies the version bound
  * (pair.version <= version, when a bound is given).
  * NOTE(review): correctness relies on the scan yielding each key's versions
  * newest-first (see VersionedKey.asString's inverted-hex encoding) — confirm
  * when changing the key encoding.
  */
class VersionFilterIterator(it: RocksDBIterator, version: Option[Long]) extends Iterator[VersionedKeyValuePair[Array[Byte]]] {

  // Key of the pair most recently returned by next(); used to skip its remaining versions.
  private var currentKey: Option[String] = None

  // Entries whose composite key does not parse are silently dropped.
  private var versionedIterator = it.flatMap{ pair =>
    VersionedKey(pair.key).map(VersionedKeyValuePair(_, pair.value))
  }

  override def hasNext: Boolean = {
    // Skip further versions of the already-returned key and versions above the
    // requested bound. This mutates state, so callers must invoke hasNext before
    // each next() for the filtering to take effect.
    versionedIterator = versionedIterator.dropWhile { pair =>
      currentKey.contains(pair.key) || version.exists(pair.version > _)
    }
    versionedIterator.hasNext
  }

  override def next(): VersionedKeyValuePair[Array[Byte]] = {
    val value = versionedIterator.next()
    // Remember the key so subsequent hasNext calls skip its older versions.
    currentKey = Some(value.key)
    value
  }

}
63 |
/** Iterates over the distinct plain keys (without versions) of a column family,
  * starting strictly after startAfterKey and optionally bounded by a prefix.
  * Each hasNext/next call re-seeks the shared RocksIterator, so no position is
  * retained in the underlying iterator between calls.
  */
class KeyOnlyIterator[T](rocksIt: RocksIterator, startAfterKey: Option[String], prefix: Option[String]) extends Iterator[String] {

  /*
  Note that seek in the underlying iterators either hits precisely or goes to the
  lexicographically *next* key. To achieve correct behavior with startAfterKey,
  we have to advance once in case of the exact hit.
   */

  // Last plain key returned (or startAfterKey initially); the next seek starts here.
  private var currentKey: Option[String] = startAfterKey

  // Seek target: version 0 of the current key sorts after all real versions of
  // that key (inverted-hex encoding), so the seek lands on it or the next key.
  private def compositeKeyFor(keyOpt: Option[String]) = keyOpt match {
    case Some(key) => VersionedKey(key, 0).toString
    // If the currentKey is not yet set, seek to the very beginning, or, if set, to the prefix.
    case None => prefix.getOrElse("")
  }

  override def hasNext: Boolean = {
    val it = RocksDBStore.scanKeysOnly(rocksIt, compositeKeyFor(currentKey), prefix)
    // Skip the exact-hit case described above.
    // NOTE(review): VersionedKey(it.peek).get assumes every stored key parses — confirm.
    if (it.hasNext && currentKey.isDefined && currentKey.contains(VersionedKey(it.peek).get.key)) it.next()
    it.hasNext
  }

  override def next(): String = {
    val it = RocksDBStore.scanKeysOnly(rocksIt, compositeKeyFor(currentKey), prefix)
    if (it.hasNext && currentKey.isDefined && currentKey.contains(VersionedKey(it.peek).get.key)) it.next()
    val nextKey = VersionedKey(it.next()).get.key
    currentKey = Some(nextKey)
    nextKey
  }

}
95 |
96 |
/** Versioned key-value façade over a [[RocksDBStore]].
  *
  * Every logical key is stored under one composite key per version (see
  * VersionedKey.asString); that layout makes a forward scan visit the versions
  * of a single key in *descending* order, which all lookups below rely on.
  *
  * Fix: in getMultipleVersions the result of toListIter was destructured as
  * `(versions, keys)` although it returns (values, versions) — the behavior
  * happened to be correct, but the names were swapped and misleading; renamed.
  */
class VersionedKeyValueStore(underlying: RocksDBStore) {

  def withRawRocksIterator[T](block: RocksIterator => T): T = underlying.withRawRocksIterator(block)

  /** Returns the newest entry of `key` with version <= `version`
    * (or the newest entry overall when `version` is None).
    */
  def get(rocksIt: RocksIterator, key: String, version: Option[Long] = None): Option[VersionedKeyValuePair[Array[Byte]]] =
    scanVersionValuePairs(rocksIt, key, version).nextOption()

  /** Returns all stored values and versions of `key` within the inclusive
    * bounds [oldestVersion, newestVersion], both lists in descending version
    * order.
    */
  def getMultipleVersions(rocksIt: RocksIterator, key: String, oldestVersion: Option[Long] = None, newestVersion: Option[Long] = None): (List[Array[Byte]], List[Long]) = {

    @tailrec
    def toListIter(versionIterator: Iterator[VersionedKeyValuePair[Array[Byte]]],
                   accValues: List[Array[Byte]], accVersions: List[Long]): (List[Array[Byte]], List[Long]) = {
      if (!versionIterator.hasNext) (accValues, accVersions)
      else {
        val item = versionIterator.next()
        // The scan yields versions newest-first, so the first version below
        // the lower bound ends the traversal.
        if (item.version < oldestVersion.getOrElse(0L)) (accValues, accVersions)
        else toListIter(versionIterator, item.value :: accValues, item.version :: accVersions)
      }
    }

    val iterator = scanVersionValuePairs(rocksIt, key, newestVersion)
    // toListIter returns (values, versions); name them accordingly.
    val (values, versions) = toListIter(iterator, List(), List())
    // The accumulators were built by prepending; reverse to restore the
    // descending version order of the scan.
    (values.reverse, versions.reverse)
  }

  /** Scans the (value-carrying) versions of `key`, newest first, starting at
    * `version` if given. Entries with unparseable composite keys are dropped.
    */
  private def scanVersionValuePairs(rocksIt: RocksIterator, key: String, version: Option[Long] = None): Iterator[VersionedKeyValuePair[Array[Byte]]] = {
    requireValidKey(key)
    val prefix = s"$key${VersionedKey.versionSeparator}"
    RocksDBStore.scan(rocksIt, version.map(VersionedKey(key, _).toString).getOrElse(prefix), Some(prefix)).flatMap { pair =>
      VersionedKey(pair.key).map(VersionedKeyValuePair(_, pair.value))
    }
  }

  /** Like scanVersionValuePairs, but touches keys only (no value reads). */
  private def scanVersionsOnly(rocksIt: RocksIterator, key: String, version: Option[Long] = None): Iterator[VersionedKey] = {
    requireValidKey(key)
    val prefix = s"$key${VersionedKey.versionSeparator}"
    RocksDBStore.scanKeysOnly(rocksIt, version.map(VersionedKey(key, _).toString).getOrElse(prefix), Some(prefix)).flatMap { key =>
      VersionedKey(key)
    }
  }

  /** Pages through logical keys (newest version <= `version` each), returning
    * parallel sequences of keys, values, and actual versions.
    */
  def getMultipleKeys(rocksIt: RocksIterator, startAfterKey: Option[String], prefix: Option[String] = None, version: Option[Long] = None, limit: Option[Int]): (Seq[String], Seq[Array[Byte]], Seq[Long]) = {
    startAfterKey.foreach(requireValidKey)
    prefix.foreach{ p => requireValidKey(p)}
    val iterator: VersionFilterIterator = scanKeys(rocksIt, startAfterKey, prefix, version)

    /*
    Note that seek in the underlying iterators either hits precisely or goes to the
    lexicographically *next* key. To achieve correct behavior with startAfterKey,
    we have to advance once in case of the exact hit.
    */
    val firstItemOpt: Option[VersionedKeyValuePair[Array[Byte]]] = if (iterator.hasNext) {
      val firstItem = iterator.next()
      if (startAfterKey.contains(firstItem.key)) {
        None
      } else {
        Some(firstItem)
      }
    } else None

    // Over-fetch by one so the final take(limit) is correct whether or not the
    // first item was discarded as the exact startAfterKey hit.
    val limitPadded = limit.map(_ + 1).getOrElse(Int.MaxValue)
    val asVector = iterator.take(limitPadded).toVector
    val asSequenceAdvancedIfNeeded = firstItemOpt.map(_ +: asVector).getOrElse(asVector).take(limit.getOrElse(Int.MaxValue))
    val keys = asSequenceAdvancedIfNeeded.map(_.key)
    val values = asSequenceAdvancedIfNeeded.map(_.value)
    val versions = asSequenceAdvancedIfNeeded.map(_.version)
    (keys, values, versions)
  }

  /** One-entry-per-key scan starting after `startAfterKey` (or at `prefix`). */
  private def scanKeys(rocksIt: RocksIterator, startAfterKey: Option[String], prefix: Option[String] = None, version: Option[Long] = None): VersionFilterIterator = {
    // Appending the separator seeks past plain prefixes of startAfterKey while
    // still landing within its own version range (handled by the caller).
    val fullKey = startAfterKey.map(key => s"$key${VersionedKey.versionSeparator}").orElse(prefix).getOrElse("")
    new VersionFilterIterator(RocksDBStore.scan(rocksIt, fullKey, prefix), version)
  }

  /** Deletes all versions of `key` within [oldestVersion, newestVersion]
    * (inclusive). The iterator operates on the pre-delete snapshot.
    */
  def deleteMultipleVersions(rocksIt: RocksIterator, key: String, oldestVersion: Option[Long] = None, newestVersion: Option[Long] = None): Unit = {
    @tailrec
    def deleteIter(versionIterator: Iterator[VersionedKey]): Unit = {
      if (versionIterator.hasNext) {
        val item = versionIterator.next()
        // Versions arrive newest-first; stop at the first one below the bound.
        if (item.version >= oldestVersion.getOrElse(0L)) {
          delete(item.key, item.version)
          deleteIter(versionIterator)
        }
      }
    }

    val versionsIterator = scanVersionsOnly(rocksIt, key, newestVersion)
    deleteIter(versionsIterator)
  }

  /** Deletes every version of every key starting with `prefix`. */
  def deleteAllByPrefix(rocksIt: RocksIterator, prefix: String): Unit = {
    RocksDBStore.scanKeysOnly(rocksIt, prefix, Some(prefix)).foreach(underlying.delete)
  }

  /** Stores `value` under `key` at exactly `version` (overwrites). */
  def put(key: String, version: Long, value: Array[Byte]): Unit = {
    requireValidKey(key)
    underlying.put(VersionedKey.asString(key, version), value)
  }

  /** Deletes exactly one (key, version) entry. */
  def delete(key: String, version: Long): Unit = {
    requireValidKey(key)
    underlying.delete(VersionedKey.asString(key, version))
  }

  /** Lists up to `limit` distinct logical keys after `startAfterKey`,
    * optionally restricted to `prefix`.
    */
  def listKeys(rocksIt: RocksIterator, limit: Option[Int], startAfterKey: Option[String], prefix: Option[String]): Seq[String] = {
    val iterator = new KeyOnlyIterator(rocksIt, startAfterKey, prefix)
    iterator.take(limit.getOrElse(Int.MaxValue)).toSeq
  }

  /** Lists the versions of `key` (descending), honoring offset/limit paging. */
  def listVersions(rocksIt: RocksIterator, key: String, limit: Option[Int], offset: Option[Int]): Seq[Long] = {
    val iterator = scanVersionsOnly(rocksIt, key)
    iterator.map(_.version).drop(offset.getOrElse(0)).take(limit.getOrElse(Int.MaxValue)).toSeq
  }

  // The separator char would corrupt the composite-key layout, so reject it.
  private def requireValidKey(key: String): Unit = {
    require(!key.contains(VersionedKey.versionSeparator), s"keys cannot contain the char ${VersionedKey.versionSeparator}")
  }

}
216 |
--------------------------------------------------------------------------------
/src/test/resources/.gitignore:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/scalableminds/fossildb/8b57bfe1b752dfbf4978752d3b3fb74a44c4dcd2/src/test/resources/.gitignore
--------------------------------------------------------------------------------
/src/test/resources/logback-test.xml:
--------------------------------------------------------------------------------
<configuration>
  <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
    <encoder>
      <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
    </encoder>
  </appender>
  <root>
    <appender-ref ref="STDOUT" />
  </root>
</configuration>
--------------------------------------------------------------------------------
/src/test/scala/.gitignore:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/scalableminds/fossildb/8b57bfe1b752dfbf4978752d3b3fb74a44c4dcd2/src/test/scala/.gitignore
--------------------------------------------------------------------------------
/src/test/scala/com/scalableminds/fossildb/FossilDBSuite.scala:
--------------------------------------------------------------------------------
1 | package com.scalableminds.fossildb
2 |
3 | import java.io.File
4 | import java.nio.file.Paths
5 | import com.google.protobuf.ByteString
6 | import com.scalableminds.fossildb.db.StoreManager
7 | import com.scalableminds.fossildb.proto.fossildbapi._
8 | import com.typesafe.scalalogging.LazyLogging
9 | import io.grpc.health.v1._
10 | import io.grpc.netty.NettyChannelBuilder
11 | import org.scalatest.BeforeAndAfterEach
12 | import org.scalatest.flatspec.AnyFlatSpec
13 |
14 | import scala.concurrent.ExecutionContext
15 |
16 | class FossilDBSuite extends AnyFlatSpec with BeforeAndAfterEach with TestHelpers with LazyLogging {
  // Scratch directory for this suite; recreated before and wiped after each test.
  private val testTempDir = "testData1"
  private val dataDir = Paths.get(testTempDir, "data")
  private val backupDir = Paths.get(testTempDir, "backup")

  // Server under test plus a plaintext gRPC channel and blocking stubs.
  private val port = 21505
  private var serverOpt: Option[FossilDBServer] = None
  private val channel = NettyChannelBuilder.forAddress("127.0.0.1", port).maxInboundMessageSize(Int.MaxValue).usePlaintext().build
  private val client = FossilDBGrpc.blockingStub(channel)
  private val healthClient = HealthGrpc.newBlockingStub(channel)
  // Column families created for every per-test database.
  private val collectionA = "collectionA"
  private val collectionB = "collectionB"

  // Fixture payloads and keys shared by the tests below.
  private val testData1 = ByteString.copyFromUtf8("testData1")
  private val testData2 = ByteString.copyFromUtf8("testData2")
  private val testData3 = ByteString.copyFromUtf8("testData3")

  private val aKey = "aKey"
  private val aNotherKey = "aNotherKey"
  private val aThirdKey = "aThirdKey"
36 |
37 | override def beforeEach(): Unit = {
38 | deleteRecursively(new File(testTempDir))
39 | new File(testTempDir).mkdir()
40 |
41 | val columnFamilies = List(collectionA, collectionB)
42 |
43 | val storeManager = new StoreManager(dataDir, backupDir, columnFamilies, None)
44 |
45 | serverOpt.foreach(_.stop())
46 | serverOpt = Some(new FossilDBServer(storeManager, port, ExecutionContext.global))
47 | serverOpt.foreach(_.start())
48 | }
49 |
50 | override def afterEach(): Unit = {
51 | serverOpt.foreach(_.stop())
52 | deleteRecursively(new File(testTempDir))
53 | }
54 |
  // --- Health endpoints ---

  "Health" should "reply" in {
    val reply = client.health(HealthRequest())
    assert(reply.success)
  }

  "GRPC Standard Health Check" should "report SERVING" in {
    val reply = healthClient.check(HealthCheckRequest.getDefaultInstance)
    assert(reply.getStatus.toString == "SERVING")
  }

  // --- Put semantics ---

  "Put" should "overwrite old value" in {
    client.put(PutRequest(collectionA, aKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aKey, Some(0), testData2))
    val reply = client.get(GetRequest(collectionA, aKey, Some(0)))
    assert(testData2 == reply.value)
  }

  "PutMultipleVersions" should "overwrite old values, leave others untouched" in {
    client.put(PutRequest(collectionA, aKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aKey, Some(2), testData1))
    client.putMultipleVersions(PutMultipleVersionsRequest(collectionA, aKey, Seq(1,2,3), Seq(testData2, testData3, testData3)))
    // Versions come back newest-first.
    val reply = client.getMultipleVersions(GetMultipleVersionsRequest(collectionA, aKey))
    assert(reply.values.length == 4)
    assert(reply.versions == Seq(3,2,1,0))
    assert(reply.values == Seq(testData3, testData3, testData2, testData1))
  }

  it should "fail on non-existent collection" in {
    val reply = client.put(PutRequest("nonExistentCollection", aKey, Some(0), testData1))
    assert(!reply.success)
  }

  it should "increment version if none is supplied" in {
    client.put(PutRequest(collectionA, aKey, Some(4), testData1))
    client.put(PutRequest(collectionA, aKey, None, testData1))
    val reply = client.get(GetRequest(collectionA, aKey))
    assert(reply.actualVersion == 5)
  }

  it should "start at version 0 if none is supplied" in {
    client.put(PutRequest(collectionA, aKey, None, testData1))
    val reply = client.get(GetRequest(collectionA, aKey))
    assert(reply.actualVersion == 0)
  }

  "PutMultipleKeysWithMultipleVersions" should "write all versions of all specified keys" in {
    client.putMultipleKeysWithMultipleVersions(PutMultipleKeysWithMultipleVersionsRequest(collectionA, Seq(VersionedKeyValuePairProto(aKey, 0, testData1), VersionedKeyValuePairProto(aKey, 2, testData2), VersionedKeyValuePairProto(aNotherKey, 5, testData3))))
    val reply = client.get(GetRequest(collectionA, aKey))
    assert(reply.actualVersion == 2)
    val reply2 = client.get(GetRequest(collectionA, aKey, version = Some(0)))
    assert(reply2.actualVersion == 0)
    val reply3 = client.get(GetRequest(collectionA, aNotherKey))
    assert(reply3.actualVersion == 5)
    assert(reply3.value == testData3)
  }

  // --- Get semantics (version resolution falls back to closest older version) ---

  "Get" should "return matching value after matching Put" in {
    client.put(PutRequest(collectionA, aKey, Some(0), testData1))
    val reply = client.get(GetRequest(collectionA, aKey, Some(0)))
    assert(testData1 == reply.value)
  }

  it should "return matching value after multiple versioned Puts" in {
    client.put(PutRequest(collectionA, aKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aKey, Some(5), testData1))
    client.put(PutRequest(collectionA, aKey, Some(2), testData2))
    val reply = client.get(GetRequest(collectionA, aKey, Some(2)))
    assert(testData2 == reply.value)
  }

  it should "return value of closest older version" in {
    client.put(PutRequest(collectionA, aKey, Some(2), testData1))
    client.put(PutRequest(collectionA, aKey, Some(5), testData2))

    val reply = client.get(GetRequest(collectionA, aKey, Some(7)))
    assert(testData2 == reply.value)
  }

  it should "fail if called on empty db" in {
    val reply = client.get(GetRequest(collectionA, aKey))
    assert(!reply.success)
  }

  it should "fail after Put with other key" in {
    client.put(PutRequest(collectionA, aNotherKey, Some(0), testData1))
    val reply = client.get(GetRequest(collectionA, aKey))
    assert(!reply.success)
  }

  it should "fail after Put with only newer version" in {
    client.put(PutRequest(collectionA, aKey, Some(5), testData1))
    val reply = client.get(GetRequest(collectionA, aKey, Some(3)))
    assert(!reply.success)
  }
149 |
  // --- Delete semantics ---

  "Delete" should "delete a value at specific version" in {
    client.put(PutRequest(collectionA, aKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aKey, Some(1), testData2))
    client.delete(DeleteRequest(collectionA, aKey, 1))
    // After deleting version 1, a get at version 1 falls back to version 0.
    val reply = client.get(GetRequest(collectionA, aKey, Some(1)))
    assert(testData1 == reply.value)
  }

  "DeleteAllByPrefix" should "delete all versions of all values matching this prefix" in {
    client.put(PutRequest(collectionA, "prefixedA", Some(0), testData1))
    client.put(PutRequest(collectionA, "prefixedA", Some(1), testData1))
    client.put(PutRequest(collectionA, "prefixedB", Some(0), testData2))
    client.put(PutRequest(collectionA, "prefixedC", Some(0), testData2))
    client.put(PutRequest(collectionA, "differentKey", Some(0), testData2))
    client.put(PutRequest(collectionA, "differentKey", Some(1), testData2))
    client.put(PutRequest(collectionA, "yetDifferentKey", Some(0), testData2))
    client.deleteAllByPrefix(DeleteAllByPrefixRequest(collectionA, "prefixed"))
    val reply = client.listKeys(ListKeysRequest(collectionA))
    assert(reply.keys.length == 2)
    assert(reply.keys.contains("differentKey"))
    assert(reply.keys.contains("yetDifferentKey"))
  }

  // --- ListKeys: distinct keys only, collection-scoped, with paging/prefix ---

  "ListKeys" should "list all keys of a collection" in {
    client.put(PutRequest(collectionA, aKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aKey, Some(1), testData2))
    client.put(PutRequest(collectionA, aNotherKey, Some(4), testData2))
    client.put(PutRequest(collectionB, aThirdKey, Some(1), testData1))
    val reply = client.listKeys(ListKeysRequest(collectionA))
    assert(reply.keys.contains(aKey))
    assert(reply.keys.contains(aNotherKey))
    assert(reply.keys.length == 2)
  }

  it should "support pagination with startAfterKey" in {
    client.put(PutRequest(collectionA, aKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aKey, Some(1), testData2))
    client.put(PutRequest(collectionA, aNotherKey, Some(4), testData2))
    client.put(PutRequest(collectionB, aThirdKey, Some(1), testData1))
    val reply = client.listKeys(ListKeysRequest(collectionA, Some(1)))
    assert(reply.keys.length == 1)
    assert(reply.keys.contains(aKey))
    // Second page starts after the last key of the first page.
    val reply2 = client.listKeys(ListKeysRequest(collectionA, Some(1), Some(reply.keys.last)))
    assert(reply2.keys.contains(aNotherKey))
    assert(reply2.keys.length == 1)
  }

  it should "return all keys despite lexicographic similarity" in {
    client.put(PutRequest(collectionA, "abb/1/1-[1,1,1]", Some(1), testData1))
    client.put(PutRequest(collectionA, "abc/1/1481800838-[3600,2717,121]", Some(123), testData2))
    client.put(PutRequest(collectionA, "abc/1/1481800839-[3601,2717,121]", Some(123), testData3))
    client.put(PutRequest(collectionA, "abc/1/1481800839-[3601,2717,121]", Some(125), testData3))
    client.put(PutRequest(collectionA, "abc/1/1481800839-[3601,2717,121]", Some(128), testData3))
    client.put(PutRequest(collectionA, "abc/1/1481800846-[3602,2717,121]", Some(123), testData2))

    // startAfterKey "abb" must skip keys merely starting with it: the three
    // distinct abc/... keys remain.
    val reply = client.listKeys(ListKeysRequest(collectionA, None, Some("abb")))
    assert(reply.keys.length == 3)
  }

  it should "respect prefix argument" in {
    client.put(PutRequest(collectionA, "123456", Some(1), testData1))
    client.put(PutRequest(collectionA, "123457", Some(123), testData2))
    client.put(PutRequest(collectionA, "12345800", Some(123), testData3))
    client.put(PutRequest(collectionA, "12345801", Some(123), testData3))
    client.put(PutRequest(collectionA, "12345802", Some(123), testData3))
    client.put(PutRequest(collectionA, "123458", Some(123), testData3))
    client.put(PutRequest(collectionA, "123459", Some(123), testData3))

    // Prefix match includes the key equal to the prefix itself.
    val reply = client.listKeys(ListKeysRequest(collectionA, None, None, prefix = Some("123458")))
    assert(reply.keys.length == 4)
    assert(reply.keys(0) == "12345800")
    assert(reply.keys(1) == "12345801")
  }

  it should "respect prefix argument and startAfterKey together" in {
    client.put(PutRequest(collectionA, "123456", Some(1), testData1))
    client.put(PutRequest(collectionA, "123457", Some(123), testData2))
    client.put(PutRequest(collectionA, "12345800", Some(123), testData3))
    client.put(PutRequest(collectionA, "12345801", Some(123), testData3))
    client.put(PutRequest(collectionA, "12345802", Some(123), testData3))
    client.put(PutRequest(collectionA, "123458", Some(123), testData3))
    client.put(PutRequest(collectionA, "123459", Some(123), testData3))

    val reply = client.listKeys(ListKeysRequest(collectionA, None, startAfterKey = Some("12345800"), prefix = Some("123458")))
    assert(reply.keys.length == 3)
    assert(reply.keys(0) == "12345801")
    assert(reply.keys(1) == "12345802")
    // "123458" sorts after "1234580x" in composite-key order.
    assert(reply.keys(2) == "123458")
  }
239 |
  // --- GetMultipleVersions: all versions of one key, descending ---

  "GetMultipleVersions" should "return all versions in descending order if called without limits" in {
    client.put(PutRequest(collectionA, aKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aKey, Some(1), testData2))
    client.put(PutRequest(collectionA, aKey, Some(2), testData3))
    client.put(PutRequest(collectionA, aNotherKey, Some(0), testData1))
    val reply = client.getMultipleVersions(GetMultipleVersionsRequest(collectionA, aKey))
    assert(reply.versions(0) == 2)
    assert(reply.versions(1) == 1)
    assert(reply.versions(2) == 0)
    assert(reply.values(0) == testData3)
    assert(reply.values(1) == testData2)
    assert(reply.values(2) == testData1)
    assert(reply.versions.length == 3)
    assert(reply.values.length == 3)
  }

  it should "return versions specified by bounds (inclusive)" in {
    client.put(PutRequest(collectionA, aKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aKey, Some(1), testData2))
    client.put(PutRequest(collectionA, aKey, Some(3), testData3))
    client.put(PutRequest(collectionA, aKey, Some(4), testData1))
    client.put(PutRequest(collectionA, aKey, Some(5), testData1))
    client.put(PutRequest(collectionA, aNotherKey, Some(0), testData1))

    // Bounds are (newestVersion=4, oldestVersion=2): versions 4 and 3 qualify.
    val reply = client.getMultipleVersions(GetMultipleVersionsRequest(collectionA, aKey, Some(4), Some(2)))
    assert(reply.versions(0) == 4)
    assert(reply.versions(1) == 3)
    assert(reply.values(0) == testData1)
    assert(reply.values(1) == testData3)
    assert(reply.versions.length == 2)
    assert(reply.values.length == 2)
  }

  // --- GetMultipleKeys: one (newest <= version) entry per key, with paging ---

  "GetMultipleKeys" should "return all keys" in {
    client.put(PutRequest(collectionA, aKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aNotherKey, Some(0), testData2))
    client.put(PutRequest(collectionA, aThirdKey, Some(0), testData3))
    val reply = client.getMultipleKeys(GetMultipleKeysRequest(collectionA))
    assert(reply.keys.length == 3)
    assert(reply.keys.contains(aNotherKey))
    assert(reply.keys.contains(aThirdKey))
    assert(reply.values.length == 3)
    assert(reply.values.contains(testData2))
    assert(reply.values.contains(testData3))
    assert(reply.actualVersions.length == 3)
    assert(reply.actualVersions.contains(0))
  }

  it should "return keys of matching version" in {
    client.put(PutRequest(collectionA, aKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aNotherKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aThirdKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aKey, Some(1), testData2))
    client.put(PutRequest(collectionA, aNotherKey, Some(1), testData2))
    client.put(PutRequest(collectionA, aThirdKey, Some(1), testData2))
    client.put(PutRequest(collectionA, aKey, Some(2), testData3))
    client.put(PutRequest(collectionA, aNotherKey, Some(2), testData3))
    client.put(PutRequest(collectionA, aThirdKey, Some(2), testData3))
    val reply = client.getMultipleKeys(GetMultipleKeysRequest(collectionA, None, None, Some(1)))
    assert(reply.keys.length == 3)
    assert(reply.values.contains(testData2))
  }

  it should "return keys of matching version, matching prefix" in {
    client.put(PutRequest(collectionA, aKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aNotherKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aThirdKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aKey, Some(1), testData2))
    client.put(PutRequest(collectionA, aNotherKey, Some(1), testData2))
    client.put(PutRequest(collectionA, aThirdKey, Some(1), testData2))
    client.put(PutRequest(collectionA, aKey, Some(2), testData3))
    client.put(PutRequest(collectionA, aNotherKey, Some(2), testData3))
    client.put(PutRequest(collectionA, aThirdKey, Some(2), testData3))
    val reply = client.getMultipleKeys(GetMultipleKeysRequest(collectionA, None, Some("aN"), Some(1)))
    assert(reply.keys.length == 1)
    assert(reply.keys.contains(aNotherKey))
    assert(reply.values.contains(testData2))
    assert(reply.actualVersions.contains(1))
  }

  it should "return keys of matching version, matching prefix even if it is exact match" in {
    client.put(PutRequest(collectionA, aKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aNotherKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aThirdKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aKey, Some(1), testData2))
    client.put(PutRequest(collectionA, aNotherKey, Some(1), testData2))
    client.put(PutRequest(collectionA, aThirdKey, Some(1), testData2))
    client.put(PutRequest(collectionA, aKey, Some(2), testData3))
    client.put(PutRequest(collectionA, aNotherKey, Some(2), testData3))
    client.put(PutRequest(collectionA, aThirdKey, Some(2), testData3))
    val reply = client.getMultipleKeys(GetMultipleKeysRequest(collectionA, None, Some(aNotherKey), Some(1)))
    assert(reply.keys.length == 1)
    assert(reply.keys.contains(aNotherKey))
    assert(reply.values.contains(testData2))
    assert(reply.actualVersions.contains(1))
  }

  it should "with limit return only the first n keys of matching version " in {
    client.put(PutRequest(collectionA, aKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aNotherKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aThirdKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aKey, Some(1), testData2))
    client.put(PutRequest(collectionA, aNotherKey, Some(1), testData2))
    client.put(PutRequest(collectionA, aThirdKey, Some(1), testData2))
    client.put(PutRequest(collectionA, aKey, Some(2), testData3))
    client.put(PutRequest(collectionA, aNotherKey, Some(2), testData3))
    client.put(PutRequest(collectionA, aThirdKey, Some(2), testData3))
    val reply = client.getMultipleKeys(GetMultipleKeysRequest(collectionA, None, None, Some(1), Some(2)))
    assert(reply.keys.length == 2)
    assert(reply.values.contains(testData2))
    assert(reply.actualVersions.contains(1))
  }

  it should "support pagination with startAfterKey" in {
    client.put(PutRequest(collectionA, aKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aNotherKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aThirdKey, Some(0), testData1))
    // startAfterKey is exclusive: aKey itself is skipped.
    val reply = client.getMultipleKeys(GetMultipleKeysRequest(collectionA, Some(aKey), None, None, Some(2)))
    assert(reply.keys.length == 2)
    assert(reply.values.contains(testData1))
    assert(reply.actualVersions.contains(0))
  }

  it should "support pagination with startAfterKey, with prefix and version" in {
    client.put(PutRequest(collectionA, aKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aNotherKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aThirdKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aKey, Some(1), testData2))
    client.put(PutRequest(collectionA, aNotherKey, Some(1), testData2))
    client.put(PutRequest(collectionA, aThirdKey, Some(1), testData2))
    client.put(PutRequest(collectionA, aKey, Some(2), testData3))
    client.put(PutRequest(collectionA, aNotherKey, Some(2), testData3))
    client.put(PutRequest(collectionA, aThirdKey, Some(2), testData3))
    val reply = client.getMultipleKeys(GetMultipleKeysRequest(collectionA, Some(aKey), Some("a"), Some(1), Some(1)))
    assert(reply.keys.length == 1)
    assert(reply.values.contains(testData2))
    assert(reply.actualVersions.contains(1))
  }

  it should "support pagination with startAfterKey, with prefix and version where no keys match the prefix" in {
    client.put(PutRequest(collectionA, aKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aNotherKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aThirdKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aKey, Some(1), testData2))
    client.put(PutRequest(collectionA, aNotherKey, Some(1), testData2))
    client.put(PutRequest(collectionA, aThirdKey, Some(1), testData2))
    client.put(PutRequest(collectionA, aKey, Some(2), testData3))
    client.put(PutRequest(collectionA, aNotherKey, Some(2), testData3))
    client.put(PutRequest(collectionA, aThirdKey, Some(2), testData3))
    val reply = client.getMultipleKeys(GetMultipleKeysRequest(collectionA, Some(aKey), Some("BogusPrefix"), Some(1), Some(2)))
    assert(reply.keys.isEmpty)
  }
392 |
  // --- GetMultipleKeysByListWithMultipleVersions: explicit key list, all versions ---

  "GetMultipleKeysByListWithVersions" should "return selected keys with versions in descending order" in {
    client.put(PutRequest(collectionA, aKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aNotherKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aThirdKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aKey, Some(1), testData2))
    client.put(PutRequest(collectionA, aNotherKey, Some(1), testData2))
    client.put(PutRequest(collectionA, aThirdKey, Some(1), testData2))
    client.put(PutRequest(collectionA, aKey, Some(2), testData3))
    client.put(PutRequest(collectionA, aNotherKey, Some(2), testData3))
    client.put(PutRequest(collectionA, aThirdKey, Some(2), testData3))
    val reply = client.getMultipleKeysByListWithMultipleVersions(GetMultipleKeysByListWithMultipleVersionsRequest(collectionA, keys = Seq(aNotherKey, aThirdKey)))
    // Only the requested keys are returned, in request order; aKey is absent.
    assert(reply.keyVersionsValuesPairs.map(_.key) == Seq(aNotherKey, aThirdKey))
    assert(reply.keyVersionsValuesPairs(0).versionValuePairs.length == 3)
    assert(reply.keyVersionsValuesPairs(1).versionValuePairs.length == 3)
    assert(reply.keyVersionsValuesPairs(0).versionValuePairs(0) == VersionValuePairProto(2L, testData3))
    assert(reply.keyVersionsValuesPairs(0).versionValuePairs(1) == VersionValuePairProto(1L, testData2))
    assert(reply.keyVersionsValuesPairs(0).versionValuePairs(2) == VersionValuePairProto(0L, testData1))
    assert(reply.keyVersionsValuesPairs(1).versionValuePairs(0) == VersionValuePairProto(2L, testData3))
    assert(reply.keyVersionsValuesPairs(1).versionValuePairs(1) == VersionValuePairProto(1L, testData2))
    assert(reply.keyVersionsValuesPairs(1).versionValuePairs(2) == VersionValuePairProto(0L, testData1))
  }

  it should "limit the versions if specified" in {
    client.put(PutRequest(collectionA, aKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aNotherKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aThirdKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aKey, Some(1), testData2))
    client.put(PutRequest(collectionA, aNotherKey, Some(1), testData2))
    client.put(PutRequest(collectionA, aThirdKey, Some(1), testData2))
    client.put(PutRequest(collectionA, aKey, Some(2), testData3))
    client.put(PutRequest(collectionA, aNotherKey, Some(2), testData3))
    client.put(PutRequest(collectionA, aThirdKey, Some(2), testData3))
    // newestVersion == oldestVersion == 1 selects exactly version 1.
    val reply = client.getMultipleKeysByListWithMultipleVersions(GetMultipleKeysByListWithMultipleVersionsRequest(collectionA, keys = Seq(aNotherKey, aThirdKey), newestVersion = Some(1), oldestVersion = Some(1)))
    assert(reply.keyVersionsValuesPairs.map(_.key) == Seq(aNotherKey, aThirdKey))
    assert(reply.keyVersionsValuesPairs(0).versionValuePairs.length == 1)
    assert(reply.keyVersionsValuesPairs(1).versionValuePairs.length == 1)
    assert(reply.keyVersionsValuesPairs(0).versionValuePairs(0) == VersionValuePairProto(1L, testData2))
    assert(reply.keyVersionsValuesPairs(1).versionValuePairs(0) == VersionValuePairProto(1L, testData2))
  }

  it should "return an empty list if no versions match" in {
    client.put(PutRequest(collectionA, aKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aNotherKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aKey, Some(1), testData2))
    client.put(PutRequest(collectionA, aNotherKey, Some(1), testData2))
    client.put(PutRequest(collectionA, aKey, Some(5), testData3))
    client.put(PutRequest(collectionA, aNotherKey, Some(5), testData3))
    // Inverted/empty bounds (newest=3, oldest=4) match nothing.
    val reply = client.getMultipleKeysByListWithMultipleVersions(GetMultipleKeysByListWithMultipleVersionsRequest(collectionA, keys = Seq(aNotherKey, aThirdKey, aThirdKey), newestVersion = Some(3), oldestVersion = Some(4)))
    assert(reply.keyVersionsValuesPairs.isEmpty)
  }

  // --- GetMultipleKeysByList: one optional (version, value) box per requested key ---

  "GetMultipleKeysByList" should "return version-value tuples for existing, and empty for missing keys" in {
    client.put(PutRequest(collectionA, aKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aNotherKey, Some(0), testData2))
    client.put(PutRequest(collectionA, aNotherKey, Some(1), testData3))
    val reply = client.getMultipleKeysByList(GetMultipleKeysByListRequest(collectionA, keys = Seq(aKey, aNotherKey, aThirdKey)))
    // One box per requested key, positionally aligned; missing keys yield empty boxes.
    assert(reply.versionValueBoxes.length == 3)
    assert(reply.versionValueBoxes(0).versionValuePair.exists(_.value == testData1))
    assert(reply.versionValueBoxes(1).versionValuePair.exists(_.value == testData3))
    assert(reply.versionValueBoxes(1).versionValuePair.exists(_.actualVersion == 1))
    assert(reply.versionValueBoxes(2).versionValuePair.isEmpty)
  }

  it should "not return something newer than the requested version" in {
    client.put(PutRequest(collectionA, aKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aNotherKey, Some(0), testData1))
    client.put(PutRequest(collectionA, aNotherKey, Some(1), testData2))
    client.put(PutRequest(collectionA, aNotherKey, Some(2), testData3))
    client.put(PutRequest(collectionA, aThirdKey, Some(2), testData3))
    val reply = client.getMultipleKeysByList(GetMultipleKeysByListRequest(collectionA, keys = Seq(aKey, aNotherKey, aThirdKey), version = Some(1)))
    assert(reply.versionValueBoxes.length == 3)
    assert(reply.versionValueBoxes(0).versionValuePair.exists(_.value == testData1))
    assert(reply.versionValueBoxes(0).versionValuePair.exists(_.actualVersion == 0))
    assert(reply.versionValueBoxes(1).versionValuePair.exists(_.value == testData2))
    assert(reply.versionValueBoxes(1).versionValuePair.exists(_.actualVersion == 1))
    // aThirdKey only exists at version 2 > requested 1, so its box is empty.
    assert(reply.versionValueBoxes(2).versionValuePair.isEmpty)
  }
470 |
471 | it should "return only empty boxes if nothing matches" in {
472 | client.put(PutRequest(collectionA, aKey, Some(2), testData1))
473 | client.put(PutRequest(collectionA, aNotherKey, Some(2), testData1))
474 | val reply = client.getMultipleKeysByList(GetMultipleKeysByListRequest(collectionA, keys = Seq(aKey, aNotherKey, aThirdKey), version = Some(1)))
475 | assert(reply.versionValueBoxes.length == 3)
476 | assert(reply.versionValueBoxes.forall(_.versionValuePair.isEmpty))
477 | }
478 |
479 | "Backup" should "create non-empty backup directory" in {
480 | client.put(PutRequest(collectionA, aKey, Some(0), testData1))
481 | client.backup(BackupRequest())
482 | val dir = new File(backupDir.toString)
483 | assert(dir.exists)
484 | assert(dir.isDirectory)
485 | assert(dir.listFiles.length > 0)
486 | }
487 |
488 | "Restore" should "fail if there are no backups" in {
489 | val reply = client.restoreFromBackup(RestoreFromBackupRequest())
490 | assert(!reply.success)
491 | }
492 |
493 | it should "restore old state after backup" in {
494 | client.put(PutRequest(collectionA, aKey, Some(0), testData1))
495 | client.backup(BackupRequest())
496 | client.delete(DeleteRequest(collectionA, aKey, 0))
497 | client.restoreFromBackup(RestoreFromBackupRequest())
498 | val reply = client.get(GetRequest(collectionA, aKey, Some(0)))
499 | assert(testData1 == reply.value)
500 | }
501 |
502 | it should "restore even after deletion of data dir" in {
503 | client.put(PutRequest(collectionA, aKey, Some(0), testData1))
504 | client.backup(BackupRequest())
505 | deleteRecursively(new File(dataDir.toString))
506 | client.restoreFromBackup(RestoreFromBackupRequest())
507 | val reply = client.get(GetRequest(collectionA, aKey, Some(0)))
508 | assert(testData1 == reply.value)
509 | }
510 |
511 | "ListVersions" should "list all versions" in {
512 | client.put(PutRequest(collectionA, aKey, Some(0), testData1))
513 | client.put(PutRequest(collectionA, aKey, Some(2), testData1))
514 | client.put(PutRequest(collectionA, aKey, Some(3), testData1))
515 | client.put(PutRequest(collectionA, aNotherKey, Some(0), testData1))
516 | val reply = client.listVersions(ListVersionsRequest(collectionA, aKey))
517 | assert(reply.versions.length == 3)
518 | assert(reply.versions.contains(0))
519 | assert(!reply.versions.contains(1))
520 | assert(reply.versions.contains(2))
521 | assert(reply.versions.contains(3))
522 | }
523 |
524 | "ListVersions" should "support pagination" in {
525 | client.put(PutRequest(collectionA, aKey, Some(0), testData1))
526 | client.put(PutRequest(collectionA, aKey, Some(1), testData1))
527 | client.put(PutRequest(collectionA, aKey, Some(2), testData1))
528 | client.put(PutRequest(collectionA, aKey, Some(3), testData1))
529 | client.put(PutRequest(collectionA, aNotherKey, Some(0), testData1))
530 | val reply = client.listVersions(ListVersionsRequest(collectionA, aKey, offset = Some(1), limit = Some(2)))
531 | assert(reply.versions.length == 2)
532 | assert(!reply.versions.contains(0))
533 | assert(reply.versions.contains(1))
534 | assert(reply.versions.contains(2))
535 | }
536 |
537 | }
538 |
--------------------------------------------------------------------------------
/src/test/scala/com/scalableminds/fossildb/RocksOptionsSuite.scala:
--------------------------------------------------------------------------------
1 | package com.scalableminds.fossildb
2 |
3 | import java.io.File
4 | import java.nio.file.Paths
5 | import com.scalableminds.fossildb.db.StoreManager
6 | import org.rocksdb.{ColumnFamilyDescriptor, ConfigOptions, DBOptions, Env}
7 | import org.scalatest.BeforeAndAfterEach
8 | import org.scalatest.flatspec.AnyFlatSpec
9 |
10 | import scala.collection.mutable
11 | import scala.jdk.CollectionConverters.BufferHasAsJava
12 |
13 |
class RocksOptionsSuite extends AnyFlatSpec with BeforeAndAfterEach with TestHelpers {

  private val testTempDir = "testData2"
  private val dataDir = Paths.get(testTempDir, "data")
  private val backupDir = Paths.get(testTempDir, "backup")

  private val collectionA = "collectionA"
  private val collectionB = "collectionB"

  private val columnFamilies = List(collectionA, collectionB)

  // Every test starts from — and leaves behind — a clean scratch directory.
  override def beforeEach(): Unit = {
    deleteRecursively(new File(testTempDir))
    new File(testTempDir).mkdir()
  }

  override def afterEach(): Unit = {
    deleteRecursively(new File(testTempDir))
  }

  "Initializing the StoreManager" should "load and use a specified config file" in {
    val configFile = new File(testTempDir, "testConfig.ini")
    writeToFile(configFile, "[Version]\n rocksdb_version=5.11.3\n options_file_version=1.1\n\n[DBOptions]\n stats_dump_period_sec=700\n\n[CFOptions \"default\"]\n\n")

    val storeManager = new StoreManager(dataDir, backupDir, columnFamilies, Some(configFile.getPath))

    // On success, rocksdb persists the effective options next to the data. Reading them
    // back via loadLatestOptions lets us verify that the value 700 from the config file
    // above won over the default of 100 we seed here.
    val defaultOptions = new DBOptions().setStatsDumpPeriodSec(100)
    val descriptorBuffer: mutable.Buffer[ColumnFamilyDescriptor] = mutable.Buffer()
    val configOptions = new ConfigOptions()
    org.rocksdb.OptionsUtil.loadLatestOptions(configOptions, dataDir.toString, defaultOptions, descriptorBuffer.asJava)
    assert(defaultOptions.statsDumpPeriodSec() == 700)
    storeManager.close
  }

  it should "fail if specified config file does not exist" in {
    assertThrows[Exception] {
      new StoreManager(dataDir, backupDir, columnFamilies, Some("nonExistingPath.ini"))
    }
  }

  it should "fail if specified config file is invalid" in {
    // Truncated config: the mandatory [CFOptions "default"] section is missing.
    val configFile = new File(testTempDir, "testConfig.ini")
    writeToFile(configFile, "[Version]\n rocksdb_version=5.11.3\n options_file_version=1.1\n\n[DBOptions]\n stats_dump_period_sec=700")

    assertThrows[Exception] {
      new StoreManager(dataDir, backupDir, columnFamilies, Some(configFile.getPath))
    }
  }

}
70 |
--------------------------------------------------------------------------------
/src/test/scala/com/scalableminds/fossildb/TestHelpers.scala:
--------------------------------------------------------------------------------
1 | package com.scalableminds.fossildb
2 |
3 | import java.io.{BufferedWriter, File, FileWriter}
4 |
trait TestHelpers {

  /** Deletes `file` and, if it is a directory, everything beneath it.
    *
    * @throws Exception if any entry exists but cannot be deleted
    */
  protected def deleteRecursively(file: File): Unit = {
    if (file.isDirectory)
      // File.listFiles may return null (e.g. on an I/O error), so guard against NPE.
      Option(file.listFiles).foreach(_.foreach(deleteRecursively))
    if (file.exists && !file.delete)
      throw new Exception(s"Unable to delete ${file.getAbsolutePath}")
  }

  /** Writes `content` to `file` (overwriting any previous content),
    * closing the writer even if the write itself fails.
    */
  protected def writeToFile(file: File, content: String): Unit = {
    val bw = new BufferedWriter(new FileWriter(file))
    try bw.write(content)
    finally bw.close()
  }

}
21 |
--------------------------------------------------------------------------------