├── .gitignore ├── .editorconfig ├── kcache ├── src │ ├── test │ │ ├── resources │ │ │ └── log4j.properties │ │ └── java │ │ │ └── io │ │ │ └── kcache │ │ │ ├── utils │ │ │ ├── CustomPartitioner.java │ │ │ ├── StringUpdateHandler.java │ │ │ ├── EnumRecommenderTest.java │ │ │ ├── OffsetCheckpointTest.java │ │ │ ├── SSLClusterTestHarness.java │ │ │ ├── ClusterTestHarness.java │ │ │ └── SASLClusterTestHarness.java │ │ │ ├── KafkaDelegatingCacheTest.java │ │ │ ├── KafkaCacheSASLTest.java │ │ │ ├── KafkaCacheSSLTest.java │ │ │ ├── KafkaBoundedCacheTest.java │ │ │ ├── CacheUtils.java │ │ │ ├── KafkaPersistentCacheTest.java │ │ │ ├── KafkaReadOnlyCacheTest.java │ │ │ └── KafkaCacheOffsetTest.java │ └── main │ │ └── java │ │ └── io │ │ └── kcache │ │ ├── CacheLoader.java │ │ ├── exceptions │ │ ├── EntryTooLargeException.java │ │ ├── CacheException.java │ │ ├── CacheTimeoutException.java │ │ └── CacheInitializationException.java │ │ ├── utils │ │ ├── Streams.java │ │ ├── KeyComparator.java │ │ ├── KeyBytesComparator.java │ │ ├── KeyBufferComparator.java │ │ ├── ShutdownableThread.java │ │ ├── FileCheckpointHandler.java │ │ ├── EnumRecommender.java │ │ ├── InMemoryCache.java │ │ └── InMemoryBoundedCache.java │ │ ├── KeyValueIterator.java │ │ ├── CacheType.java │ │ ├── CheckpointHandler.java │ │ ├── KeyValue.java │ │ ├── CompositeCacheUpdateHandler.java │ │ ├── CacheUpdateHandler.java │ │ └── Cache.java └── pom.xml ├── .github ├── workflows │ └── build.yml └── dependabot.yml ├── kcache-rdbms ├── src │ ├── main │ │ └── java │ │ │ └── io │ │ │ └── kcache │ │ │ └── rdbms │ │ │ └── jooq │ │ │ ├── Tables.java │ │ │ ├── Keys.java │ │ │ ├── DefaultCatalog.java │ │ │ ├── Kcache.java │ │ │ └── tables │ │ │ ├── records │ │ │ └── KvRecord.java │ │ │ └── Kv.java │ └── test │ │ └── java │ │ └── io │ │ └── kcache │ │ └── rdbms │ │ ├── KafkaRdbmsCacheTest.java │ │ └── RdbmsCacheTest.java └── pom.xml ├── kcache-lmdb ├── src │ ├── test │ │ └── java │ │ │ └── io │ │ │ └── kcache │ │ │ └── lmdb │ │ │ ├── KafkaLmdbCacheTest.java │ │ │ └── LmdbCacheTest.java │ └── main │ │ └── java │ │ └── io │ │ └── kcache │ │ └── lmdb │ │ └── LmdbIterator.java └── pom.xml ├── kcache-bdbje ├── src │ ├── test │ │ └── java │ │ │ └── io │ │ │ └── kcache │ │ │ └── bdbje │ │ │ ├── KafkaBdbJECacheTest.java │ │ │ └── BdbJECacheTest.java │ └── main │ │ └── java │ │ └── io │ │ └── kcache │ │ └── bdbje │ │ └── SerdeWrapper.java └── pom.xml ├── kcache-mapdb ├── src │ ├── test │ │ └── java │ │ │ └── io │ │ │ └── kcache │ │ │ └── mapdb │ │ │ ├── KafkaMapDBCacheTest.java │ │ │ └── MapDBCacheTest.java │ └── main │ │ └── java │ │ └── io │ │ └── kcache │ │ └── mapdb │ │ ├── CustomSerializerByteArray.java │ │ └── MapDBCache.java └── pom.xml ├── kcache-rocksdb ├── src │ ├── test │ │ └── java │ │ │ └── io │ │ │ └── kcache │ │ │ └── rocksdb │ │ │ ├── KafkaRocksDBCacheTest.java │ │ │ └── RocksDBCacheTest.java │ └── main │ │ └── java │ │ └── io │ │ └── kcache │ │ └── rocksdb │ │ ├── RocksDBKeySliceComparator.java │ │ ├── RocksDBIterator.java │ │ └── RocksDBRangeIterator.java └── pom.xml ├── kcache-benchmark ├── README.md ├── pom.xml └── src │ └── main │ └── java │ └── io │ └── kcache │ └── benchmark │ └── Common.java ├── findbugs-exclude.xml ├── kcache-caffeine ├── pom.xml └── src │ ├── test │ └── java │ │ └── io │ │ └── kcache │ │ └── caffeine │ │ └── KafkaCaffeineCacheTest.java │ └── main │ └── java │ └── io │ └── kcache │ └── caffeine │ └── CaffeineCache.java └── README.md /.gitignore: 
-------------------------------------------------------------------------------- 1 | .idea 2 | *.iml 3 | lib_managed 4 | src_managed 5 | target 6 | *.ipr 7 | *.iws 8 | *.swp 9 | .DS_Store 10 | dependency-reduced-pom.xml 11 | derby.log 12 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | # http://editorconfig.org 2 | 3 | root = true 4 | 5 | [*] 6 | charset = utf-8 7 | indent_style = space 8 | 9 | [*.java] 10 | indent_style = space 11 | indent_size = 4 12 | 13 | [*.md] 14 | indent_style = space 15 | indent_size = 2 16 | 17 | [*.xml] 18 | indent_style = space 19 | indent_size = 4 20 | -------------------------------------------------------------------------------- /kcache/src/test/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | log4j.rootLogger=WARN, stdout 2 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 3 | log4j.appender.stdout.Target=System.out 4 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 5 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c - %m%n 6 | log4j.logger.io.kcache=TRACE, stdout 7 | log4j.additivity.io.kcache=false 8 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: build 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | build: 7 | runs-on: ubuntu-latest 8 | 9 | steps: 10 | - uses: actions/checkout@v2 11 | - name: Set up JDK 11 12 | uses: actions/setup-java@v2 13 | with: 14 | java-version: '11' 15 | distribution: 'adopt' 16 | - name: Build with Maven 17 | run: mvn -B package --file pom.xml 18 | -------------------------------------------------------------------------------- /kcache-rdbms/src/main/java/io/kcache/rdbms/jooq/Tables.java: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is generated by jOOQ. 3 | */ 4 | package io.kcache.rdbms.jooq; 5 | 6 | 7 | import io.kcache.rdbms.jooq.tables.Kv; 8 | 9 | 10 | /** 11 | * Convenience access to all tables in kcache. 12 | */ 13 | @SuppressWarnings({ "all", "unchecked", "rawtypes" }) 14 | public class Tables { 15 | 16 | /** 17 | * The table kcache.KV. 18 | */ 19 | public static final Kv KV = Kv.KV; 20 | } 21 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: maven 4 | directory: "/" 5 | schedule: 6 | interval: daily 7 | open-pull-requests-limit: 10 8 | ignore: 9 | - dependency-name: org.rocksdb:rocksdbjni 10 | versions: 11 | - 6.15.2 12 | - 6.15.5 13 | - 6.16.4 14 | - dependency-name: com.github.spotbugs:spotbugs-maven-plugin 15 | versions: 16 | - 4.2.2 17 | - dependency-name: com.github.ben-manes.caffeine:caffeine 18 | versions: 19 | - 2.9.0 20 | - 3.0.0 21 | -------------------------------------------------------------------------------- /kcache-rdbms/src/main/java/io/kcache/rdbms/jooq/Keys.java: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is generated by jOOQ. 
3 | */ 4 | package io.kcache.rdbms.jooq; 5 | 6 | 7 | import org.jooq.TableField; 8 | import org.jooq.UniqueKey; 9 | import org.jooq.impl.DSL; 10 | import org.jooq.impl.Internal; 11 | 12 | import io.kcache.rdbms.jooq.tables.Kv; 13 | import io.kcache.rdbms.jooq.tables.records.KvRecord; 14 | 15 | 16 | /** 17 | * A class modelling foreign key relationships and constraints of tables in 18 | * kcache. 19 | */ 20 | @SuppressWarnings({ "all", "unchecked", "rawtypes" }) 21 | public class Keys { 22 | 23 | // ------------------------------------------------------------------------- 24 | // UNIQUE and PRIMARY KEY definitions 25 | // ------------------------------------------------------------------------- 26 | 27 | public static final UniqueKey<KvRecord> KEY_KV_PRIMARY = Internal.createUniqueKey(Kv.KV, DSL.name("KEY_KV_PRIMARY"), new TableField[] { Kv.KV.KV_KEY }, true); 28 | }
-------------------------------------------------------------------------------- /kcache/src/main/java/io/kcache/CacheLoader.java: --------------------------------------------------------------------------------
1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | package io.kcache; 18 | 19 | public interface CacheLoader<K, V> { V load(K key); }
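CacheLoader is a single abstract method interface, so a loader can be supplied as a lambda. A minimal usage sketch (the loader logic is invented for illustration; only the CacheLoader interface above is from the source):

```java
import io.kcache.CacheLoader;

public class CacheLoaderExample {
    public static void main(String[] args) {
        // A loader that computes a value on demand, e.g. for read-through
        // population of a bounded cache on a miss.
        CacheLoader<String, String> loader = key -> "value-for-" + key;
        System.out.println(loader.load("a")); // prints: value-for-a
    }
}
```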
-------------------------------------------------------------------------------- /kcache/src/main/java/io/kcache/exceptions/EntryTooLargeException.java: --------------------------------------------------------------------------------
1 | /* 2 | * Licensed under the Apache License, Version 2.0 (the "License"); 3 | * you may not use this file except in compliance with the License. 4 | * You may obtain a copy of the License at 5 | * 6 | * http://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * Unless required by applicable law or agreed to in writing, software 9 | * distributed under the License is distributed on an "AS IS" BASIS, 10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | * See the License for the specific language governing permissions and 12 | * limitations under the License. 13 | */ 14 | 15 | package io.kcache.exceptions; 16 | 17 | public class EntryTooLargeException extends CacheException { 18 | 19 | public EntryTooLargeException(String message) { 20 | super(message); 21 | } 22 | 23 | public EntryTooLargeException(String message, Throwable cause) { 24 | super(message, cause); 25 | } 26 | }
-------------------------------------------------------------------------------- /kcache/src/main/java/io/kcache/exceptions/CacheException.java: --------------------------------------------------------------------------------
1 | /* 2 | * Copyright 2014-2018 Confluent Inc. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | *
10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package io.kcache.exceptions; 18 | 19 | public class CacheException extends RuntimeException { 20 | 21 | public CacheException(String message) { 22 | super(message); 23 | } 24 | 25 | public CacheException(String message, Throwable cause) { 26 | super(message, cause); 27 | } 28 | 29 | }
-------------------------------------------------------------------------------- /kcache/src/main/java/io/kcache/exceptions/CacheTimeoutException.java: --------------------------------------------------------------------------------
1 | /* 2 | * Copyright 2015-2018 Confluent Inc. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package io.kcache.exceptions; 18 | 19 | public class CacheTimeoutException extends CacheException { 20 | 21 | public CacheTimeoutException(String message) { 22 | super(message); 23 | } 24 | 25 | public CacheTimeoutException(String message, Throwable cause) { 26 | super(message, cause); 27 | } 28 | }
-------------------------------------------------------------------------------- /kcache/src/main/java/io/kcache/utils/Streams.java: --------------------------------------------------------------------------------
1 | package io.kcache.utils; 2 | 3 | import java.util.Iterator; 4 | import java.util.Spliterator; 5 | import java.util.Spliterators; 6 | import java.util.stream.Stream; 7 | import java.util.stream.StreamSupport; 8 | 9 | public final class Streams { 10 | 11 | private Streams() { 12 | } 13 | 14 | public static <T> Stream<T> streamOf(Iterable<T> iterable) { 15 | return StreamSupport.stream(iterable.spliterator(), false); 16 | } 17 | 18 | public static <T> Stream<T> streamOf(Iterator<T> iterator) { 19 | return StreamSupport.stream(Spliterators.spliteratorUnknownSize(iterator, Spliterator.ORDERED), false); 20 | } 21 | 22 | public static <T> Stream<T> parallelStreamOf(Iterable<T> iterable) { 23 | return StreamSupport.stream(iterable.spliterator(), true); 24 | } 25 | 26 | public static <T> Stream<T> parallelStreamOf(Iterator<T> iterator) { 27 | return StreamSupport.stream(Spliterators.spliteratorUnknownSize(iterator, Spliterator.ORDERED), true); 28 | } 29 | }
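A short sketch of how these helpers combine with the KeyValueIterator shown later in this dump; the iterator argument is hypothetical, and try-with-resources handles the required close():

```java
import io.kcache.KeyValueIterator;
import io.kcache.utils.Streams;

public class StreamsExample {
    // Counts entries whose key starts with the given prefix.
    static long countKeysWithPrefix(KeyValueIterator<String, String> iter, String prefix) {
        try (KeyValueIterator<String, String> it = iter) {
            // streamOf(Iterator) wraps the iterator in an ordered, sequential Stream
            return Streams.streamOf(it)
                .filter(kv -> kv.key.startsWith(prefix))
                .count();
        }
    }
}
```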
-------------------------------------------------------------------------------- /kcache/src/main/java/io/kcache/exceptions/CacheInitializationException.java: --------------------------------------------------------------------------------
1 | /* 2 | * Copyright 2014-2018 Confluent Inc. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | *
10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package io.kcache.exceptions; 18 | 19 | public class CacheInitializationException extends CacheException { 20 | 21 | public CacheInitializationException(String message) { 22 | super(message); 23 | } 24 | 25 | public CacheInitializationException(String message, Throwable cause) { 26 | super(message, cause); 27 | } 28 | }
-------------------------------------------------------------------------------- /kcache-rdbms/src/main/java/io/kcache/rdbms/jooq/DefaultCatalog.java: --------------------------------------------------------------------------------
1 | /* 2 | * This file is generated by jOOQ. 3 | */ 4 | package io.kcache.rdbms.jooq; 5 | 6 | 7 | import java.util.Arrays; 8 | import java.util.List; 9 | 10 | import org.jooq.Schema; 11 | import org.jooq.impl.CatalogImpl; 12 | 13 | 14 | /** 15 | * This class is generated by jOOQ. 16 | */ 17 | @SuppressWarnings({ "all", "unchecked", "rawtypes" }) 18 | public class DefaultCatalog extends CatalogImpl { 19 | 20 | private static final long serialVersionUID = 1L; 21 | 22 | /** 23 | * The reference instance of DEFAULT_CATALOG 24 | */ 25 | public static final DefaultCatalog DEFAULT_CATALOG = new DefaultCatalog(); 26 | 27 | /** 28 | * The schema kcache. 29 | */ 30 | public final Kcache KCACHE = Kcache.KCACHE; 31 | 32 | /** 33 | * No further instances allowed 34 | */ 35 | private DefaultCatalog() { 36 | super(""); 37 | } 38 | 39 | @Override 40 | public final List<Schema> getSchemas() { 41 | return Arrays.asList( 42 | Kcache.KCACHE); 43 | } 44 | }
-------------------------------------------------------------------------------- /kcache-rdbms/src/main/java/io/kcache/rdbms/jooq/Kcache.java: --------------------------------------------------------------------------------
1 | /* 2 | * This file is generated by jOOQ. 3 | */ 4 | package io.kcache.rdbms.jooq; 5 | 6 | 7 | import java.util.Arrays; 8 | import java.util.List; 9 | 10 | import org.jooq.Catalog; 11 | import org.jooq.Table; 12 | import org.jooq.impl.SchemaImpl; 13 | 14 | import io.kcache.rdbms.jooq.tables.Kv; 15 | 16 | 17 | /** 18 | * This class is generated by jOOQ. 19 | */ 20 | @SuppressWarnings({ "all", "unchecked", "rawtypes" }) 21 | public class Kcache extends SchemaImpl { 22 | 23 | private static final long serialVersionUID = 1L; 24 | 25 | /** 26 | * The reference instance of kcache 27 | */ 28 | public static final Kcache KCACHE = new Kcache(); 29 | 30 | /** 31 | * The table kcache.KV. 32 | */ 33 | public final Kv KV = Kv.KV; 34 | 35 | /** 36 | * No further instances allowed 37 | */ 38 | private Kcache() { 39 | super("kcache", null); 40 | } 41 | 42 | 43 | @Override 44 | public Catalog getCatalog() { 45 | return DefaultCatalog.DEFAULT_CATALOG; 46 | } 47 | 48 | @Override 49 | public final List<Table<?>> getTables() { 50 | return Arrays.<Table<?>>asList( 51 | Kv.KV); 52 | } 53 | }
-------------------------------------------------------------------------------- /kcache-lmdb/src/test/java/io/kcache/lmdb/KafkaLmdbCacheTest.java: --------------------------------------------------------------------------------
1 | /* 2 | * Copyright 2014-2018 Confluent Inc. 3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | *
10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package io.kcache.lmdb; 18 | 19 | import io.kcache.CacheType; 20 | import io.kcache.KafkaCacheConfig; 21 | import io.kcache.KafkaPersistentCacheTest; 22 | import java.util.Properties; 23 | 24 | public class KafkaLmdbCacheTest extends KafkaPersistentCacheTest { 25 | 26 | @Override 27 | protected Properties getKafkaCacheProperties() throws Exception { 28 | Properties props = super.getKafkaCacheProperties(); 29 | props.put(KafkaCacheConfig.KAFKACACHE_BACKING_CACHE_CONFIG, CacheType.LMDB.toString()); 30 | return props; 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /kcache-bdbje/src/test/java/io/kcache/bdbje/KafkaBdbJECacheTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014-2018 Confluent Inc. 3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | *
10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package io.kcache.bdbje; 18 | 19 | import io.kcache.CacheType; 20 | import io.kcache.KafkaCacheConfig; 21 | import io.kcache.KafkaPersistentCacheTest; 22 | import java.util.Properties; 23 | 24 | public class KafkaBdbJECacheTest extends KafkaPersistentCacheTest { 25 | 26 | @Override 27 | protected Properties getKafkaCacheProperties() throws Exception { 28 | Properties props = super.getKafkaCacheProperties(); 29 | props.put(KafkaCacheConfig.KAFKACACHE_BACKING_CACHE_CONFIG, CacheType.BDBJE.toString()); 30 | return props; 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /kcache-mapdb/src/test/java/io/kcache/mapdb/KafkaMapDBCacheTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014-2018 Confluent Inc. 3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | *
10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package io.kcache.mapdb; 18 | 19 | import io.kcache.CacheType; 20 | import io.kcache.KafkaCacheConfig; 21 | import io.kcache.KafkaPersistentCacheTest; 22 | import java.util.Properties; 23 | 24 | public class KafkaMapDBCacheTest extends KafkaPersistentCacheTest { 25 | 26 | @Override 27 | protected Properties getKafkaCacheProperties() throws Exception { 28 | Properties props = super.getKafkaCacheProperties(); 29 | props.put(KafkaCacheConfig.KAFKACACHE_BACKING_CACHE_CONFIG, CacheType.MAPDB.toString()); 30 | return props; 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /kcache-rocksdb/src/test/java/io/kcache/rocksdb/KafkaRocksDBCacheTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014-2018 Confluent Inc. 3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | *
10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package io.kcache.rocksdb; 18 | 19 | import io.kcache.CacheType; 20 | import io.kcache.KafkaCacheConfig; 21 | import io.kcache.KafkaPersistentCacheTest; 22 | import java.util.Properties; 23 | 24 | public class KafkaRocksDBCacheTest extends KafkaPersistentCacheTest { 25 | 26 | @Override 27 | protected Properties getKafkaCacheProperties() throws Exception { 28 | Properties props = super.getKafkaCacheProperties(); 29 | props.put(KafkaCacheConfig.KAFKACACHE_BACKING_CACHE_CONFIG, CacheType.ROCKSDB.toString()); 30 | return props; 31 | } 32 | }
-------------------------------------------------------------------------------- /kcache-lmdb/src/test/java/io/kcache/lmdb/LmdbCacheTest.java: --------------------------------------------------------------------------------
1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | package io.kcache.lmdb; 18 | 19 | import io.kcache.Cache; 20 | import io.kcache.PersistentCacheTest; 21 | import org.apache.kafka.common.serialization.Serdes; 22 | import org.apache.kafka.common.utils.Bytes; 23 | 24 | public class LmdbCacheTest extends PersistentCacheTest { 25 | 26 | @Override 27 | protected Cache<Bytes, byte[]> createCache() { 28 | return new LmdbCache<>(DB_NAME, dir.getRoot().toString(), Serdes.Bytes(), Serdes.ByteArray()); 29 | } 30 | }
-------------------------------------------------------------------------------- /kcache-bdbje/src/test/java/io/kcache/bdbje/BdbJECacheTest.java: --------------------------------------------------------------------------------
1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License.
16 | */ 17 | package io.kcache.bdbje; 18 | 19 | import io.kcache.Cache; 20 | import io.kcache.PersistentCacheTest; 21 | import org.apache.kafka.common.serialization.Serdes; 22 | import org.apache.kafka.common.utils.Bytes; 23 | 24 | public class BdbJECacheTest extends PersistentCacheTest { 25 | 26 | @Override 27 | protected Cache<Bytes, byte[]> createCache() { 28 | return new BdbJECache<>(DB_NAME, dir.getRoot().toString(), Serdes.Bytes(), Serdes.ByteArray()); 29 | } 30 | }
-------------------------------------------------------------------------------- /kcache-mapdb/src/test/java/io/kcache/mapdb/MapDBCacheTest.java: --------------------------------------------------------------------------------
1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | package io.kcache.mapdb; 18 | 19 | import io.kcache.Cache; 20 | import io.kcache.PersistentCacheTest; 21 | import org.apache.kafka.common.serialization.Serdes; 22 | import org.apache.kafka.common.utils.Bytes; 23 | 24 | public class MapDBCacheTest extends PersistentCacheTest { 25 | 26 | @Override 27 | protected Cache<Bytes, byte[]> createCache() { 28 | return new MapDBCache<>(DB_NAME, dir.getRoot().toString(), Serdes.Bytes(), Serdes.ByteArray()); 29 | } 30 | }
-------------------------------------------------------------------------------- /kcache-rocksdb/src/test/java/io/kcache/rocksdb/RocksDBCacheTest.java: --------------------------------------------------------------------------------
1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License.
16 | */ 17 | package io.kcache.rocksdb; 18 | 19 | import io.kcache.Cache; 20 | import io.kcache.PersistentCacheTest; 21 | import org.apache.kafka.common.serialization.Serdes; 22 | import org.apache.kafka.common.utils.Bytes; 23 | 24 | public class RocksDBCacheTest extends PersistentCacheTest { 25 | 26 | @Override 27 | protected Cache<Bytes, byte[]> createCache() { 28 | return new RocksDBCache<>(DB_NAME, dir.getRoot().toString(), Serdes.Bytes(), Serdes.ByteArray()); 29 | } 30 | }
-------------------------------------------------------------------------------- /kcache-benchmark/README.md: --------------------------------------------------------------------------------
1 | # KCache Persistent Cache JMH Microbenchmarks 2 | 3 | This module is for JMH micro-benchmarking persistent caches in KCache. 4 | 5 | ### How to run 6 | 7 | The benchmarks can be run either from `PersistentCacheBenchmark.java` directly through IntelliJ, or via the 8 | command line as follows, after building the module to produce `target/benchmarks.jar`: 9 | 10 | ``` 11 | java -jar ./target/benchmarks.jar 12 | ``` 13 | 14 | ### Running a subset of benchmarks 15 | 16 | To run only a subset of the benchmarks, you can specify parameters to run with. For example, 17 | to run only `rocksdb` benchmarks: 18 | ``` 19 | java -jar ./target/benchmarks.jar -p cacheType=rocksdb 20 | ``` 21 | 22 | ### Running with non-default parameters 23 | 24 | JMH parameters of interest may include the number of forks to use (`-f`), the number of warmup and 25 | measurement iterations (`-wi` and `-i`, respectively), the duration of each iteration 26 | (`-w` and `-r` for warmup and measurement iterations, respectively, with units of seconds), 27 | and the number of threads (`-t`). 28 | By default, `PersistentCacheBenchmark.java` is set up to run with 1 fork, 3 warmup iterations, and 3 measurement 29 | iterations. 30 | 31 | As an example, to run benchmarks with only one warmup iteration: 32 | ``` 33 | java -jar ./target/benchmarks.jar -wi 1 34 | ``` 35 | 36 | The full list of JMH command line options can be viewed with: 37 | ``` 38 | java -jar ./target/benchmarks.jar -h 39 | ```
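`PersistentCacheBenchmark.java` itself is not included in this excerpt; purely as orientation, a JMH benchmark that the flags above act on has roughly this shape (the class and parameter names here are illustrative, not the module's actual code):

```java
import java.util.concurrent.TimeUnit;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.State;

@State(Scope.Benchmark)
@BenchmarkMode(Mode.Throughput)
@OutputTimeUnit(TimeUnit.SECONDS)
public class ExampleCacheBenchmark {

    // Populated from the command line, e.g. -p cacheType=rocksdb
    @Param({"rocksdb", "lmdb", "mapdb"})
    public String cacheType;

    @Benchmark
    public void put() {
        // write one key/value into the cache selected by cacheType
    }
}
```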
-------------------------------------------------------------------------------- /kcache/src/main/java/io/kcache/KeyValueIterator.java: --------------------------------------------------------------------------------
1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | package io.kcache; 18 | 19 | import java.io.Closeable; 20 | import java.util.Iterator; 21 | 22 | /** 23 | * Iterator interface of {@link KeyValue}. 24 | * 25 | * Users must call its {@code close} method explicitly upon completeness to release resources, 26 | * or use try-with-resources statement (available since JDK7) for this {@link Closeable} class. 27 | * 28 | * @param <K> Type of keys 29 | * @param <V> Type of values 30 | */ 31 | public interface KeyValueIterator<K, V> extends Iterator<KeyValue<K, V>>, Closeable { 32 | 33 | @Override 34 | void close(); 35 | }
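To make the close() requirement concrete, a usage sketch; the cache.all() call is assumed from KCache's Cache interface and should be treated as illustrative:

```java
import io.kcache.Cache;
import io.kcache.KeyValue;
import io.kcache.KeyValueIterator;

public class IteratorUsage {
    // try-with-resources guarantees close() runs even if the loop throws.
    static void printAll(Cache<String, String> cache) {
        try (KeyValueIterator<String, String> iter = cache.all()) {
            while (iter.hasNext()) {
                KeyValue<String, String> kv = iter.next();
                System.out.println(kv.key + " -> " + kv.value);
            }
        }
    }
}
```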
-------------------------------------------------------------------------------- /kcache/src/test/java/io/kcache/utils/CustomPartitioner.java: --------------------------------------------------------------------------------
1 | /* 2 | * Copyright 2014-2018 Confluent Inc. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package io.kcache.utils; 18 | 19 | import java.util.Map; 20 | import java.util.Set; 21 | import java.util.concurrent.ConcurrentHashMap; 22 | import org.apache.kafka.clients.producer.Partitioner; 23 | import org.apache.kafka.common.Cluster; 24 | 25 | public class CustomPartitioner implements Partitioner { 26 | 27 | public static final Set<Object> keys = ConcurrentHashMap.newKeySet(); 28 | public static final Set<Object> values = ConcurrentHashMap.newKeySet(); 29 | 30 | public void configure(Map<String, ?> configs) { 31 | } 32 | 33 | public int partition(String topic, Object key, byte[] keyBytes, 34 | Object value, byte[] valueBytes, Cluster cluster) { 35 | keys.add(key); 36 | values.add(value); 37 | return 0; 38 | } 39 | 40 | public void close() { 41 | } 42 | }
-------------------------------------------------------------------------------- /findbugs-exclude.xml: --------------------------------------------------------------------------------
1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 |
-------------------------------------------------------------------------------- /kcache/src/main/java/io/kcache/CacheType.java: --------------------------------------------------------------------------------
1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | 18 | package io.kcache; 19 | 20 | import java.util.EnumSet; 21 | import java.util.HashMap; 22 | import java.util.Locale; 23 | import java.util.Map; 24 | 25 | public enum CacheType { 26 | MEMORY, 27 | BDBJE, 28 | CAFFEINE, 29 | LMDB, 30 | MAPDB, 31 | RDBMS, 32 | ROCKSDB; 33 | 34 | private static final Map<String, CacheType> lookup = new HashMap<>(); 35 | 36 | static { 37 | for (CacheType v : EnumSet.allOf(CacheType.class)) { 38 | lookup.put(v.toString(), v); 39 | } 40 | } 41 | 42 | public static CacheType get(String name) { 43 | return lookup.get(name.toLowerCase(Locale.ROOT)); 44 | } 45 | 46 | @Override 47 | public String toString() { 48 | return name().toLowerCase(Locale.ROOT); 49 | } 50 | }
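Since toString() lowercases the enum name and get() lowercases its argument, lookups are case-insensitive; for example:

```java
import io.kcache.CacheType;

public class CacheTypeExample {
    public static void main(String[] args) {
        // get() lowercases its argument before the map lookup
        System.out.println(CacheType.get("RocksDB"));      // prints: rocksdb
        System.out.println(CacheType.ROCKSDB.toString());  // prints: rocksdb
    }
}
```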
-------------------------------------------------------------------------------- /kcache/src/test/java/io/kcache/utils/StringUpdateHandler.java: --------------------------------------------------------------------------------
1 | /* 2 | * Copyright 2014-2018 Confluent Inc. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package io.kcache.utils; 18 | 19 | import io.kcache.CacheUpdateHandler; 20 | import org.apache.kafka.common.TopicPartition; 21 | import org.slf4j.Logger; 22 | import org.slf4j.LoggerFactory; 23 | 24 | public class StringUpdateHandler implements CacheUpdateHandler<String, String> { 25 | 26 | private static final Logger log = LoggerFactory.getLogger(StringUpdateHandler.class); 27 | 28 | /** 29 | * Invoked on every new K,V pair written to the store 30 | * 31 | * @param key key associated with the data 32 | * @param value data written to the store 33 | * @param oldValue the previous value associated with key, or null if there was no mapping for key 34 | * @param timestamp timestamp 35 | */ 36 | @Override 37 | public void handleUpdate(String key, String value, String oldValue, 38 | TopicPartition tp, long offset, long timestamp) { 39 | log.info("Handle update for ({}, {}, {}, {}, {}, {})", key, value, oldValue, tp, offset, timestamp); 40 | } 41 | }
-------------------------------------------------------------------------------- /kcache-mapdb/src/main/java/io/kcache/mapdb/CustomSerializerByteArray.java: --------------------------------------------------------------------------------
1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * https://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package io.kcache.mapdb; 20 | 21 | import java.util.Comparator; 22 | import org.apache.kafka.common.serialization.Serde; 23 | import org.mapdb.serializer.SerializerByteArray; 24 | 25 | public class CustomSerializerByteArray<K> extends SerializerByteArray { 26 | 27 | private final Serde<K> keySerde; 28 | private final Comparator<K> comparator; 29 | 30 | public CustomSerializerByteArray(Serde<K> keySerde, Comparator<K> comparator) { 31 | this.keySerde = keySerde; 32 | this.comparator = comparator; 33 | } 34 | 35 | @Override 36 | public int compare(byte[] b1, byte[] b2) { 37 | K key1 = keySerde.deserializer().deserialize(null, b1); 38 | K key2 = keySerde.deserializer().deserialize(null, b2); 39 | return comparator.compare(key1, key2); 40 | } 41 | }
-------------------------------------------------------------------------------- /kcache/src/main/java/io/kcache/utils/KeyComparator.java: --------------------------------------------------------------------------------
1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * https://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package io.kcache.utils; 20 | 21 | import java.io.Serializable; 22 | import java.util.Comparator; 23 | import org.apache.kafka.common.serialization.Serde; 24 | 25 | public class KeyComparator<K> implements Comparator<K>, Serializable { 26 | private static final long serialVersionUID = -6821331368183556503L; 27 | 28 | private final Serde<K> keySerde; 29 | private final Comparator<byte[]> keyComparator; 30 | 31 | public KeyComparator(Serde<K> keySerde, Comparator<byte[]> keyComparator) { 32 | this.keySerde = keySerde; 33 | this.keyComparator = keyComparator; 34 | } 35 | 36 | @Override 37 | public int compare(K k1, K k2) { 38 | byte[] b1 = keySerde.serializer().serialize(null, k1); 39 | byte[] b2 = keySerde.serializer().serialize(null, k2); 40 | return keyComparator.compare(b1, b2); 41 | } 42 | }
-------------------------------------------------------------------------------- /kcache/src/main/java/io/kcache/utils/KeyBytesComparator.java: --------------------------------------------------------------------------------
1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * https://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package io.kcache.utils; 20 | 21 | import java.io.Serializable; 22 | import java.util.Comparator; 23 | import org.apache.kafka.common.serialization.Serde; 24 | 25 | public class KeyBytesComparator<K> implements Comparator<byte[]>, Serializable { 26 | private static final long serialVersionUID = -7158035266954550359L; 27 | 28 | private final Serde<K> keySerde; 29 | private final Comparator<K> keyComparator; 30 | 31 | public KeyBytesComparator(Serde<K> keySerde, Comparator<K> keyComparator) { 32 | this.keySerde = keySerde; 33 | this.keyComparator = keyComparator; 34 | } 35 | 36 | @Override 37 | public int compare(byte[] b1, byte[] b2) { 38 | K key1 = keySerde.deserializer().deserialize(null, b1); 39 | K key2 = keySerde.deserializer().deserialize(null, b2); 40 | return keyComparator.compare(key1, key2); 41 | } 42 | }
-------------------------------------------------------------------------------- /kcache/src/main/java/io/kcache/CheckpointHandler.java: --------------------------------------------------------------------------------
1 | /* 2 | * Copyright 2014-2024 Confluent Inc. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package io.kcache; 18 | 19 | import io.kcache.exceptions.CacheInitializationException; 20 | import java.io.Closeable; 21 | import java.io.IOException; 22 | import java.util.Map; 23 | import org.apache.kafka.common.Configurable; 24 | import org.apache.kafka.common.TopicPartition; 25 | 26 | public interface CheckpointHandler extends Configurable, Closeable { 27 | 28 | /** 29 | * Configures the checkpoint handler. 30 | */ 31 | default void configure(Map<String, ?> configs) { 32 | } 33 | 34 | /** 35 | * Initializes the checkpoint handler. 36 | */ 37 | void init() throws CacheInitializationException; 38 | 39 | /** 40 | * Returns the current checkpoints. 41 | * 42 | * @return the current checkpoints 43 | */ 44 | Map<TopicPartition, Long> checkpoints(); 45 | 46 | /** 47 | * Updates the checkpoints. 48 | * 49 | * @param checkpoints the new checkpoints 50 | */ 51 | void updateCheckpoints(Map<TopicPartition, Long> checkpoints) throws IOException; 52 | 53 | @Override 54 | default void close() throws IOException { 55 | } 56 | }
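A minimal in-memory sketch of the contract above (illustrative only; the repository's real implementation is FileCheckpointHandler, listed in the tree but not shown in this excerpt):

```java
import io.kcache.CheckpointHandler;
import io.kcache.exceptions.CacheInitializationException;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.common.TopicPartition;

public class InMemoryCheckpointHandler implements CheckpointHandler {

    private final Map<TopicPartition, Long> offsets = new HashMap<>();

    @Override
    public void init() throws CacheInitializationException {
        // nothing to recover for a purely in-memory handler
    }

    @Override
    public Map<TopicPartition, Long> checkpoints() {
        return new HashMap<>(offsets); // defensive copy
    }

    @Override
    public void updateCheckpoints(Map<TopicPartition, Long> checkpoints) throws IOException {
        offsets.putAll(checkpoints);
    }
}
```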
-------------------------------------------------------------------------------- /kcache/src/test/java/io/kcache/utils/EnumRecommenderTest.java: --------------------------------------------------------------------------------
1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | 18 | package io.kcache.utils; 19 | 20 | import com.google.common.collect.ImmutableList; 21 | import com.google.common.collect.ImmutableMap; 22 | import java.util.Arrays; 23 | import java.util.List; 24 | 25 | import org.apache.kafka.common.config.ConfigDef.Recommender; 26 | import org.junit.Test; 27 | 28 | import static org.junit.Assert.*; 29 | 30 | public class EnumRecommenderTest { 31 | 32 | enum TestEnum { 33 | ONE, TWO, THREE 34 | } 35 | 36 | @Test 37 | public void visible() { 38 | Recommender recommender = new EnumRecommender<>(TestEnum.class, String::toLowerCase); 39 | final List<Object> actual = recommender.validValues("asdf", ImmutableMap.of()); 40 | final List<Object> expected = ImmutableList.copyOf( 41 | Arrays.stream(TestEnum.values()) 42 | .map(Enum::toString) 43 | .map(String::toLowerCase) 44 | .toArray() 45 | ); 46 | assertEquals(expected, actual); 47 | } 48 | }
-------------------------------------------------------------------------------- /kcache/src/main/java/io/kcache/utils/KeyBufferComparator.java: --------------------------------------------------------------------------------
1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * https://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License.
17 | */ 18 | 19 | package io.kcache.utils; 20 | 21 | import java.io.Serializable; 22 | import java.nio.ByteBuffer; 23 | import java.util.Comparator; 24 | import org.apache.kafka.common.serialization.Serde; 25 | 26 | public class KeyBufferComparator<K> implements Comparator<ByteBuffer>, Serializable { 27 | private static final long serialVersionUID = 7847770324097153442L; 28 | 29 | private final Serde<K> keySerde; 30 | private final Comparator<K> keyComparator; 31 | 32 | public KeyBufferComparator(Serde<K> keySerde, Comparator<K> keyComparator) { 33 | this.keySerde = keySerde; 34 | this.keyComparator = keyComparator; 35 | } 36 | 37 | @Override 38 | public int compare(ByteBuffer b1, ByteBuffer b2) { 39 | byte[] key1Bytes = new byte[b1.remaining()]; 40 | byte[] key2Bytes = new byte[b2.remaining()]; 41 | b1.duplicate().get(key1Bytes); 42 | b2.duplicate().get(key2Bytes); 43 | K key1 = keySerde.deserializer().deserialize(null, key1Bytes); 44 | K key2 = keySerde.deserializer().deserialize(null, key2Bytes); 45 | return keyComparator.compare(key1, key2); 46 | } 47 | }
-------------------------------------------------------------------------------- /kcache-rocksdb/src/main/java/io/kcache/rocksdb/RocksDBKeySliceComparator.java: --------------------------------------------------------------------------------
1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * https://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License.
17 | */ 18 | 19 | package io.kcache.rocksdb; 20 | 21 | import io.kcache.utils.KeyBytesComparator; 22 | import java.nio.ByteBuffer; 23 | import org.apache.kafka.common.serialization.Serde; 24 | import org.rocksdb.AbstractComparator; 25 | import org.rocksdb.ComparatorOptions; 26 | 27 | import java.util.Comparator; 28 | 29 | public class RocksDBKeySliceComparator<K> extends AbstractComparator { 30 | 31 | // Ensure ComparatorOptions is assigned to a variable 32 | // See https://github.com/facebook/rocksdb/issues/6608 33 | private final static ComparatorOptions OPTIONS = new ComparatorOptions(); 34 | private final Comparator<byte[]> comparator; 35 | 36 | public RocksDBKeySliceComparator(Serde<K> keySerde, Comparator<K> keyComparator) { 37 | super(OPTIONS); 38 | this.comparator = new KeyBytesComparator<>(keySerde, keyComparator); 39 | } 40 | 41 | @Override 42 | public String name() { 43 | return getClass().getName(); 44 | } 45 | 46 | @Override 47 | public int compare(ByteBuffer buf1, ByteBuffer buf2) { 48 | byte[] arr1 = new byte[buf1.remaining()]; 49 | buf1.get(arr1); 50 | byte[] arr2 = new byte[buf2.remaining()]; 51 | buf2.get(arr2); 52 | return comparator.compare(arr1, arr2); 53 | } 54 | }
-------------------------------------------------------------------------------- /kcache/src/main/java/io/kcache/KeyValue.java: --------------------------------------------------------------------------------
1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | package io.kcache; 18 | 19 | import java.util.Objects; 20 | 21 | /** 22 | * A key-value pair. 23 | * 24 | * @param <K> Key type 25 | * @param <V> Value type 26 | */ 27 | public class KeyValue<K, V> { 28 | 29 | /** 30 | * The key of the key-value pair. 31 | */ 32 | public final K key; 33 | /** 34 | * The value of the key-value pair. 35 | */ 36 | public final V value; 37 | 38 | /** 39 | * Create a new key-value pair.
40 | * 41 | * @param key the key 42 | * @param value the value 43 | */ 44 | public KeyValue(final K key, final V value) { 45 | this.key = key; 46 | this.value = value; 47 | } 48 | 49 | @Override 50 | public String toString() { 51 | return "KeyValue(" + key + ", " + value + ")"; 52 | } 53 | 54 | @Override 55 | public boolean equals(final Object obj) { 56 | if (this == obj) { 57 | return true; 58 | } 59 | 60 | if (!(obj instanceof KeyValue)) { 61 | return false; 62 | } 63 | 64 | final KeyValue<?, ?> other = (KeyValue<?, ?>) obj; 65 | return Objects.equals(key, other.key) && Objects.equals(value, other.value); 66 | } 67 | 68 | @Override 69 | public int hashCode() { 70 | return Objects.hash(key, value); 71 | } 72 | 73 | }
-------------------------------------------------------------------------------- /kcache/src/main/java/io/kcache/utils/ShutdownableThread.java: --------------------------------------------------------------------------------
1 | /* 2 | * Copyright 2014-2018 Confluent Inc. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package io.kcache.utils; 18 | 19 | import org.slf4j.Logger; 20 | import org.slf4j.LoggerFactory; 21 | 22 | import java.util.concurrent.CountDownLatch; 23 | import java.util.concurrent.atomic.AtomicBoolean; 24 | 25 | public abstract class ShutdownableThread extends Thread { 26 | 27 | private static final Logger log = LoggerFactory.getLogger(ShutdownableThread.class); 28 | 29 | private final AtomicBoolean isRunning = new AtomicBoolean(true); 30 | private final CountDownLatch shutdownLatch = new CountDownLatch(1); 31 | 32 | public ShutdownableThread(String name) { 33 | super(name); 34 | this.setDaemon(false); 35 | } 36 | 37 | public void shutdown() throws InterruptedException { 38 | if (initiateShutdown()) { 39 | awaitShutdown(); 40 | } 41 | } 42 | 43 | protected boolean initiateShutdown() { 44 | return isRunning.getAndSet(false); 45 | } 46 | 47 | protected void awaitShutdown() throws InterruptedException { 48 | shutdownLatch.await(); 49 | log.info("Shutdown completed"); 50 | } 51 | 52 | /** 53 | * This method is repeatedly invoked until the thread shuts down or this method throws an exception 54 | */ 55 | protected abstract void doWork(); 56 | 57 | @Override 58 | public void run() { 59 | log.info("Starting"); 60 | try { 61 | while (isRunning()) { 62 | doWork(); 63 | } 64 | } catch (Error | RuntimeException e) { 65 | log.error("Thread {} exiting with uncaught exception: ", getName(), e); 66 | throw e; 67 | } finally { 68 | shutdownLatch.countDown(); 69 | } 70 | log.info("Stopped"); 71 | } 72 | 73 | public boolean isRunning() { 74 | return isRunning.get(); 75 | } 76 | }
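The intended subclassing pattern, as a sketch (the polling body is invented): doWork() is invoked in a loop while isRunning() is true, and shutdown() flips the flag and then blocks on the latch until run() exits.

```java
import io.kcache.utils.ShutdownableThread;

public class PollingThread extends ShutdownableThread {

    public PollingThread() {
        super("polling-thread");
    }

    @Override
    protected void doWork() {
        // one bounded unit of work per call, so the shutdown flag
        // is re-checked promptly between iterations
        System.out.println("polling...");
    }
}
// usage: new PollingThread().start(); ... thread.shutdown() waits for the loop to exit
```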

4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | *

8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | *

10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package io.kcache.rdbms; 18 | 19 | import io.kcache.CacheType; 20 | import io.kcache.KafkaCacheConfig; 21 | import io.kcache.KafkaPersistentCacheTest; 22 | import java.util.Properties; 23 | 24 | public class KafkaRdbmsCacheTest extends KafkaPersistentCacheTest { 25 | 26 | @Override 27 | protected Properties getKafkaCacheProperties() throws Exception { 28 | Properties props = super.getKafkaCacheProperties(); 29 | props.put(KafkaCacheConfig.KAFKACACHE_BACKING_CACHE_CONFIG, CacheType.RDBMS.toString()); 30 | String prefix = KafkaCacheConfig.KAFKACACHE_BACKING_CACHE_CONFIG + "." 31 | + CacheType.RDBMS + "."; 32 | 33 | //props.put(prefix + RdbmsCache.JDBC_URL_CONFIG, "jdbc:mysql://localhost:3306/kcache"); 34 | //props.put(prefix + RdbmsCache.DIALECT_CONFIG, "MYSQL"); 35 | //props.put(prefix + RdbmsCache.USERNAME_CONFIG, "root"); 36 | 37 | //props.put(prefix + RdbmsCache.JDBC_URL_CONFIG, "jdbc:postgresql:postgres"); 38 | //props.put(prefix + RdbmsCache.DIALECT_CONFIG, "POSTGRES"); 39 | //props.put(prefix + RdbmsCache.USERNAME_CONFIG, "postgres"); 40 | //props.put(prefix + RdbmsCache.PASSWORD_CONFIG, "postgres"); 41 | 42 | //props.put(prefix + RdbmsCache.JDBC_URL_CONFIG, "jdbc:h2:" + dir.newFolder().getAbsolutePath() + "/kcache"); 43 | //props.put(prefix + RdbmsCache.DIALECT_CONFIG, "H2"); 44 | 45 | //props.put(prefix + RdbmsCache.JDBC_URL_CONFIG, "jdbc:hsqldb:file:" + dir.newFolder().getAbsolutePath() + "/kcache"); 46 | //props.put(prefix + RdbmsCache.DIALECT_CONFIG, "HSQLDB"); 47 | 48 | props.put(prefix + RdbmsCache.JDBC_URL_CONFIG, "jdbc:derby:" + dir.newFolder().getAbsolutePath() + "/kcache;create=true"); 49 | props.put(prefix + RdbmsCache.DIALECT_CONFIG, "DERBY"); 50 | 51 | return props; 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /kcache/src/test/java/io/kcache/KafkaDelegatingCacheTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014-2018 Confluent Inc. 3 | *

4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | *

8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | *

10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package io.kcache; 18 | 19 | import io.kcache.utils.Caches; 20 | import io.kcache.utils.InMemoryCache; 21 | import io.kcache.utils.StringUpdateHandler; 22 | import org.apache.kafka.common.serialization.Serdes; 23 | import org.slf4j.Logger; 24 | import org.slf4j.LoggerFactory; 25 | 26 | import java.util.Properties; 27 | 28 | public class KafkaDelegatingCacheTest extends KafkaCacheTest { 29 | 30 | private static final Logger log = LoggerFactory.getLogger(KafkaDelegatingCacheTest.class); 31 | 32 | @Override 33 | protected Cache<String, String> createAndInitKafkaCacheInstance() { 34 | Cache<String, String> inMemoryCache = new InMemoryCache<>(); 35 | Properties props = new Properties(); 36 | props.put(KafkaCacheConfig.KAFKACACHE_BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); 37 | KafkaCacheConfig config = new KafkaCacheConfig(props); 38 | Cache<String, String> kafkaCache = 39 | new KafkaCache<>(config, 40 | Serdes.String(), 41 | Serdes.String(), 42 | new StringUpdateHandler(), 43 | inMemoryCache); 44 | 45 | // Create a Kafka cache that delegates to another Kafka cache 46 | Properties props2 = new Properties(); 47 | props2.put(KafkaCacheConfig.KAFKACACHE_BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); 48 | props2.put(KafkaCacheConfig.KAFKACACHE_TOPIC_CONFIG, "_cache2"); 49 | KafkaCacheConfig config2 = new KafkaCacheConfig(props2); 50 | Cache<String, String> kafkaDelegatingCache = Caches.concurrentCache( 51 | new KafkaCache<>(config2, 52 | Serdes.String(), 53 | Serdes.String(), 54 | new StringUpdateHandler(), 55 | kafkaCache)); 56 | kafkaDelegatingCache.init(); 57 | return kafkaDelegatingCache; 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /kcache-bdbje/src/main/java/io/kcache/bdbje/SerdeWrapper.java: 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * https://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License.
17 | */ 18 | 19 | package io.kcache.bdbje; 20 | 21 | import com.esotericsoftware.kryo.Kryo; 22 | import com.esotericsoftware.kryo.io.Input; 23 | import com.esotericsoftware.kryo.io.Output; 24 | import java.io.ByteArrayOutputStream; 25 | import java.io.IOException; 26 | import java.io.Serializable; 27 | import org.apache.kafka.common.serialization.Deserializer; 28 | import org.apache.kafka.common.serialization.Serde; 29 | import org.apache.kafka.common.serialization.Serializer; 30 | 31 | public class SerdeWrapper<T> implements Serde<T>, Serializable { 32 | private static final long serialVersionUID = -302623791968470800L; 33 | 34 | private static final Kryo kryo = new Kryo(); 35 | 36 | static { 37 | kryo.setRegistrationRequired(false); 38 | } 39 | 40 | private Serde<T> serde; 41 | 42 | public SerdeWrapper(Serde<T> serde) { 43 | this.serde = serde; 44 | } 45 | 46 | @Override 47 | public Serializer<T> serializer() { 48 | return serde.serializer(); 49 | } 50 | 51 | @Override 52 | public Deserializer<T> deserializer() { 53 | return serde.deserializer(); 54 | } 55 | 56 | private void writeObject(java.io.ObjectOutputStream stream) 57 | throws IOException { 58 | ByteArrayOutputStream baos = new ByteArrayOutputStream(32); 59 | Output output = new Output(baos); 60 | kryo.writeClassAndObject(output, serde); 61 | output.close(); 62 | stream.write(baos.toByteArray()); 63 | } 64 | 65 | @SuppressWarnings("unchecked") 66 | private void readObject(java.io.ObjectInputStream stream) 67 | throws IOException, ClassNotFoundException { 68 | Input input = new Input(stream); 69 | serde = (Serde<T>) kryo.readClassAndObject(input); 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /kcache/src/test/java/io/kcache/KafkaCacheSASLTest.java: 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to you under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License.
16 | */ 17 | package io.kcache; 18 | 19 | import io.kcache.exceptions.CacheInitializationException; 20 | import io.kcache.utils.SASLClusterTestHarness; 21 | import java.util.Properties; 22 | import org.junit.Test; 23 | 24 | import static org.junit.Assert.assertEquals; 25 | 26 | public class KafkaCacheSASLTest extends SASLClusterTestHarness { 27 | @Test 28 | public void testInitialization() throws Exception { 29 | Cache<String, String> kafkaCache = createAndInitKafkaCacheInstance(); 30 | kafkaCache.close(); 31 | } 32 | 33 | @Test(expected = CacheInitializationException.class) 34 | public void testDoubleInitialization() throws Exception { 35 | try (Cache<String, String> kafkaCache = createAndInitKafkaCacheInstance()) { 36 | kafkaCache.init(); 37 | } 38 | } 39 | 40 | @Test 41 | public void testSimplePut() throws Exception { 42 | try (Cache<String, String> kafkaCache = createAndInitKafkaCacheInstance()) { 43 | String key = "Kafka"; 44 | String value = "Rocks"; 45 | kafkaCache.put(key, value); 46 | String retrievedValue = kafkaCache.get(key); 47 | assertEquals("Retrieved value should match entered value", value, retrievedValue); 48 | } 49 | } 50 | 51 | protected Cache<String, String> createAndInitKafkaCacheInstance() { 52 | Properties props = getKafkaCacheProperties(); 53 | return CacheUtils.createAndInitSASLCacheInstance(props); 54 | } 55 | 56 | protected Properties getKafkaCacheProperties() { 57 | Properties props = new Properties(); 58 | props.put(KafkaCacheConfig.KAFKACACHE_BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); 59 | props.put(KafkaCacheConfig.KAFKACACHE_BACKING_CACHE_CONFIG, CacheType.MEMORY.toString()); 60 | return props; 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /kcache-rdbms/src/test/java/io/kcache/rdbms/RdbmsCacheTest.java: 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License.
16 | */ 17 | package io.kcache.rdbms; 18 | 19 | import io.kcache.Cache; 20 | import io.kcache.PersistentCacheTest; 21 | import java.util.HashMap; 22 | import java.util.Map; 23 | import org.apache.kafka.common.serialization.Serdes; 24 | import org.apache.kafka.common.utils.Bytes; 25 | import org.junit.After; 26 | 27 | public class RdbmsCacheTest extends PersistentCacheTest { 28 | 29 | @Override 30 | protected Cache<Bytes, byte[]> createCache() throws Exception { 31 | Cache<Bytes, byte[]> cache = 32 | new RdbmsCache<>(DB_NAME, dir.getRoot().toString(), Serdes.Bytes(), Serdes.ByteArray()); 33 | Map<String, Object> configs = new HashMap<>(); 34 | 35 | //configs.put(RdbmsCache.JDBC_URL_CONFIG, "jdbc:mysql://localhost:3306/kcache"); 36 | //configs.put(RdbmsCache.DIALECT_CONFIG, "MYSQL"); 37 | //configs.put(RdbmsCache.USERNAME_CONFIG, "root"); 38 | 39 | //configs.put(RdbmsCache.JDBC_URL_CONFIG, "jdbc:postgresql:postgres"); 40 | //configs.put(RdbmsCache.DIALECT_CONFIG, "POSTGRES"); 41 | //configs.put(RdbmsCache.USERNAME_CONFIG, "postgres"); 42 | //configs.put(RdbmsCache.PASSWORD_CONFIG, "postgres"); 43 | 44 | //configs.put(RdbmsCache.JDBC_URL_CONFIG, "jdbc:h2:" + dir.newFolder().getAbsolutePath() + "/kcache"); 45 | //configs.put(RdbmsCache.DIALECT_CONFIG, "H2"); 46 | 47 | //configs.put(RdbmsCache.JDBC_URL_CONFIG, "jdbc:hsqldb:file:" + dir.newFolder().getAbsolutePath() + "/kcache"); 48 | //configs.put(RdbmsCache.DIALECT_CONFIG, "HSQLDB"); 49 | 50 | configs.put(RdbmsCache.JDBC_URL_CONFIG, "jdbc:derby:" + dir.newFolder().getAbsolutePath() + "/kcache;create=true"); 51 | configs.put(RdbmsCache.DIALECT_CONFIG, "DERBY"); 52 | 53 | cache.configure(configs); 54 | return cache; 55 | } 56 | 57 | @After 58 | public void clearCache() { 59 | getCache().clear(); 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /kcache-mapdb/pom.xml: 1 | 2 | 4.0.0 3 | 4 | 5 | kcache-parent 6 | io.kcache 7 | 5.2.4-SNAPSHOT 8 | 9 | 10 | io.kcache 11 | kcache-mapdb 12 | jar 13 | kcache-mapdb 14 | 15 | 16 | 17 | io.kcache 18 | kcache 19 | ${project.version} 20 | 21 | 22 | io.kcache 23 | kcache 24 | ${project.version} 25 | test-jar 26 | test 27 | 28 | 29 | com.google.guava 30 | guava 31 | 32 | 33 | org.apache.kafka 34 | kafka-clients 35 | 36 | 37 | org.slf4j 38 | slf4j-api 39 | 40 | 41 | org.mapdb 42 | mapdb 43 | 44 | 45 | 46 | org.apache.kafka 47 | kafka-clients 48 | test 49 | test 50 | 51 | 52 | org.apache.kafka 53 | kafka_${kafka.scala.version} 54 | test 55 | 56 | 57 | org.apache.kafka 58 | kafka_${kafka.scala.version} 59 | test 60 | test 61 | 62 | 63 | junit 64 | junit 65 | test 66 | 67 | 68 | org.slf4j 69 | slf4j-log4j12 70 | test 71 | 72 | 73 | 74 | 75 | -------------------------------------------------------------------------------- /kcache-lmdb/pom.xml: 1 | 2 | 4.0.0 3 | 4 | 5 | kcache-parent 6 | io.kcache 7 | 5.2.4-SNAPSHOT 8 | 9 | 10 | io.kcache 11 | kcache-lmdb 12 | jar 13 | kcache-lmdb 14 | 15 | 16 | 17 | io.kcache 18 | kcache 19 | ${project.version} 20 | 21 | 22 | io.kcache 23 | kcache 24 | ${project.version} 25 | test-jar 26 | test 27 | 28 | 29 | com.google.guava 30 | guava 31 | 32 | 33 | org.apache.kafka 34 | kafka-clients 35 | 36 | 37 | org.slf4j 38 | slf4j-api 39 | 40 | 41 | org.lmdbjava 42 | lmdbjava 43 | 44 | 45 | 46 | org.apache.kafka 47 | kafka-clients 48 | test 49 | test 50 | 51 | 52 | org.apache.kafka 53 | kafka_${kafka.scala.version} 54 | test 55
| 56 | 57 | org.apache.kafka 58 | kafka_${kafka.scala.version} 59 | test 60 | test 61 | 62 | 63 | junit 64 | junit 65 | test 66 | 67 | 68 | org.slf4j 69 | slf4j-log4j12 70 | test 71 | 72 | 73 | 74 | 75 | -------------------------------------------------------------------------------- /kcache-rocksdb/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 4.0.0 3 | 4 | 5 | kcache-parent 6 | io.kcache 7 | 5.2.4-SNAPSHOT 8 | 9 | 10 | io.kcache 11 | kcache-rocksdb 12 | jar 13 | kcache-rocksdb 14 | 15 | 16 | 17 | io.kcache 18 | kcache 19 | ${project.version} 20 | 21 | 22 | io.kcache 23 | kcache 24 | ${project.version} 25 | test-jar 26 | test 27 | 28 | 29 | com.google.guava 30 | guava 31 | 32 | 33 | org.apache.kafka 34 | kafka-clients 35 | 36 | 37 | org.slf4j 38 | slf4j-api 39 | 40 | 41 | org.rocksdb 42 | rocksdbjni 43 | 44 | 45 | 46 | org.apache.kafka 47 | kafka-clients 48 | test 49 | test 50 | 51 | 52 | org.apache.kafka 53 | kafka_${kafka.scala.version} 54 | test 55 | 56 | 57 | org.apache.kafka 58 | kafka_${kafka.scala.version} 59 | test 60 | test 61 | 62 | 63 | junit 64 | junit 65 | test 66 | 67 | 68 | org.slf4j 69 | slf4j-log4j12 70 | test 71 | 72 | 73 | 74 | 75 | -------------------------------------------------------------------------------- /kcache/src/main/java/io/kcache/utils/FileCheckpointHandler.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014-2024 Confluent Inc. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | package io.kcache.utils; 18 | 19 | import io.kcache.CheckpointHandler; 20 | import io.kcache.KafkaCacheConfig; 21 | import io.kcache.exceptions.CacheInitializationException; 22 | import java.io.IOException; 23 | import java.util.HashMap; 24 | import java.util.Map; 25 | import org.apache.kafka.common.TopicPartition; 26 | import org.slf4j.Logger; 27 | import org.slf4j.LoggerFactory; 28 | 29 | public class FileCheckpointHandler implements CheckpointHandler { 30 | 31 | private static final Logger log = LoggerFactory.getLogger(FileCheckpointHandler.class); 32 | 33 | private String topic; 34 | private String checkpointDir; 35 | private int checkpointVersion; 36 | private OffsetCheckpoint checkpointFile; 37 | private final Map<TopicPartition, Long> checkpointFileCache = new HashMap<>(); 38 | 39 | @Override 40 | public void configure(Map<String, ?> configs) { 41 | KafkaCacheConfig config = new KafkaCacheConfig(configs); 42 | this.topic = config.getString(KafkaCacheConfig.KAFKACACHE_TOPIC_CONFIG); 43 | this.checkpointDir = config.getString(KafkaCacheConfig.KAFKACACHE_CHECKPOINT_DIR_CONFIG); 44 | this.checkpointVersion = config.getInt(KafkaCacheConfig.KAFKACACHE_CHECKPOINT_VERSION_CONFIG); 45 | } 46 | 47 | @Override 48 | public void init() throws CacheInitializationException { 49 | try { 50 | checkpointFile = new OffsetCheckpoint(checkpointDir, checkpointVersion, topic); 51 | checkpointFileCache.putAll(checkpointFile.read()); 52 | } catch (IOException e) { 53 | throw new CacheInitializationException("Failed to read checkpoints", e); 54 | } 55 | } 56 | 57 | @Override 58 | public Map<TopicPartition, Long> checkpoints() { 59 | return new HashMap<>(checkpointFileCache); 60 | } 61 | 62 | @Override 63 | public void updateCheckpoints(Map<TopicPartition, Long> checkpoints) throws IOException { 64 | checkpointFileCache.putAll(checkpoints); 65 | checkpointFile.write(checkpointFileCache); 66 | } 67 | 68 | @Override 69 | public void close() throws IOException { 70 | if (checkpointFile != null) { 71 | checkpointFile.close(); 72 | } 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /kcache-bdbje/pom.xml: 1 | 2 | 4.0.0 3 | 4 | 5 | kcache-parent 6 | io.kcache 7 | 5.2.4-SNAPSHOT 8 | 9 | 10 | io.kcache 11 | kcache-bdbje 12 | jar 13 | kcache-bdbje 14 | 15 | 16 | 17 | io.kcache 18 | kcache 19 | ${project.version} 20 | 21 | 22 | io.kcache 23 | kcache 24 | ${project.version} 25 | test-jar 26 | test 27 | 28 | 29 | com.google.guava 30 | guava 31 | 32 | 33 | com.esotericsoftware 34 | kryo 35 | 36 | 37 | org.apache.kafka 38 | kafka-clients 39 | 40 | 41 | org.slf4j 42 | slf4j-api 43 | 44 | 45 | com.sleepycat 46 | je 47 | 48 | 49 | 50 | org.apache.kafka 51 | kafka-clients 52 | test 53 | test 54 | 55 | 56 | org.apache.kafka 57 | kafka_${kafka.scala.version} 58 | test 59 | 60 | 61 | org.apache.kafka 62 | kafka_${kafka.scala.version} 63 | test 64 | test 65 | 66 | 67 | junit 68 | junit 69 | test 70 | 71 | 72 | org.slf4j 73 | slf4j-log4j12 74 | test 75 | 76 | 77 | 78 | 79 | -------------------------------------------------------------------------------- /kcache/src/main/java/io/kcache/utils/EnumRecommender.java: 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership.
5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | 18 | package io.kcache.utils; 19 | 20 | import com.google.common.collect.ImmutableList; 21 | import com.google.common.collect.ImmutableSet; 22 | import java.util.LinkedHashSet; 23 | import java.util.List; 24 | import java.util.Map; 25 | import java.util.Set; 26 | import java.util.function.Function; 27 | 28 | import org.apache.kafka.common.config.ConfigDef; 29 | import org.apache.kafka.common.config.ConfigException; 30 | 31 | public class EnumRecommender<T extends Enum<T>> implements ConfigDef.Validator, ConfigDef.Recommender { 32 | 33 | private final Set<String> validValues; 34 | private final Class<T> enumClass; 35 | 36 | public EnumRecommender( 37 | Class<T> enumClass, 38 | Function<String, String> conversion, 39 | T... excludedValues 40 | ) { 41 | this.enumClass = enumClass; 42 | Set<String> validEnums = new LinkedHashSet<>(); 43 | for (Object o : enumClass.getEnumConstants()) { 44 | String key = conversion.apply(o.toString()); 45 | validEnums.add(key); 46 | } 47 | for (Object excluded : excludedValues) { 48 | String key = conversion.apply(excluded.toString()); 49 | validEnums.remove(key); 50 | } 51 | this.validValues = ImmutableSet.copyOf(validEnums); 52 | } 53 | 54 | @Override 55 | public void ensureValid(String key, Object value) { 56 | // call toString() explicitly because the IDE complains if the Object is passed directly 57 | if (value != null && !validValues.contains(value.toString())) { 58 | throw new ConfigException(key, value, "Invalid enumerator"); 59 | } 60 | } 61 | 62 | @Override 63 | public String toString() { 64 | return validValues.toString(); 65 | } 66 | 67 | @Override 68 | public List<Object> validValues(String name, Map<String, Object> connectorConfigs) { 69 | return ImmutableList.copyOf(validValues); 70 | } 71 | 72 | @Override 73 | public boolean visible(String name, Map<String, Object> connectorConfigs) { 74 | return true; 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /kcache/src/test/java/io/kcache/utils/OffsetCheckpointTest.java: 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License.
16 | */ 17 | package io.kcache.utils; 18 | 19 | import org.apache.kafka.common.TopicPartition; 20 | import org.junit.Test; 21 | 22 | import java.io.File; 23 | import java.io.IOException; 24 | import java.util.Collections; 25 | import java.util.HashMap; 26 | import java.util.Map; 27 | 28 | import static org.junit.Assert.assertEquals; 29 | import static org.junit.Assert.assertFalse; 30 | 31 | public class OffsetCheckpointTest { 32 | 33 | private final String topic = "topic"; 34 | 35 | @Test 36 | public void testReadWrite() throws IOException { 37 | try (final OffsetCheckpoint checkpoint = new OffsetCheckpoint("/tmp", 0, topic)) { 38 | final Map<TopicPartition, Long> offsets = new HashMap<>(); 39 | offsets.put(new TopicPartition(topic, 0), 0L); 40 | offsets.put(new TopicPartition(topic, 1), 1L); 41 | offsets.put(new TopicPartition(topic, 2), 2L); 42 | 43 | checkpoint.write(offsets); 44 | assertEquals(offsets, checkpoint.read()); 45 | 46 | checkpoint.delete(); 47 | assertFalse(new File("/tmp", OffsetCheckpoint.CHECKPOINT_FILE_NAME).exists()); 48 | 49 | offsets.put(new TopicPartition(topic, 3), 3L); 50 | checkpoint.write(offsets); 51 | assertEquals(offsets, checkpoint.read()); 52 | 53 | checkpoint.delete(); 54 | } 55 | } 56 | 57 | @Test 58 | public void shouldNotWriteCheckpointWhenNoOffsets() throws IOException { 59 | // we do not need to worry about file name uniqueness since this file should not be created 60 | try (final OffsetCheckpoint checkpoint = new OffsetCheckpoint("/tmp", 0, topic)) { 61 | 62 | checkpoint.write(Collections.emptyMap()); 63 | 64 | assertFalse(new File("/tmp", OffsetCheckpoint.CHECKPOINT_FILE_NAME).exists()); 65 | 66 | assertEquals(Collections.emptyMap(), checkpoint.read()); 67 | 68 | // deleting a non-existent checkpoint file should be fine 69 | checkpoint.delete(); 70 | } 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /kcache-caffeine/pom.xml: 1 | 2 | 4.0.0 3 | 4 | 5 | kcache-parent 6 | io.kcache 7 | 5.2.4-SNAPSHOT 8 | 9 | 10 | io.kcache 11 | kcache-caffeine 12 | jar 13 | kcache-caffeine 14 | 15 | 16 | 17 | io.kcache 18 | kcache 19 | ${project.version} 20 | 21 | 22 | io.kcache 23 | kcache 24 | ${project.version} 25 | test-jar 26 | test 27 | 28 | 29 | com.google.guava 30 | guava 31 | 32 | 33 | com.esotericsoftware 34 | kryo 35 | 36 | 37 | org.apache.kafka 38 | kafka-clients 39 | 40 | 41 | org.slf4j 42 | slf4j-api 43 | 44 | 45 | com.github.ben-manes.caffeine 46 | caffeine 47 | 48 | 49 | 50 | org.apache.kafka 51 | kafka-clients 52 | test 53 | test 54 | 55 | 56 | org.apache.kafka 57 | kafka_${kafka.scala.version} 58 | test 59 | 60 | 61 | org.apache.kafka 62 | kafka_${kafka.scala.version} 63 | test 64 | test 65 | 66 | 67 | junit 68 | junit 69 | test 70 | 71 | 72 | org.slf4j 73 | slf4j-log4j12 74 | test 75 | 76 | 77 | 78 | 79 | -------------------------------------------------------------------------------- /kcache-lmdb/src/main/java/io/kcache/lmdb/LmdbIterator.java: 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License.
You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | package io.kcache.lmdb; 18 | 19 | import io.kcache.KeyValue; 20 | import io.kcache.KeyValueIterator; 21 | import java.nio.ByteBuffer; 22 | import java.util.Iterator; 23 | import org.lmdbjava.CursorIterable; 24 | import org.lmdbjava.CursorIterable.KeyVal; 25 | import org.lmdbjava.Dbi; 26 | import org.lmdbjava.Env; 27 | import org.lmdbjava.KeyRange; 28 | import org.lmdbjava.Txn; 29 | 30 | class LmdbIterator implements KeyValueIterator<byte[], byte[]> { 31 | 32 | private final Txn<ByteBuffer> txn; 33 | private final CursorIterable<ByteBuffer> iterable; 34 | private final Iterator<KeyVal<ByteBuffer>> iterator; 35 | 36 | LmdbIterator(final Env<ByteBuffer> env, 37 | final Dbi<ByteBuffer> db, 38 | final KeyRange<ByteBuffer> keyRange) { 39 | this.txn = env.txnRead(); 40 | this.iterable = db.iterate(txn, keyRange); 41 | this.iterator = this.iterable.iterator(); 42 | txn.reset(); 43 | } 44 | 45 | @Override 46 | public synchronized boolean hasNext() { 47 | txn.renew(); 48 | try { 49 | return iterator.hasNext(); 50 | } finally { 51 | txn.reset(); 52 | } 53 | } 54 | 55 | @Override 56 | public KeyValue<byte[], byte[]> next() { 57 | txn.renew(); 58 | try { 59 | KeyVal<ByteBuffer> keyVal = iterator.next(); 60 | if (keyVal == null) { 61 | return null; 62 | } 63 | ByteBuffer keyBuf = keyVal.key(); 64 | ByteBuffer valueBuf = keyVal.val(); 65 | byte[] keyBytes = new byte[keyBuf.remaining()]; 66 | byte[] valueBytes = new byte[valueBuf.remaining()]; 67 | keyBuf.get(keyBytes); 68 | valueBuf.get(valueBytes); 69 | return new KeyValue<>(keyBytes, valueBytes); 70 | } finally { 71 | txn.reset(); 72 | } 73 | } 74 | 75 | @Override 76 | public void remove() { 77 | throw new UnsupportedOperationException("LMDB iterator does not support remove()"); 78 | } 79 | 80 | @Override 81 | public synchronized void close() { 82 | iterable.close(); 83 | txn.close(); 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /kcache/src/test/java/io/kcache/KafkaCacheSSLTest.java: 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to you under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License.
16 | */ 17 | package io.kcache; 18 | 19 | import io.kcache.exceptions.CacheInitializationException; 20 | import io.kcache.utils.SSLClusterTestHarness; 21 | import java.util.Properties; 22 | import org.junit.Test; 23 | 24 | import static org.junit.Assert.assertEquals; 25 | 26 | public class KafkaCacheSSLTest extends SSLClusterTestHarness { 27 | @Test 28 | public void testInitialization() throws Exception { 29 | Cache<String, String> kafkaCache = createAndInitKafkaCacheInstance(requireSSLClientAuth()); 30 | kafkaCache.close(); 31 | } 32 | 33 | @Test(expected = CacheInitializationException.class) 34 | public void testInitializationWithoutClientAuth() throws Exception { 35 | Cache<String, String> kafkaCache = createAndInitKafkaCacheInstance(false); 36 | kafkaCache.close(); 37 | 38 | // TODO: make the timeout shorter so the test fails quicker. 39 | } 40 | 41 | @Test(expected = CacheInitializationException.class) 42 | public void testDoubleInitialization() throws Exception { 43 | try (Cache<String, String> kafkaCache = createAndInitKafkaCacheInstance(requireSSLClientAuth())) { 44 | kafkaCache.init(); 45 | } 46 | } 47 | 48 | @Test 49 | public void testSimplePut() throws Exception { 50 | try (Cache<String, String> kafkaCache = createAndInitKafkaCacheInstance(requireSSLClientAuth())) { 51 | String key = "Kafka"; 52 | String value = "Rocks"; 53 | kafkaCache.put(key, value); 54 | String retrievedValue = kafkaCache.get(key); 55 | assertEquals("Retrieved value should match entered value", value, retrievedValue); 56 | } 57 | } 58 | 59 | protected Cache<String, String> createAndInitKafkaCacheInstance(boolean requireSSLClientAuth) { 60 | Properties props = getKafkaCacheProperties(); 61 | return CacheUtils.createAndInitSSLKafkaCacheInstance(props, clientSslConfigs, requireSSLClientAuth); 62 | } 63 | 64 | protected Properties getKafkaCacheProperties() { 65 | Properties props = new Properties(); 66 | props.put(KafkaCacheConfig.KAFKACACHE_BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); 67 | props.put(KafkaCacheConfig.KAFKACACHE_BACKING_CACHE_CONFIG, CacheType.MEMORY.toString()); 68 | return props; 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /kcache-rocksdb/src/main/java/io/kcache/rocksdb/RocksDBIterator.java: 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License.
16 | */ 17 | package io.kcache.rocksdb; 18 | 19 | import io.kcache.KeyValue; 20 | import io.kcache.KeyValueIterator; 21 | import io.kcache.exceptions.CacheException; 22 | import org.apache.kafka.common.utils.AbstractIterator; 23 | import org.rocksdb.RocksIterator; 24 | 25 | import java.util.Set; 26 | 27 | class RocksDBIterator extends AbstractIterator<KeyValue<byte[], byte[]>> implements KeyValueIterator<byte[], byte[]> { 28 | 29 | private final String storeName; 30 | private final RocksIterator iter; 31 | private final Set<KeyValueIterator<byte[], byte[]>> openIterators; 32 | private final boolean isDescending; 33 | 34 | private volatile boolean open = true; 35 | 36 | private KeyValue<byte[], byte[]> next; 37 | 38 | RocksDBIterator(final String storeName, 39 | final RocksIterator iter, 40 | final Set<KeyValueIterator<byte[], byte[]>> openIterators, 41 | final boolean isDescending) { 42 | this.storeName = storeName; 43 | this.iter = iter; 44 | this.openIterators = openIterators; 45 | this.isDescending = isDescending; 46 | if (isDescending) { 47 | iter.seekToLast(); 48 | } else { 49 | iter.seekToFirst(); 50 | } 51 | } 52 | 53 | @Override 54 | public synchronized boolean hasNext() { 55 | if (!open) { 56 | throw new CacheException(String.format("RocksDB iterator for store %s has closed", storeName)); 57 | } 58 | return super.hasNext(); 59 | } 60 | 61 | @Override 62 | public KeyValue<byte[], byte[]> makeNext() { 63 | if (!iter.isValid()) { 64 | return allDone(); 65 | } else { 66 | next = getKeyValue(); 67 | if (isDescending) { 68 | iter.prev(); 69 | } else { 70 | iter.next(); 71 | } 72 | return next; 73 | } 74 | } 75 | 76 | private KeyValue<byte[], byte[]> getKeyValue() { 77 | return new KeyValue<>(iter.key(), iter.value()); 78 | } 79 | 80 | @Override 81 | public void remove() { 82 | throw new UnsupportedOperationException("RocksDB iterator does not support remove()"); 83 | } 84 | 85 | @Override 86 | public synchronized void close() { 87 | openIterators.remove(this); 88 | iter.close(); 89 | open = false; 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /kcache/src/test/java/io/kcache/utils/SSLClusterTestHarness.java: 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to you under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License.
16 | */ 17 | package io.kcache.utils; 18 | 19 | import kafka.server.KafkaConfig; 20 | import kafka.utils.TestUtils; 21 | import org.apache.kafka.common.network.ConnectionMode; 22 | import org.apache.kafka.common.security.auth.SecurityProtocol; 23 | import org.apache.kafka.test.TestSslUtils; 24 | import scala.Option; 25 | 26 | import java.io.File; 27 | import java.io.IOException; 28 | import java.util.Map; 29 | import java.util.Properties; 30 | 31 | public class SSLClusterTestHarness extends ClusterTestHarness { 32 | public Map<String, Object> clientSslConfigs; 33 | 34 | public SSLClusterTestHarness() { 35 | super(DEFAULT_NUM_BROKERS); 36 | } 37 | 38 | @Override 39 | protected SecurityProtocol getSecurityProtocol() { 40 | return SecurityProtocol.SSL; 41 | } 42 | 43 | @Override 44 | protected KafkaConfig getKafkaConfig(int brokerId) { 45 | File trustStoreFile; 46 | try { 47 | trustStoreFile = File.createTempFile("SSLClusterTestHarness-truststore", ".jks"); 48 | } catch (IOException ioe) { 49 | throw new RuntimeException("Unable to create temporary file for the truststore."); 50 | } 51 | final Option<File> trustStoreFileOption = Option.apply(trustStoreFile); 52 | final Option<SecurityProtocol> sslInterBrokerSecurityProtocol = Option.apply(SecurityProtocol.SSL); 53 | Properties props = TestUtils.createBrokerConfig( 54 | brokerId, zkConnect, false, false, TestUtils.RandomPort(), sslInterBrokerSecurityProtocol, 55 | trustStoreFileOption, EMPTY_SASL_PROPERTIES, false, false, TestUtils.RandomPort(), 56 | true, TestUtils.RandomPort(), false, TestUtils.RandomPort(), Option.empty(), 1, false, 57 | 1, (short) 1, false); 58 | 59 | // set up client SSL. This needs to happen before the broker is initialized, because the client's cert 60 | // needs to be added to the broker's trust store. 61 | Map<String, Object> sslConfigs; 62 | try { 63 | this.clientSslConfigs = TestSslUtils.createSslConfig(true, true, ConnectionMode.CLIENT, 64 | trustStoreFile, "client", "localhost"); 65 | } catch (Exception e) { 66 | throw new RuntimeException(e); 67 | } 68 | 69 | injectProperties(props); 70 | if (requireSSLClientAuth()) { 71 | props.setProperty("ssl.client.auth", "required"); 72 | } 73 | 74 | return KafkaConfig.fromProps(props); 75 | } 76 | 77 | protected boolean requireSSLClientAuth() { 78 | return true; 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /kcache-rocksdb/src/main/java/io/kcache/rocksdb/RocksDBRangeIterator.java: 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License.
16 | */ 17 | package io.kcache.rocksdb; 18 | 19 | import io.kcache.KeyValue; 20 | import io.kcache.KeyValueIterator; 21 | import org.rocksdb.RocksIterator; 22 | 23 | import java.util.Collections; 24 | import java.util.Comparator; 25 | import java.util.Set; 26 | 27 | class RocksDBRangeIterator extends RocksDBIterator { 28 | private final byte[] rawFromKey; 29 | private final boolean fromInclusive; 30 | private final byte[] rawToKey; 31 | private final boolean toInclusive; 32 | private final Comparator<byte[]> comparator; 33 | private boolean checkAndSkipFrom; 34 | 35 | RocksDBRangeIterator(String storeName, 36 | RocksIterator iter, 37 | Set<KeyValueIterator<byte[], byte[]>> openIterators, 38 | byte[] from, 39 | boolean fromInclusive, 40 | byte[] to, 41 | boolean toInclusive, 42 | boolean isDescending, 43 | Comparator<byte[]> comparator) { 44 | super(storeName, iter, openIterators, isDescending); 45 | this.rawFromKey = from; 46 | if (rawFromKey == null) { 47 | if (isDescending) { 48 | iter.seekToLast(); 49 | } else { 50 | iter.seekToFirst(); 51 | } 52 | } else { 53 | if (isDescending) { 54 | iter.seekForPrev(rawFromKey); 55 | } else { 56 | iter.seek(rawFromKey); 57 | } 58 | } 59 | this.fromInclusive = fromInclusive; 60 | if (rawFromKey != null && !fromInclusive) { 61 | checkAndSkipFrom = true; 62 | } 63 | 64 | this.rawToKey = to; 65 | this.toInclusive = toInclusive; 66 | this.comparator = isDescending ? Collections.reverseOrder(comparator) : comparator; 67 | } 68 | 69 | @Override 70 | public KeyValue<byte[], byte[]> makeNext() { 71 | KeyValue<byte[], byte[]> next = super.makeNext(); 72 | if (checkAndSkipFrom) { 73 | if (next != null && comparator.compare(next.key, rawFromKey) == 0) { 74 | next = super.makeNext(); 75 | } 76 | checkAndSkipFrom = false; 77 | } 78 | 79 | if (next == null) { 80 | return allDone(); 81 | } else if (rawToKey == null) { 82 | return next; 83 | } else { 84 | int cmp = comparator.compare(next.key, rawToKey); 85 | if (cmp < 0 || (cmp == 0 && toInclusive)) { 86 | return next; 87 | } else { 88 | return allDone(); 89 | } 90 | } 91 | } 92 | } 93 | 94 | -------------------------------------------------------------------------------- /kcache/src/test/java/io/kcache/KafkaBoundedCacheTest.java: 1 | /* 2 | * Copyright 2014-2018 Confluent Inc. 3 | *

4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | *

8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | *

10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package io.kcache; 18 | 19 | import static org.junit.Assert.assertEquals; 20 | 21 | import io.kcache.utils.Caches; 22 | import io.kcache.utils.InMemoryBoundedCache; 23 | import java.util.HashMap; 24 | import java.util.Map; 25 | import java.util.Properties; 26 | import org.apache.kafka.common.TopicPartition; 27 | import org.apache.kafka.common.serialization.Serdes; 28 | import org.junit.Test; 29 | 30 | public class KafkaBoundedCacheTest extends KafkaCacheTest { 31 | 32 | @Test 33 | public void testCacheLoader() throws Exception { 34 | try (Cache<String, String> kafkaCache = createAndInitKafkaCacheInstanceWithLoader()) { 35 | String key = "Kafka"; 36 | String value = "Rocks"; 37 | kafkaCache.put(key, value); 38 | String key2 = "Kafka2"; 39 | String value2 = "Rocks2"; 40 | kafkaCache.put(key2, value2); 41 | String retrievedValue = kafkaCache.get(key); 42 | assertEquals("Retrieved value should match entered value", value, retrievedValue); 43 | } 44 | } 45 | 46 | private Cache<String, String> createAndInitKafkaCacheInstanceWithLoader() throws Exception { 47 | Properties props = super.getKafkaCacheProperties(); 48 | KafkaCacheConfig config = new KafkaCacheConfig(props); 49 | Map<String, String> map = new HashMap<>(); 50 | InMemoryBoundedCache<String, String> cache = new InMemoryBoundedCache<>( 51 | 1, null, new StringFromMapCacheLoader(map)); 52 | Cache<String, String> kafkaCache = Caches.concurrentCache( 53 | new KafkaCache<>(config, 54 | Serdes.String(), 55 | Serdes.String(), 56 | new StringToMapUpdateHandler(map), 57 | cache)); 58 | kafkaCache.init(); 59 | return kafkaCache; 60 | } 61 | 62 | public static class StringToMapUpdateHandler implements CacheUpdateHandler<String, String> { 63 | public final Map<String, String> map; 64 | 65 | public StringToMapUpdateHandler(Map<String, String> map) { 66 | this.map = map; 67 | } 68 | 69 | @Override 70 | public void handleUpdate(String key, String value, String oldValue, 71 | TopicPartition tp, long offset, long timestamp) { 72 | map.put(key, value); 73 | } 74 | } 75 | 76 | public static class StringFromMapCacheLoader implements CacheLoader<String, String> { 77 | public final Map<String, String> map; 78 | 79 | public StringFromMapCacheLoader(Map<String, String> map) { 80 | this.map = map; 81 | } 82 | 83 | @Override 84 | public String load(String key) { 85 | return map.get(key); 86 | } 87 | } 88 | 89 | @Override 90 | protected Properties getKafkaCacheProperties() throws Exception { 91 | Properties props = super.getKafkaCacheProperties(); 92 | props.put(KafkaCacheConfig.KAFKACACHE_BACKING_CACHE_CONFIG, CacheType.MEMORY.toString()); 93 | props.put(KafkaCacheConfig.KAFKACACHE_BOUNDED_CACHE_SIZE_CONFIG, 2); 94 | return props; 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /kcache/src/test/java/io/kcache/CacheUtils.java: 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to you under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License.
You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | package io.kcache; 18 | 19 | import io.kcache.exceptions.CacheInitializationException; 20 | import io.kcache.utils.Caches; 21 | import io.kcache.utils.StringUpdateHandler; 22 | import org.apache.kafka.common.config.SslConfigs; 23 | import org.apache.kafka.common.config.types.Password; 24 | import org.apache.kafka.common.security.auth.SecurityProtocol; 25 | import org.apache.kafka.common.serialization.Serdes; 26 | 27 | import java.util.Map; 28 | import java.util.Properties; 29 | 30 | public class CacheUtils { 31 | 32 | /** 33 | * Get a new instance of a KafkaCache and initialize it. 34 | */ 35 | public static Cache<String, String> createAndInitKafkaCacheInstance(Properties props) 36 | throws CacheInitializationException { 37 | KafkaCacheConfig config = new KafkaCacheConfig(props); 38 | Cache<String, String> kafkaCache = Caches.concurrentCache( 39 | new KafkaCache<>(config, 40 | Serdes.String(), 41 | Serdes.String(), 42 | new StringUpdateHandler(), 43 | null)); 44 | kafkaCache.init(); 45 | return kafkaCache; 46 | } 47 | /** 48 | * Get a new instance of a SASL KafkaCache and initialize it. 49 | */ 50 | public static Cache<String, String> createAndInitSASLCacheInstance(Properties props) 51 | throws CacheInitializationException { 52 | 53 | props.put(KafkaCacheConfig.KAFKACACHE_SECURITY_PROTOCOL_CONFIG, 54 | SecurityProtocol.SASL_PLAINTEXT.toString()); 55 | 56 | return createAndInitKafkaCacheInstance(props); 57 | } 58 | 59 | /** 60 | * Get a new instance of an SSL KafkaCache and initialize it. 61 | */ 62 | public static Cache<String, String> createAndInitSSLKafkaCacheInstance( 63 | Properties props, Map<String, Object> sslConfigs, boolean requireSSLClientAuth) 64 | throws CacheInitializationException { 65 | 66 | props.put(KafkaCacheConfig.KAFKACACHE_SECURITY_PROTOCOL_CONFIG, 67 | SecurityProtocol.SSL.toString()); 68 | props.put(KafkaCacheConfig.KAFKACACHE_SSL_TRUSTSTORE_LOCATION_CONFIG, 69 | sslConfigs.get(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG)); 70 | props.put(KafkaCacheConfig.KAFKACACHE_SSL_TRUSTSTORE_PASSWORD_CONFIG, 71 | ((Password) sslConfigs.get(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG)).value()); 72 | if (requireSSLClientAuth) { 73 | props.put(KafkaCacheConfig.KAFKACACHE_SSL_KEYSTORE_LOCATION_CONFIG, 74 | sslConfigs.get(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG)); 75 | props.put(KafkaCacheConfig.KAFKACACHE_SSL_KEYSTORE_PASSWORD_CONFIG, 76 | ((Password) sslConfigs.get(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG)).value()); 77 | props.put(KafkaCacheConfig.KAFKACACHE_SSL_KEY_PASSWORD_CONFIG, 78 | ((Password) sslConfigs.get(SslConfigs.SSL_KEY_PASSWORD_CONFIG)).value()); 79 | } 80 | 81 | return createAndInitKafkaCacheInstance(props); 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /kcache-rdbms/src/main/java/io/kcache/rdbms/jooq/tables/records/KvRecord.java: 1 | /* 2 | * This file is generated by jOOQ.
3 | */ 4 | package io.kcache.rdbms.jooq.tables.records; 5 | 6 | 7 | import org.jooq.Field; 8 | import org.jooq.Record1; 9 | import org.jooq.Record2; 10 | import org.jooq.Row2; 11 | import org.jooq.impl.UpdatableRecordImpl; 12 | 13 | import io.kcache.rdbms.jooq.tables.Kv; 14 | 15 | 16 | /** 17 | * This class is generated by jOOQ. 18 | */ 19 | @SuppressWarnings({ "all", "unchecked", "rawtypes" }) 20 | public class KvRecord extends UpdatableRecordImpl<KvRecord> implements Record2<byte[], byte[]> { 21 | 22 | private static final long serialVersionUID = 1L; 23 | 24 | /** 25 | * Setter for kcache.KV.kv_key. 26 | */ 27 | public void setKvKey(byte[] value) { 28 | set(0, value); 29 | } 30 | 31 | /** 32 | * Getter for kcache.KV.kv_key. 33 | */ 34 | public byte[] getKvKey() { 35 | return (byte[]) get(0); 36 | } 37 | 38 | /** 39 | * Setter for kcache.KV.kv_value. 40 | */ 41 | public void setKvValue(byte[] value) { 42 | set(1, value); 43 | } 44 | 45 | /** 46 | * Getter for kcache.KV.kv_value. 47 | */ 48 | public byte[] getKvValue() { 49 | return (byte[]) get(1); 50 | } 51 | 52 | // ------------------------------------------------------------------------- 53 | // Primary key information 54 | // ------------------------------------------------------------------------- 55 | 56 | @Override 57 | public Record1<byte[]> key() { 58 | return (Record1<byte[]>) super.key(); 59 | } 60 | 61 | // ------------------------------------------------------------------------- 62 | // Record2 type implementation 63 | // ------------------------------------------------------------------------- 64 | 65 | @Override 66 | public Row2<byte[], byte[]> fieldsRow() { 67 | return (Row2<byte[], byte[]>) super.fieldsRow(); 68 | } 69 | 70 | @Override 71 | public Row2<byte[], byte[]> valuesRow() { 72 | return (Row2<byte[], byte[]>) super.valuesRow(); 73 | } 74 | 75 | @Override 76 | public Field<byte[]> field1() { 77 | return Kv.KV.KV_KEY; 78 | } 79 | 80 | @Override 81 | public Field<byte[]> field2() { 82 | return Kv.KV.KV_VALUE; 83 | } 84 | 85 | @Override 86 | public byte[] component1() { 87 | return getKvKey(); 88 | } 89 | 90 | @Override 91 | public byte[] component2() { 92 | return getKvValue(); 93 | } 94 | 95 | @Override 96 | public byte[] value1() { 97 | return getKvKey(); 98 | } 99 | 100 | @Override 101 | public byte[] value2() { 102 | return getKvValue(); 103 | } 104 | 105 | @Override 106 | public KvRecord value1(byte[] value) { 107 | setKvKey(value); 108 | return this; 109 | } 110 | 111 | @Override 112 | public KvRecord value2(byte[] value) { 113 | setKvValue(value); 114 | return this; 115 | } 116 | 117 | @Override 118 | public KvRecord values(byte[] value1, byte[] value2) { 119 | value1(value1); 120 | value2(value2); 121 | return this; 122 | } 123 | 124 | // ------------------------------------------------------------------------- 125 | // Constructors 126 | // ------------------------------------------------------------------------- 127 | 128 | /** 129 | * Create a detached KvRecord 130 | */ 131 | public KvRecord() { 132 | super(Kv.KV); 133 | } 134 | 135 | /** 136 | * Create a detached, initialised KvRecord 137 | */ 138 | public KvRecord(byte[] kvKey, byte[] kvValue) { 139 | super(Kv.KV); 140 | 141 | setKvKey(kvKey); 142 | setKvValue(kvValue); 143 | } 144 | } 145 | -------------------------------------------------------------------------------- /kcache-caffeine/src/test/java/io/kcache/caffeine/KafkaCaffeineCacheTest.java: 1 | /* 2 | * Copyright 2014-2018 Confluent Inc. 3 | *

4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | *

8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | *

10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package io.kcache.caffeine; 18 | 19 | import static org.junit.Assert.assertEquals; 20 | 21 | import io.kcache.Cache; 22 | import io.kcache.CacheLoader; 23 | import io.kcache.CacheType; 24 | import io.kcache.CacheUpdateHandler; 25 | import io.kcache.KafkaCache; 26 | import io.kcache.KafkaCacheConfig; 27 | import io.kcache.KafkaCacheTest; 28 | import io.kcache.utils.Caches; 29 | import java.util.Map; 30 | import java.util.Properties; 31 | import java.util.concurrent.ConcurrentHashMap; 32 | import org.apache.kafka.common.TopicPartition; 33 | import org.apache.kafka.common.serialization.Serdes; 34 | import org.junit.Test; 35 | 36 | public class KafkaCaffeineCacheTest extends KafkaCacheTest { 37 | 38 | @Test 39 | public void testCacheLoader() throws Exception { 40 | try (Cache<String, String> kafkaCache = createAndInitKafkaCacheInstanceWithLoader()) { 41 | String key = "Kafka"; 42 | String value = "Rocks"; 43 | kafkaCache.put(key, value); 44 | String key2 = "Kafka2"; 45 | String value2 = "Rocks2"; 46 | kafkaCache.put(key2, value2); 47 | String retrievedValue = kafkaCache.get(key); 48 | assertEquals("Retrieved value should match entered value", value, retrievedValue); 49 | } 50 | } 51 | 52 | private Cache<String, String> createAndInitKafkaCacheInstanceWithLoader() throws Exception { 53 | Properties props = super.getKafkaCacheProperties(); 54 | KafkaCacheConfig config = new KafkaCacheConfig(props); 55 | Map<String, String> map = new ConcurrentHashMap<>(); 56 | CaffeineCache<String, String> cache = new CaffeineCache<>( 57 | 1, null, new StringFromMapCacheLoader(map)); 58 | Cache<String, String> kafkaCache = Caches.concurrentCache( 59 | new KafkaCache<>(config, 60 | Serdes.String(), 61 | Serdes.String(), 62 | new StringToMapUpdateHandler(map), 63 | cache)); 64 | kafkaCache.init(); 65 | return kafkaCache; 66 | } 67 | 68 | public static class StringToMapUpdateHandler implements CacheUpdateHandler<String, String> { 69 | public final Map<String, String> map; 70 | 71 | public StringToMapUpdateHandler(Map<String, String> map) { 72 | this.map = map; 73 | } 74 | 75 | @Override 76 | public void handleUpdate(String key, String value, String oldValue, 77 | TopicPartition tp, long offset, long timestamp) { 78 | map.put(key, value); 79 | } 80 | } 81 | 82 | public static class StringFromMapCacheLoader implements CacheLoader<String, String> { 83 | public final Map<String, String> map; 84 | 85 | public StringFromMapCacheLoader(Map<String, String> map) { 86 | this.map = map; 87 | } 88 | 89 | @Override 90 | public String load(String key) { 91 | return map.get(key); 92 | } 93 | } 94 | 95 | @Override 96 | protected Properties getKafkaCacheProperties() throws Exception { 97 | Properties props = super.getKafkaCacheProperties(); 98 | props.put(KafkaCacheConfig.KAFKACACHE_BACKING_CACHE_CONFIG, CacheType.CAFFEINE.toString()); 99 | props.put(KafkaCacheConfig.KAFKACACHE_BOUNDED_CACHE_SIZE_CONFIG, 2); 100 | return props; 101 | } 102 | } 103 | -------------------------------------------------------------------------------- /kcache-rdbms/pom.xml: 1 | 2 | 4.0.0 3 | 4 | 5 | kcache-parent 6 | io.kcache 7 | 5.2.4-SNAPSHOT 8 | 9 | 10 | io.kcache 11 | kcache-rdbms 12 | jar 13 | kcache-rdbms 14 | 15 | 16 | 17 | io.kcache 18 | kcache 19 | ${project.version} 20 | 21 | 22 |
io.kcache 23 | kcache 24 | ${project.version} 25 | test-jar 26 | test 27 | 28 | 29 | com.google.guava 30 | guava 31 | 32 | 33 | com.esotericsoftware 34 | kryo 35 | 36 | 37 | org.apache.kafka 38 | kafka-clients 39 | 40 | 41 | org.slf4j 42 | slf4j-api 43 | 44 | 45 | org.jooq 46 | jooq 47 | 48 | 49 | com.zaxxer 50 | HikariCP 51 | 52 | 53 | 61 | 68 | 69 | com.h2database 70 | h2 71 | 2.3.232 72 | test 73 | 74 | 75 | org.hsqldb 76 | hsqldb 77 | 2.7.4 78 | test 79 | 80 | 81 | org.apache.derby 82 | derby 83 | 10.15.2.0 84 | test 85 | 86 | 87 | org.apache.kafka 88 | kafka-clients 89 | test 90 | test 91 | 92 | 93 | org.apache.kafka 94 | kafka_${kafka.scala.version} 95 | test 96 | 97 | 98 | org.apache.kafka 99 | kafka_${kafka.scala.version} 100 | test 101 | test 102 | 103 | 104 | junit 105 | junit 106 | test 107 | 108 | 109 | org.slf4j 110 | slf4j-log4j12 111 | test 112 | 113 | 114 | 115 | 116 | -------------------------------------------------------------------------------- /kcache-rdbms/src/main/java/io/kcache/rdbms/jooq/tables/Kv.java: 1 | /* 2 | * This file is generated by jOOQ. 3 | */ 4 | package io.kcache.rdbms.jooq.tables; 5 | 6 | 7 | import java.util.Arrays; 8 | import java.util.List; 9 | 10 | import org.jooq.Field; 11 | import org.jooq.ForeignKey; 12 | import org.jooq.Name; 13 | import org.jooq.Record; 14 | import org.jooq.Row2; 15 | import org.jooq.Schema; 16 | import org.jooq.Table; 17 | import org.jooq.TableField; 18 | import org.jooq.TableOptions; 19 | import org.jooq.UniqueKey; 20 | import org.jooq.impl.DSL; 21 | import org.jooq.impl.TableImpl; 22 | 23 | import io.kcache.rdbms.jooq.Kcache; 24 | import io.kcache.rdbms.jooq.Keys; 25 | import io.kcache.rdbms.jooq.tables.records.KvRecord; 26 | import org.jooq.util.derby.DerbyDataType; 27 | 28 | 29 | /** 30 | * This class is generated by jOOQ. 31 | */ 32 | @SuppressWarnings({ "all", "unchecked", "rawtypes" }) 33 | public class Kv extends TableImpl<KvRecord> { 34 | 35 | private static final long serialVersionUID = 1L; 36 | 37 | /** 38 | * The reference instance of kcache.KV 39 | */ 40 | public static final Kv KV = new Kv(); 41 | 42 | /** 43 | * The class holding records for this type 44 | */ 45 | @Override 46 | public Class<KvRecord> getRecordType() { 47 | return KvRecord.class; 48 | } 49 | 50 | /** 51 | * The column kcache.KV.kv_key. 52 | */ 53 | //public final TableField<KvRecord, byte[]> KV_KEY = createField(DSL.name("kv_key"), SQLDataType.VARBINARY(3072).nullable(false), this, ""); 54 | public final TableField<KvRecord, byte[]> KV_KEY = createField(DSL.name("kv_key"), DerbyDataType.CHARVARYINGFORBITDATA.nullable(false), this, ""); 55 | 56 | /** 57 | * The column kcache.KV.kv_value.
58 | */ 59 | //public final TableField KV_VALUE = createField(DSL.name("kv_value"), SQLDataType.VARBINARY(32672).nullable(false), this, ""); 60 | public final TableField KV_VALUE = createField(DSL.name("kv_value"), DerbyDataType.CHARVARYINGFORBITDATA.nullable(false), this, ""); 61 | 62 | private Kv(Name alias, Table aliased) { 63 | this(alias, aliased, null); 64 | } 65 | 66 | private Kv(Name alias, Table aliased, Field[] parameters) { 67 | super(alias, null, aliased, parameters, DSL.comment(""), TableOptions.table()); 68 | } 69 | 70 | /** 71 | * Create an aliased kcache.KV table reference 72 | */ 73 | public Kv(String alias) { 74 | this(DSL.name(alias), KV); 75 | } 76 | 77 | /** 78 | * Create an aliased kcache.KV table reference 79 | */ 80 | public Kv(Name alias) { 81 | this(alias, KV); 82 | } 83 | 84 | /** 85 | * Create a kcache.KV table reference 86 | */ 87 | public Kv() { 88 | this(DSL.name("kv"), null); 89 | } 90 | 91 | public Kv(Table child, ForeignKey key) { 92 | super(child, key, KV); 93 | } 94 | 95 | @Override 96 | public Schema getSchema() { 97 | return Kcache.KCACHE; 98 | } 99 | 100 | @Override 101 | public UniqueKey getPrimaryKey() { 102 | return Keys.KEY_KV_PRIMARY; 103 | } 104 | 105 | @Override 106 | public List> getKeys() { 107 | return Arrays.>asList(Keys.KEY_KV_PRIMARY); 108 | } 109 | 110 | @Override 111 | public Kv as(String alias) { 112 | return new Kv(DSL.name(alias), this); 113 | } 114 | 115 | @Override 116 | public Kv as(Name alias) { 117 | return new Kv(alias, this); 118 | } 119 | 120 | /** 121 | * Rename this table 122 | */ 123 | @Override 124 | public Kv rename(String name) { 125 | return new Kv(DSL.name(name), null); 126 | } 127 | 128 | /** 129 | * Rename this table 130 | */ 131 | @Override 132 | public Kv rename(Name name) { 133 | return new Kv(name, null); 134 | } 135 | 136 | // ------------------------------------------------------------------------- 137 | // Row2 type methods 138 | // ------------------------------------------------------------------------- 139 | 140 | @Override 141 | public Row2 fieldsRow() { 142 | return (Row2) super.fieldsRow(); 143 | } 144 | } 145 | -------------------------------------------------------------------------------- /kcache/src/main/java/io/kcache/utils/InMemoryCache.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014-2018 Confluent Inc. 3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | *
10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package io.kcache.utils; 18 | 19 | import com.google.common.collect.ForwardingSortedMap; 20 | import io.kcache.Cache; 21 | import io.kcache.KeyValue; 22 | import io.kcache.KeyValueIterator; 23 | 24 | import java.util.Comparator; 25 | import java.util.Iterator; 26 | import java.util.Map; 27 | import java.util.NavigableMap; 28 | import java.util.SortedMap; 29 | import java.util.concurrent.ConcurrentSkipListMap; 30 | 31 | /** 32 | * In-memory cache 33 | */ 34 | public class InMemoryCache extends ForwardingSortedMap implements Cache { 35 | 36 | private final NavigableMap delegate; 37 | 38 | public InMemoryCache() { 39 | // Use a concurrent data structure to ensure fail-safe iterators 40 | this.delegate = new ConcurrentSkipListMap<>(); 41 | } 42 | 43 | public InMemoryCache(Comparator comparator) { 44 | // Use a concurrent data structure to ensure fail-safe iterators 45 | this.delegate = new ConcurrentSkipListMap<>(comparator); 46 | } 47 | 48 | public InMemoryCache(NavigableMap delegate) { 49 | this.delegate = delegate; 50 | } 51 | 52 | @Override 53 | protected SortedMap delegate() { 54 | return delegate; 55 | } 56 | 57 | @Override 58 | public void init() { 59 | // do nothing 60 | } 61 | 62 | @Override 63 | public void reset() { 64 | // do nothing 65 | } 66 | 67 | @Override 68 | public void sync() { 69 | // do nothing 70 | } 71 | 72 | @Override 73 | public Cache subCache(K from, boolean fromInclusive, K to, boolean toInclusive) { 74 | return new InMemoryCache<>(subMap(from, fromInclusive, to, toInclusive)); 75 | } 76 | 77 | private NavigableMap subMap(K from, boolean fromInclusive, K to, boolean toInclusive) { 78 | if (from == null) { 79 | return delegate.headMap(to, toInclusive); 80 | } else if (to == null) { 81 | return delegate.tailMap(from, fromInclusive); 82 | } else { 83 | return delegate.subMap(from, fromInclusive, to, toInclusive); 84 | } 85 | } 86 | 87 | @Override 88 | public KeyValueIterator range(K from, boolean fromInclusive, K to, boolean toInclusive) { 89 | return new InMemoryKeyValueIterator<>(subMap(from, fromInclusive, to, toInclusive).entrySet().iterator()); 90 | } 91 | 92 | @Override 93 | public KeyValueIterator all() { 94 | return new InMemoryKeyValueIterator<>(entrySet().iterator()); 95 | } 96 | 97 | @Override 98 | public Cache descendingCache() { 99 | return new InMemoryCache<>(delegate.descendingMap()); 100 | } 101 | 102 | @Override 103 | public void flush() { 104 | // do nothing 105 | } 106 | 107 | @Override 108 | public void close() { 109 | // do nothing 110 | } 111 | 112 | @Override 113 | public void destroy() { 114 | // do nothing 115 | } 116 | 117 | private static class InMemoryKeyValueIterator implements KeyValueIterator { 118 | private final Iterator> iter; 119 | 120 | private InMemoryKeyValueIterator(final Iterator> iter) { 121 | this.iter = iter; 122 | } 123 | 124 | @Override 125 | public boolean hasNext() { 126 | return iter.hasNext(); 127 | } 128 | 129 | @Override 130 | public KeyValue next() { 131 | final Map.Entry entry = iter.next(); 132 | return new KeyValue<>(entry.getKey(), entry.getValue()); 133 | } 134 | 135 | @Override 136 | public void close() { 137 | // do nothing 138 | } 139 | } 140 | } 141 | 
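// ------------------------------------------------------------------
// Illustrative aside (not part of InMemoryCache.java): a minimal usage
// sketch of the Cache API defined above, assuming String keys and values
// and assuming KeyValue exposes public key/value fields. KeyValueIterator
// is closeable, hence the try-with-resources.
//
//   Cache<String, String> cache = new InMemoryCache<>();
//   cache.put("a", "1");
//   cache.put("b", "2");
//   cache.put("c", "3");
//   try (KeyValueIterator<String, String> iter = cache.range("a", true, "b", true)) {
//       while (iter.hasNext()) {
//           KeyValue<String, String> kv = iter.next(); // yields a=1, then b=2
//           System.out.println(kv.key + " -> " + kv.value);
//       }
//   }
// ------------------------------------------------------------------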
-------------------------------------------------------------------------------- /kcache/src/main/java/io/kcache/utils/InMemoryBoundedCache.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | package io.kcache.utils; 18 | 19 | import com.google.common.cache.CacheBuilder; 20 | import com.google.common.cache.CacheLoader.InvalidCacheLoadException; 21 | import com.google.common.cache.LoadingCache; 22 | import io.kcache.CacheLoader; 23 | import java.time.Duration; 24 | import java.util.Comparator; 25 | import java.util.Map; 26 | import javax.annotation.Nullable; 27 | 28 | /** 29 | * An in-memory cache with bounded size. 30 | */ 31 | public class InMemoryBoundedCache extends InMemoryCache { 32 | 33 | private final com.google.common.cache.Cache cache; 34 | private final CacheLoader loader; 35 | 36 | public InMemoryBoundedCache(Comparator comparator) { 37 | this(null, null, null, comparator); 38 | } 39 | 40 | public InMemoryBoundedCache( 41 | Integer maximumSize, 42 | Duration expireAfterWrite, 43 | CacheLoader loader 44 | ) { 45 | super(); 46 | this.loader = loader; 47 | this.cache = createCache(maximumSize, expireAfterWrite); 48 | } 49 | 50 | public InMemoryBoundedCache( 51 | Integer maximumSize, 52 | Duration expireAfterWrite, 53 | CacheLoader loader, 54 | Comparator comparator 55 | ) { 56 | super(comparator); 57 | this.loader = loader; 58 | this.cache = createCache(maximumSize, expireAfterWrite); 59 | } 60 | 61 | private com.google.common.cache.Cache createCache( 62 | Integer maximumSize, Duration expireAfterWrite 63 | ) { 64 | CacheBuilder cacheBuilder = CacheBuilder.newBuilder() 65 | .removalListener(entry -> delegate().remove(entry.getKey(), entry.getValue())); 66 | if (maximumSize != null && maximumSize >= 0) { 67 | cacheBuilder = cacheBuilder.maximumSize(maximumSize); 68 | } 69 | if (expireAfterWrite != null && !expireAfterWrite.isNegative()) { 70 | cacheBuilder = cacheBuilder.expireAfterWrite(expireAfterWrite); 71 | } 72 | if (loader != null) { 73 | return cacheBuilder.build( 74 | new com.google.common.cache.CacheLoader() { 75 | @Override 76 | @Nullable 77 | public V load(K key) throws Exception { 78 | V value = loader.load(key); 79 | if (value != null) { 80 | delegate().put(key, value); 81 | } 82 | return value; 83 | } 84 | } 85 | ); 86 | } else { 87 | return cacheBuilder.build(); 88 | } 89 | } 90 | 91 | @Override 92 | public boolean containsKey(final Object key) { 93 | return get(key) != null; 94 | } 95 | 96 | @Override 97 | @SuppressWarnings("unchecked") 98 | public V get(final Object key) { 99 | if (loader != null) { 100 | try { 101 | return ((LoadingCache) cache).getUnchecked((K) key); 102 | } catch 
(InvalidCacheLoadException e) { 103 | return null; 104 | } 105 | } else { 106 | return super.get(key); 107 | } 108 | } 109 | 110 | @Override 111 | public V put(final K key, final V value) { 112 | cache.put(key, value); 113 | return super.put(key, value); 114 | } 115 | 116 | @Override 117 | public void putAll(Map entries) { 118 | cache.putAll(entries); 119 | super.putAll(entries); 120 | } 121 | 122 | @Override 123 | @SuppressWarnings("unchecked") 124 | public V remove(final Object key) { 125 | if (key != null) { 126 | cache.invalidate((K) key); 127 | } 128 | return super.remove(key); 129 | } 130 | 131 | @Override 132 | public void clear() { 133 | cache.invalidateAll(); 134 | super.clear(); 135 | } 136 | } -------------------------------------------------------------------------------- /kcache/src/main/java/io/kcache/CompositeCacheUpdateHandler.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed under the Apache License, Version 2.0 (the "License"); 3 | * you may not use this file except in compliance with the License. 4 | * You may obtain a copy of the License at 5 | * 6 | * http://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * Unless required by applicable law or agreed to in writing, software 9 | * distributed under the License is distributed on an "AS IS" BASIS, 10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | * See the License for the specific language governing permissions and 12 | * limitations under the License. 13 | */ 14 | 15 | package io.kcache; 16 | 17 | import java.io.IOException; 18 | import java.util.HashMap; 19 | import java.util.List; 20 | import java.util.Map; 21 | import java.util.Optional; 22 | import org.apache.kafka.common.TopicPartition; 23 | import org.apache.kafka.common.header.Headers; 24 | import org.apache.kafka.common.record.TimestampType; 25 | 26 | public class CompositeCacheUpdateHandler implements CacheUpdateHandler { 27 | 28 | private final List> handlers; 29 | 30 | public CompositeCacheUpdateHandler(List> handlers) { 31 | this.handlers = handlers; 32 | } 33 | 34 | @Override 35 | public void cacheInitialized(int count, Map checkpoints) { 36 | for (CacheUpdateHandler handler : handlers) { 37 | handler.cacheInitialized(count, checkpoints); 38 | } 39 | } 40 | 41 | @Override 42 | public void cacheReset() { 43 | for (CacheUpdateHandler handler : handlers) { 44 | handler.cacheReset(); 45 | } 46 | } 47 | 48 | @Override 49 | public void cacheSynchronized(int count, Map checkpoints) { 50 | for (CacheUpdateHandler handler : handlers) { 51 | handler.cacheSynchronized(count, checkpoints); 52 | } 53 | } 54 | 55 | @Override 56 | public void startBatch(int count) { 57 | for (CacheUpdateHandler handler : handlers) { 58 | handler.startBatch(count); 59 | } 60 | } 61 | 62 | @Override 63 | public ValidationStatus validateUpdate(Headers headers, K key, V value, TopicPartition tp, 64 | long offset, long ts, TimestampType tsType, 65 | Optional leaderEpoch) { 66 | for (CacheUpdateHandler handler : handlers) { 67 | ValidationStatus status = handler.validateUpdate(key, value, tp, offset, ts); 68 | if (status != ValidationStatus.SUCCESS) { 69 | return status; 70 | } 71 | } 72 | return ValidationStatus.SUCCESS; 73 | } 74 | 75 | @Override 76 | public void handleUpdate(K key, V value, V oldValue, TopicPartition tp, long offset, long ts) { 77 | for (CacheUpdateHandler handler : handlers) { 78 | handler.handleUpdate(key, value, oldValue, tp, offset, ts); 79 | } 80 | } 81 | 82 | @Override 83 | 
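// Note on the merge in the method below: when several handlers checkpoint the
// same partition, the smaller offset wins, so that after a restart no handler
// is positioned past records it has not yet processed; the trade-off is that
// other handlers may see some records a second time (at-least-once).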
public Map checkpoint(int count) { 84 | Map result = null; 85 | for (CacheUpdateHandler handler : handlers) { 86 | Map offsets = handler.checkpoint(count); 87 | if (offsets != null) { 88 | if (result != null) { 89 | for (Map.Entry entry : offsets.entrySet()) { 90 | // When merging, choose the smaller offset 91 | result.merge(entry.getKey(), entry.getValue(), Long::min); 92 | } 93 | } else { 94 | result = new HashMap<>(offsets); 95 | } 96 | } 97 | } 98 | return result; 99 | } 100 | 101 | @Override 102 | public void endBatch(int count) { 103 | for (CacheUpdateHandler handler : handlers) { 104 | handler.endBatch(count); 105 | } 106 | } 107 | 108 | @Override 109 | public void failBatch(int count, Throwable t) { 110 | for (CacheUpdateHandler handler : handlers) { 111 | handler.failBatch(count, t); 112 | } 113 | } 114 | 115 | @Override 116 | public void cacheFlushed() { 117 | for (CacheUpdateHandler handler : handlers) { 118 | handler.cacheFlushed(); 119 | } 120 | } 121 | 122 | @Override 123 | public void close() throws IOException { 124 | for (CacheUpdateHandler handler : handlers) { 125 | handler.close(); 126 | } 127 | } 128 | 129 | @Override 130 | public void cacheDestroyed() { 131 | for (CacheUpdateHandler handler : handlers) { 132 | handler.cacheDestroyed(); 133 | } 134 | } 135 | } 136 | -------------------------------------------------------------------------------- /kcache-caffeine/src/main/java/io/kcache/caffeine/CaffeineCache.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | package io.kcache.caffeine; 18 | 19 | import com.github.benmanes.caffeine.cache.CacheWriter; 20 | import com.github.benmanes.caffeine.cache.Caffeine; 21 | import com.github.benmanes.caffeine.cache.LoadingCache; 22 | import com.github.benmanes.caffeine.cache.RemovalCause; 23 | import com.github.benmanes.caffeine.cache.Scheduler; 24 | import io.kcache.CacheLoader; 25 | import io.kcache.utils.InMemoryCache; 26 | import java.time.Duration; 27 | import java.util.Comparator; 28 | import java.util.Map; 29 | 30 | /** 31 | * An in-memory cache with bounded size. 
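 * <p>The Caffeine cache built below is kept in sync with the sorted map inherited from InMemoryCache through a CacheWriter: write() mirrors every insert into delegate(), and delete() removes evicted or invalidated entries from it, so sorted-range queries only ever see live entries.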
32 | */ 33 | public class CaffeineCache extends InMemoryCache { 34 | 35 | private final com.github.benmanes.caffeine.cache.Cache cache; 36 | private final CacheLoader loader; 37 | 38 | public CaffeineCache(Comparator comparator) { 39 | this(null, null, null, comparator); 40 | } 41 | 42 | public CaffeineCache( 43 | Integer maximumSize, 44 | Duration expireAfterWrite, 45 | CacheLoader loader 46 | ) { 47 | super(); 48 | this.loader = loader; 49 | this.cache = createCache(maximumSize, expireAfterWrite); 50 | } 51 | 52 | public CaffeineCache( 53 | Integer maximumSize, 54 | Duration expireAfterWrite, 55 | CacheLoader loader, 56 | Comparator comparator 57 | ) { 58 | super(comparator); 59 | this.loader = loader; 60 | this.cache = createCache(maximumSize, expireAfterWrite); 61 | } 62 | 63 | private com.github.benmanes.caffeine.cache.Cache createCache( 64 | Integer maximumSize, Duration expireAfterWrite 65 | ) { 66 | Caffeine caffeine = Caffeine.newBuilder() 67 | .writer(new CacheWriter() { 68 | public void write(K key, V value) { 69 | delegate().put(key, value); 70 | } 71 | 72 | public void delete(K key, V value, RemovalCause cause) { 73 | delegate().remove(key, value); 74 | } 75 | }); 76 | if (maximumSize != null && maximumSize >= 0) { 77 | caffeine = caffeine.maximumSize(maximumSize); 78 | } 79 | if (expireAfterWrite != null && !expireAfterWrite.isNegative()) { 80 | caffeine = caffeine 81 | .scheduler(Scheduler.systemScheduler()) 82 | .expireAfterWrite(expireAfterWrite); 83 | } 84 | if (loader != null) { 85 | return caffeine.build( 86 | key -> { 87 | V value = loader.load(key); 88 | if (value != null) { 89 | delegate().put(key, value); 90 | } 91 | return value; 92 | } 93 | ); 94 | } else { 95 | return caffeine.build(); 96 | } 97 | } 98 | 99 | @Override 100 | public boolean containsKey(final Object key) { 101 | return get(key) != null; 102 | } 103 | 104 | @Override 105 | @SuppressWarnings("unchecked") 106 | public V get(final Object key) { 107 | if (loader != null) { 108 | return ((LoadingCache) cache).get((K) key); 109 | } else { 110 | return super.get(key); 111 | } 112 | } 113 | 114 | @Override 115 | public V put(final K key, final V value) { 116 | final V originalValue = get(key); 117 | cache.put(key, value); 118 | return originalValue; 119 | } 120 | 121 | @Override 122 | public void putAll(Map entries) { 123 | cache.putAll(entries); 124 | } 125 | 126 | @Override 127 | public V remove(final Object key) { 128 | final V originalValue = get(key); 129 | if (key != null) { 130 | cache.invalidate(key); 131 | } 132 | return originalValue; 133 | } 134 | 135 | @Override 136 | public void clear() { 137 | cache.invalidateAll(); 138 | } 139 | } -------------------------------------------------------------------------------- /kcache-benchmark/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 4.0.0 3 | 4 | 5 | kcache-parent 6 | io.kcache 7 | 5.2.4-SNAPSHOT 8 | 9 | 10 | io.kcache 11 | kcache-benchmark 12 | jar 13 | kcache-benchmark 14 | 15 | 16 | 1.37 17 | 18 | 19 | 20 | 21 | 22 | org.openjdk.jmh 23 | jmh-core 24 | ${jmh.version} 25 | 26 | 27 | org.openjdk.jmh 28 | jmh-generator-annprocess 29 | ${jmh.version} 30 | provided 31 | 32 | 33 | 34 | org.agrona 35 | agrona 36 | 1.21.1 37 | 38 | 39 | 40 | com.github.jnr 41 | jnr-posix 42 | 3.1.20 43 | 44 | 45 | com.github.jnr 46 | jnr-constants 47 | 48 | 49 | 50 | 51 | 52 | org.apache.derby 53 | derby 54 | 10.15.2.0 55 | 56 | 57 | 58 | io.kcache 59 | kcache 60 | ${project.version} 61 | 62 | 63 | io.kcache 64 | 
kcache-bdbje 65 | ${project.version} 66 | 67 | 68 | io.kcache 69 | kcache-lmdb 70 | ${project.version} 71 | 72 | 73 | io.kcache 74 | kcache-mapdb 75 | ${project.version} 76 | 77 | 78 | io.kcache 79 | kcache-rdbms 80 | ${project.version} 81 | 82 | 83 | io.kcache 84 | kcache-rocksdb 85 | ${project.version} 86 | 87 | 88 | 89 | 90 | 91 | 92 | org.apache.maven.plugins 93 | maven-shade-plugin 94 | 3.6.0 95 | 96 | 97 | package 98 | 99 | shade 100 | 101 | 102 | benchmarks 103 | 104 | 105 | org.openjdk.jmh.Main 106 | 107 | 108 | 109 | 110 | *:* 111 | 112 | META-INF/services/javax.annotation.processing.Processor 113 | META-INF/*.SF 114 | META-INF/*.DSA 115 | META-INF/*.RSA 116 | 117 | 118 | 119 | 120 | 121 | 122 | 123 | 124 | 125 | 126 | 127 | -------------------------------------------------------------------------------- /kcache/src/test/java/io/kcache/utils/ClusterTestHarness.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014-2018 Confluent Inc. 3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | *
10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package io.kcache.utils; 18 | 19 | import kafka.server.KafkaConfig; 20 | import kafka.server.KafkaServer; 21 | import kafka.utils.CoreUtils; 22 | import kafka.utils.TestUtils; 23 | import kafka.zk.EmbeddedZookeeper; 24 | import org.apache.kafka.common.network.ListenerName; 25 | import org.apache.kafka.common.security.auth.SecurityProtocol; 26 | import org.apache.kafka.common.utils.Time; 27 | import org.apache.kafka.common.utils.Utils; 28 | import org.junit.After; 29 | import org.junit.Before; 30 | import scala.Option; 31 | import scala.Option$; 32 | 33 | import java.util.List; 34 | import java.util.Properties; 35 | import java.util.Vector; 36 | 37 | /** 38 | * Test harness to run against a real, local Kafka cluster. This is essentially 39 | * Kafka's ZookeeperTestHarness and KafkaServerTestHarness traits combined. 40 | */ 41 | public abstract class ClusterTestHarness { 42 | 43 | protected static final int DEFAULT_NUM_BROKERS = 1; 44 | protected static final Option EMPTY_SASL_PROPERTIES = Option$.MODULE$.empty(); 45 | 46 | private final int numBrokers; 47 | 48 | // ZK Config 49 | protected EmbeddedZookeeper zookeeper; 50 | protected String zkConnect; 51 | 52 | // Kafka Config 53 | protected List configs = null; 54 | protected List servers = null; 55 | protected String bootstrapServers = null; 56 | 57 | public ClusterTestHarness() { 58 | this(DEFAULT_NUM_BROKERS); 59 | } 60 | 61 | public ClusterTestHarness(int numBrokers) { 62 | this.numBrokers = numBrokers; 63 | } 64 | 65 | @Before 66 | public void setUp() throws Exception { 67 | zookeeper = new EmbeddedZookeeper(); 68 | zkConnect = String.format("localhost:%d", zookeeper.port()); 69 | 70 | configs = new Vector<>(); 71 | servers = new Vector<>(); 72 | for (int i = 0; i < numBrokers; i++) { 73 | KafkaConfig config = getKafkaConfig(i); 74 | configs.add(config); 75 | 76 | KafkaServer server = TestUtils.createServer(config, Time.SYSTEM); 77 | servers.add(server); 78 | } 79 | 80 | String[] serverUrls = new String[servers.size()]; 81 | ListenerName listenerType = ListenerName.forSecurityProtocol(getSecurityProtocol()); 82 | for (int i = 0; i < servers.size(); i++) { 83 | serverUrls[i] = 84 | Utils.formatAddress( 85 | servers.get(i).config().effectiveAdvertisedBrokerListeners().head().host(), 86 | servers.get(i).boundPort(listenerType) 87 | ); 88 | } 89 | bootstrapServers = String.join(",", serverUrls); 90 | } 91 | 92 | protected void injectProperties(Properties props) { 93 | props.setProperty("auto.create.topics.enable", "true"); 94 | props.setProperty("num.partitions", "1"); 95 | } 96 | 97 | protected KafkaConfig getKafkaConfig(int brokerId) { 98 | 99 | final Option noFile = scala.Option.apply(null); 100 | final Option noInterBrokerSecurityProtocol = scala.Option.apply(null); 101 | Properties props = TestUtils.createBrokerConfig( 102 | brokerId, 103 | zkConnect, 104 | false, 105 | false, 106 | TestUtils.RandomPort(), 107 | noInterBrokerSecurityProtocol, 108 | noFile, 109 | EMPTY_SASL_PROPERTIES, 110 | true, 111 | false, 112 | TestUtils.RandomPort(), 113 | false, 114 | TestUtils.RandomPort(), 115 | false, 116 | TestUtils.RandomPort(), 117 | Option.empty(), 118 | 1, 119 | false, 120 | 1, 121 | 
(short) 1, 122 | false 123 | ); 124 | injectProperties(props); 125 | return KafkaConfig.fromProps(props); 126 | } 127 | 128 | protected SecurityProtocol getSecurityProtocol() { 129 | return SecurityProtocol.PLAINTEXT; 130 | } 131 | 132 | @After 133 | public void tearDown() throws Exception { 134 | if (servers != null) { 135 | for (KafkaServer server : servers) { 136 | server.shutdown(); 137 | } 138 | 139 | // Remove any persistent data 140 | for (KafkaServer server : servers) { 141 | CoreUtils.delete(server.config().logDirs()); 142 | } 143 | } 144 | 145 | if (zookeeper != null) { 146 | zookeeper.shutdown(); 147 | } 148 | } 149 | } 150 | -------------------------------------------------------------------------------- /README.md: 1 | # KCache - An In-Memory Cache Backed by Apache Kafka 2 | 3 | [![Build Status][github-actions-shield]][github-actions-link] 4 | [![Maven][maven-shield]][maven-link] 5 | [![Javadoc][javadoc-shield]][javadoc-link] 6 | 7 | [github-actions-shield]: https://github.com/rayokota/kcache/workflows/build/badge.svg?branch=master 8 | [github-actions-link]: https://github.com/rayokota/kcache/actions 9 | [maven-shield]: https://img.shields.io/maven-central/v/io.kcache/kcache.svg 10 | [maven-link]: https://search.maven.org/#search%7Cga%7C1%7Cio.kcache 11 | [javadoc-shield]: https://javadoc.io/badge/io.kcache/kcache.svg?color=blue 12 | [javadoc-link]: https://javadoc.io/doc/io.kcache/kcache 13 | 14 | KCache is a client library that provides an in-memory cache backed by a compacted topic in Kafka. It is one of the patterns for using Kafka as a persistent store, as described by Jay Kreps in the article [It's Okay to Store Data in Apache Kafka](https://www.confluent.io/blog/okay-store-data-apache-kafka/). 15 | 16 | ## Maven 17 | 18 | Releases of KCache are deployed to Maven Central. 19 | 20 | ```xml 21 | <dependency> 22 | <groupId>io.kcache</groupId> 23 | <artifactId>kcache</artifactId> 24 | <version>5.2.3</version> 25 | </dependency> 26 | ``` 27 | 28 | For Java 11 or above, use `5.x`; otherwise use `4.x`. 29 | 30 | ## Usage 31 | 32 | An instance of `KafkaCache` implements the `java.util.SortedMap` interface. Here is an example usage: 33 | 34 | ```java 35 | import io.kcache.*; 36 | 37 | String bootstrapServers = "localhost:9092"; 38 | Cache<String, String> cache = new KafkaCache<>( 39 | bootstrapServers, 40 | Serdes.String(), // for serializing/deserializing keys 41 | Serdes.String() // for serializing/deserializing values 42 | ); 43 | cache.init(); // creates topic, initializes cache, consumer, and producer 44 | cache.put("Kafka", "Rocks"); 45 | String value = cache.get("Kafka"); // returns "Rocks" 46 | cache.remove("Kafka"); 47 | cache.close(); // shuts down the cache, consumer, and producer 48 | ``` 49 | 50 | One can also use RocksDB to back the `KafkaCache`: 51 | 52 | ```java 53 | Properties props = new Properties(); 54 | props.put(KafkaCacheConfig.KAFKACACHE_BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); 55 | props.put(KafkaCacheConfig.KAFKACACHE_BACKING_CACHE_CONFIG, "rocksdb"); 56 | props.put(KafkaCacheConfig.KAFKACACHE_DATA_DIR_CONFIG, "/tmp"); 57 | Cache<String, String> cache = new KafkaCache<>( 58 | new KafkaCacheConfig(props), 59 | Serdes.String(), // for serializing/deserializing keys 60 | Serdes.String() // for serializing/deserializing values 61 | ); 62 | cache.init(); 63 | ``` 64 | ## Basic Configuration 65 | 66 | KCache has a number of configuration properties that can be specified.
67 | 68 | - `kafkacache.bootstrap.servers` - A list of host and port pairs to use for establishing the initial connection to Kafka. 69 | - `kafkacache.group.id` - The group ID to use for the internal consumer. Defaults to `kafkacache`. 70 | - `kafkacache.client.id` - The client ID to use for the internal consumer. Defaults to `kafka-cache-reader-<topic>`. 71 | - `kafkacache.topic` - The name of the compacted topic. Defaults to `_cache`. 72 | - `kafkacache.topic.replication.factor` - The desired replication factor for the compacted topic. Defaults to 3. 73 | - `kafkacache.topic.num.partitions` - The desired number of partitions for the compacted topic. Defaults to 1. 74 | - `kafkacache.topic.partitions` - A list of partitions to consume, or all partitions if not specified. 75 | - `kafkacache.topic.partitions.offset` - The offset to start consuming all partitions from, one of `beginning`, `end`, 76 | a positive number representing an absolute offset, a negative number representing a relative offset from the end, 77 | or `@<timestamp>`, where `<timestamp>` is a timestamp in ms. Defaults to `beginning`. 78 | - `kafkacache.init.timeout.ms` - The timeout for initialization of the Kafka cache, including creation of the compacted topic. Defaults to 300 seconds. 79 | - `kafkacache.timeout.ms` - The timeout for an operation on the Kafka cache. Defaults to 60 seconds. 80 | - `kafkacache.backing.cache` - The backing cache for KCache, one of `memory` (default), `bdbje`, `caffeine`, `lmdb`, `mapdb`, `rdbms`, or `rocksdb`. 81 | - `kafkacache.data.dir` - The root directory for backing cache storage. Defaults to `/tmp`. 82 | 83 | Configuration properties can be passed as follows: 84 | 85 | ```java 86 | Properties props = new Properties(); 87 | props.setProperty("kafkacache.bootstrap.servers", "localhost:9092"); 88 | props.setProperty("kafkacache.topic", "_mycache"); 89 | Cache<String, String> cache = new KafkaCache<>( 90 | new KafkaCacheConfig(props), 91 | Serdes.String(), // for serializing/deserializing keys 92 | Serdes.String() // for serializing/deserializing values 93 | ); 94 | cache.init(); 95 | ... 96 | ``` 97 | 98 | ## Security 99 | 100 | KCache supports both SSL authentication and SASL authentication to a secure Kafka cluster. See the [JavaDoc](https://static.javadoc.io/io.kcache/kcache/latest/io/kcache/KafkaCacheConfig.html) for more information. 101 | 102 | ## Using KCache as a Replicated Cache 103 | 104 | KCache can be used as a replicated cache, with some caveats. To ensure that updates are processed in the proper order, one instance of KCache should be designated as the sole writer, with all writes being forwarded to it. If the writer fails, another instance can then be elected as the new writer. 105 | 106 | For an example of a highly-available service that wraps KCache, see [Keta](https://github.com/rayokota/keta). 107 | -------------------------------------------------------------------------------- /kcache/src/main/java/io/kcache/CacheUpdateHandler.java: 1 | /* 2 | * Copyright 2014-2018 Confluent Inc. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package io.kcache; 18 | 19 | import java.util.Map; 20 | import java.util.Optional; 21 | import org.apache.kafka.common.Configurable; 22 | import org.apache.kafka.common.TopicPartition; 23 | 24 | import java.io.Closeable; 25 | import java.io.IOException; 26 | import org.apache.kafka.common.header.Headers; 27 | import org.apache.kafka.common.record.TimestampType; 28 | 29 | public interface CacheUpdateHandler extends Configurable, Closeable { 30 | 31 | enum ValidationStatus { 32 | SUCCESS, 33 | ROLLBACK_FAILURE, 34 | IGNORE_FAILURE 35 | } 36 | 37 | /** 38 | * Configures the cache update handler. 39 | */ 40 | default void configure(Map configs) { 41 | } 42 | 43 | /** 44 | * Invoked after the cache is initialized. 45 | * 46 | * @param count the number of records consumed during initialization 47 | * @param checkpoints current checkpoints 48 | */ 49 | default void cacheInitialized(int count, Map checkpoints) { 50 | } 51 | 52 | /** 53 | * Invoked after the cache is reset. 54 | */ 55 | default void cacheReset() { 56 | } 57 | 58 | /** 59 | * Invoked after the cache is synchronized. 60 | * 61 | * @param count the number of records consumed during synchronization 62 | * @param checkpoints current checkpoints 63 | */ 64 | default void cacheSynchronized(int count, Map checkpoints) { 65 | } 66 | 67 | /** 68 | * Invoked before a batch of updates. 
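 * <p>Paired with endBatch (and failBatch on error), this lets a handler bracket a batch of updates, for example to stage writes and commit them once per batch.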
69 | * @param count batch count 70 | */ 71 | default void startBatch(int count) { 72 | } 73 | 74 | /** 75 | * Invoked before every new K,V pair written to the cache 76 | * 77 | * @param key key associated with the data 78 | * @param value data written to the cache 79 | * @param tp topic-partition 80 | * @param offset offset 81 | * @param ts timestamp 82 | * @return whether the update should proceed 83 | */ 84 | default ValidationStatus validateUpdate(K key, V value, TopicPartition tp, long offset, long ts) { 85 | return ValidationStatus.SUCCESS; 86 | } 87 | 88 | /** 89 | * Invoked before every new K,V pair written to the cache 90 | * 91 | * @param headers headers 92 | * @param key key associated with the data 93 | * @param value data written to the cache 94 | * @param tp topic-partition 95 | * @param offset offset 96 | * @param ts timestamp 97 | * @param tsType timestamp type 98 | * @param leaderEpoch leader epoch 99 | * @return whether the update should proceed 100 | */ 101 | default ValidationStatus validateUpdate(Headers headers, K key, V value, TopicPartition tp, 102 | long offset, long ts, TimestampType tsType, 103 | Optional leaderEpoch) { 104 | return validateUpdate(key, value, tp, offset, ts); 105 | } 106 | 107 | /** 108 | * Invoked after every new K,V pair written to the cache 109 | * 110 | * @param key key associated with the data 111 | * @param value data written to the cache 112 | * @param oldValue the previous value associated with key, or null if there was no mapping for key 113 | * @param tp topic-partition 114 | * @param offset offset 115 | * @param ts timestamp 116 | */ 117 | void handleUpdate(K key, V value, V oldValue, TopicPartition tp, long offset, long ts); 118 | 119 | /** 120 | * Invoked after every new K,V pair written to the cache 121 | * 122 | * @param headers headers 123 | * @param key key associated with the data 124 | * @param value data written to the cache 125 | * @param oldValue the previous value associated with key, or null if there was no mapping for key 126 | * @param tp topic-partition 127 | * @param offset offset 128 | * @param ts timestamp 129 | * @param tsType timestamp type 130 | * @param leaderEpoch leader epoch 131 | */ 132 | default void handleUpdate(Headers headers, K key, V value, V oldValue, TopicPartition tp, 133 | long offset, long ts, TimestampType tsType, 134 | Optional leaderEpoch) { 135 | handleUpdate(key, value, oldValue, tp, offset, ts); 136 | } 137 | 138 | /** 139 | * Retrieve the offsets to checkpoint. 140 | * 141 | * @param count batch count 142 | * @return the offsets to checkpoint, or null to use the latest offsets 143 | */ 144 | default Map checkpoint(int count) { 145 | return null; 146 | } 147 | 148 | /** 149 | * Invoked after a batch of updates. 150 | * 151 | * @param count batch count 152 | */ 153 | default void endBatch(int count) { 154 | } 155 | 156 | /** 157 | * Invoked when an error has occurred while processing the batch. 158 | * 159 | * @param count batch count 160 | * @param t the error 161 | */ 162 | default void failBatch(int count, Throwable t) { 163 | } 164 | 165 | /** 166 | * Invoked after the cache is flushed. 167 | */ 168 | default void cacheFlushed() { 169 | } 170 | 171 | @Override 172 | default void close() throws IOException { 173 | } 174 | 175 | /** 176 | * Invoked after the cache is destroyed. 
177 | */ 178 | default void cacheDestroyed() { 179 | } 180 | } 181 | -------------------------------------------------------------------------------- /kcache/src/test/java/io/kcache/utils/SASLClusterTestHarness.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to you under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | 18 | package io.kcache.utils; 19 | 20 | import kafka.security.minikdc.MiniKdc; 21 | import kafka.server.KafkaConfig; 22 | import kafka.utils.JaasTestUtils; 23 | import kafka.utils.TestUtils; 24 | import org.apache.kafka.common.config.internals.BrokerSecurityConfigs; 25 | import org.apache.kafka.common.security.auth.SecurityProtocol; 26 | import org.apache.kafka.common.security.authenticator.LoginManager; 27 | import org.junit.After; 28 | import org.junit.Before; 29 | import org.slf4j.Logger; 30 | import org.slf4j.LoggerFactory; 31 | import scala.Option; 32 | import scala.collection.JavaConverters; 33 | import scala.collection.immutable.List; 34 | import scala.collection.immutable.Seq; 35 | import scala.jdk.javaapi.CollectionConverters; 36 | 37 | import javax.security.auth.login.Configuration; 38 | import java.io.File; 39 | import java.util.ArrayList; 40 | import java.util.Arrays; 41 | import java.util.Properties; 42 | 43 | // sets up SASL for ZooKeeper and Kafka. Much of this was borrowed from kafka.api.SaslSetup in Kafka. 44 | public class SASLClusterTestHarness extends ClusterTestHarness { 45 | public static final String JAAS_CONF = "java.security.auth.login.config"; 46 | public static final String ZK_AUTH_PROVIDER = "zookeeper.authProvider.1"; 47 | 48 | private MiniKdc kdc = null; 49 | private final File kdcHome = TestUtils.tempDir(); 50 | private final Properties kdcProps = MiniKdc.createConfig(); 51 | 52 | private static final Logger log = LoggerFactory.getLogger(SASLClusterTestHarness.class); 53 | 54 | public SASLClusterTestHarness() { 55 | super(DEFAULT_NUM_BROKERS); 56 | } 57 | 58 | @Override 59 | protected SecurityProtocol getSecurityProtocol() { 60 | return SecurityProtocol.SASL_PLAINTEXT; 61 | } 62 | 63 | @Before 64 | @Override 65 | public void setUp() throws Exception { 66 | // Important if tests leak consumers, producers or brokers. 67 | LoginManager.closeAll(); 68 | 69 | File serverKeytab = File.createTempFile("server-", ".keytab"); 70 | File clientKeytab = File.createTempFile("client-", ".keytab"); 71 | 72 | // create a JAAS file. 
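// The JAAS file written below combines three contexts: a Kafka server section
// (GSSAPI with the server keytab), a Kafka client section (client keytab), and
// the ZooKeeper sections; its path is then exported through the
// java.security.auth.login.config system property (JAAS_CONF) before setUp().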
73 | Option serverKeytabOption = Option.apply(serverKeytab); 74 | Option clientKeytabOption = Option.apply(clientKeytab); 75 | List serverSaslMechanisms = JavaConverters.asScalaBuffer(Arrays.asList("GSSAPI")).toList(); 76 | Option clientSaslMechanism = Option.apply("GSSAPI"); 77 | 78 | java.util.List jaasSections = new ArrayList<>(); 79 | jaasSections.add(JaasTestUtils.kafkaServerSection(JaasTestUtils.KafkaServerContextName(), serverSaslMechanisms, serverKeytabOption)); 80 | jaasSections.add(JaasTestUtils.kafkaClientSection(clientSaslMechanism, clientKeytabOption)); 81 | jaasSections.addAll(CollectionConverters.asJavaCollection(JaasTestUtils.zkSections())); 82 | String jaasFilePath = JaasTestUtils.writeJaasContextsToFile(JavaConverters.asScalaBuffer(jaasSections).toSeq()).getAbsolutePath(); 83 | 84 | log.info("Using KDC home: " + kdcHome.getAbsolutePath()); 85 | kdc = new MiniKdc(kdcProps, kdcHome); 86 | kdc.start(); 87 | 88 | createPrincipal(serverKeytab, "kafka/localhost"); 89 | createPrincipal(clientKeytab, "client"); 90 | createPrincipal(clientKeytab, "client2"); 91 | 92 | // This will cause a reload of the Configuration singleton when `getConfiguration` is called. 93 | Configuration.setConfiguration(null); 94 | 95 | System.setProperty(JAAS_CONF, jaasFilePath); 96 | System.setProperty(ZK_AUTH_PROVIDER, "org.apache.zookeeper.server.auth.SASLAuthenticationProvider"); 97 | super.setUp(); 98 | } 99 | 100 | private void createPrincipal(File keytab, String principalNoRealm) throws Exception { 101 | Seq principals = JavaConverters.asScalaBuffer( 102 | Arrays.asList(principalNoRealm) 103 | ).toList(); 104 | kdc.createPrincipal(keytab, principals); 105 | } 106 | 107 | @Override 108 | protected KafkaConfig getKafkaConfig(int brokerId) { 109 | final Option trustStoreFileOption = Option.apply(null); 110 | final Option saslInterBrokerSecurityProtocol = 111 | Option.apply(SecurityProtocol.SASL_PLAINTEXT); 112 | Properties props = TestUtils.createBrokerConfig( 113 | brokerId, zkConnect, false, false, TestUtils.RandomPort(), saslInterBrokerSecurityProtocol, 114 | trustStoreFileOption, EMPTY_SASL_PROPERTIES, false, true, TestUtils.RandomPort(), 115 | false, TestUtils.RandomPort(), 116 | false, TestUtils.RandomPort(), Option.empty(), 1, false, 1, (short) 1, false); 117 | 118 | injectProperties(props); 119 | props.setProperty("zookeeper.connection.timeout.ms", "30000"); 120 | props.setProperty("sasl.mechanism.inter.broker.protocol", "GSSAPI"); 121 | props.setProperty(BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG, "GSSAPI"); 122 | 123 | return KafkaConfig.fromProps(props); 124 | } 125 | 126 | @After 127 | @Override 128 | public void tearDown() throws Exception { 129 | if (kdc != null) { 130 | kdc.stop(); 131 | } 132 | LoginManager.closeAll(); 133 | System.clearProperty(JAAS_CONF); 134 | System.clearProperty(ZK_AUTH_PROVIDER); 135 | Configuration.setConfiguration(null); 136 | super.tearDown(); 137 | } 138 | } 139 | -------------------------------------------------------------------------------- /kcache/src/test/java/io/kcache/KafkaPersistentCacheTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014-2018 Confluent Inc. 3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | *
10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package io.kcache; 18 | 19 | import static org.junit.Assert.assertEquals; 20 | 21 | import io.kcache.exceptions.CacheException; 22 | import io.kcache.utils.OffsetCheckpoint; 23 | import io.kcache.utils.PersistentCache; 24 | import java.io.File; 25 | import java.io.IOException; 26 | import java.util.Collections; 27 | import java.util.Map; 28 | import java.util.Properties; 29 | import org.apache.kafka.common.TopicPartition; 30 | import org.junit.After; 31 | import org.junit.Rule; 32 | import org.junit.Test; 33 | import org.junit.rules.TemporaryFolder; 34 | import org.slf4j.Logger; 35 | import org.slf4j.LoggerFactory; 36 | 37 | public abstract class KafkaPersistentCacheTest extends KafkaCacheTest { 38 | 39 | private static final Logger log = LoggerFactory.getLogger(KafkaPersistentCacheTest.class); 40 | 41 | @Rule 42 | public final TemporaryFolder dir = new TemporaryFolder(); 43 | 44 | @After 45 | @Override 46 | public void teardown() throws IOException { 47 | try (OffsetCheckpoint offsetCheckpoint = new OffsetCheckpoint(dir.getRoot().toString(), 0, topic)) { 48 | offsetCheckpoint.delete(); 49 | } 50 | super.teardown(); 51 | } 52 | 53 | @Override 54 | protected Properties getKafkaCacheProperties() throws Exception { 55 | Properties props = new Properties(); 56 | props.put(KafkaCacheConfig.KAFKACACHE_BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); 57 | props.put(KafkaCacheConfig.KAFKACACHE_CHECKPOINT_DIR_CONFIG, dir.getRoot().toString()); 58 | props.put(KafkaCacheConfig.KAFKACACHE_DATA_DIR_CONFIG, dir.getRoot().toString()); 59 | return props; 60 | } 61 | 62 | @Test 63 | public void testCheckpointBeforeAndAfterRestart() throws Exception { 64 | Cache kafkaCache = createAndInitKafkaCacheInstance(); 65 | String key = "Kafka"; 66 | String value = "Rocks"; 67 | String key2 = "Hello"; 68 | String value2 = "World"; 69 | try { 70 | try { 71 | kafkaCache.put(key, value); 72 | } catch (CacheException e) { 73 | throw new RuntimeException("Kafka store put(Kafka, Rocks) operation failed", e); 74 | } 75 | String retrievedValue; 76 | try { 77 | retrievedValue = kafkaCache.get(key); 78 | } catch (CacheException e) { 79 | throw new RuntimeException("Kafka store get(Kafka) operation failed", e); 80 | } 81 | assertEquals("Retrieved value should match entered value", value, retrievedValue); 82 | kafkaCache.close(); 83 | 84 | final Map offsets = Collections.singletonMap(new TopicPartition(topic, 0), 1L); 85 | final Map result = readOffsetsCheckpoint(); 86 | assertEquals(result, offsets); 87 | 88 | // recreate kafka store 89 | kafkaCache = createAndInitKafkaCacheInstance(); 90 | try { 91 | kafkaCache.put(key2, value2); 92 | } catch (CacheException e) { 93 | throw new RuntimeException("Kafka store put(Hello, World) operation failed", e); 94 | } 95 | } finally { 96 | kafkaCache.close(); 97 | } 98 | 99 | final Map offsets = Collections.singletonMap(new TopicPartition(topic, 0), 2L); 100 | final Map result = readOffsetsCheckpoint(); 101 | assertEquals(result, offsets); 102 | } 103 | 104 | @Test 105 | public void testMovedCheckpointBeforeAndAfterRestart() throws Exception { 106 | Cache kafkaCache = createAndInitKafkaCacheInstance(); 107 | String key = "Kafka"; 108 | String 
value = "Rocks"; 109 | String key2 = "Hello"; 110 | String value2 = "World"; 111 | try { 112 | try { 113 | kafkaCache.put(key, value); 114 | } catch (CacheException e) { 115 | throw new RuntimeException("Kafka store put(Kafka, Rocks) operation failed", e); 116 | } 117 | String retrievedValue; 118 | try { 119 | retrievedValue = kafkaCache.get(key); 120 | } catch (CacheException e) { 121 | throw new RuntimeException("Kafka store get(Kafka) operation failed", e); 122 | } 123 | assertEquals("Retrieved value should match entered value", value, retrievedValue); 124 | kafkaCache.close(); 125 | 126 | // add moveme file 127 | File moveme = new File(dir.getRoot().toString(), PersistentCache.MOVEME_FILE_NAME); 128 | moveme.createNewFile(); 129 | 130 | // recreate kafka store 131 | kafkaCache = createAndInitKafkaCacheInstance(); 132 | try { 133 | kafkaCache.put(key2, value2); 134 | } catch (CacheException e) { 135 | throw new RuntimeException("Kafka store put(Hello, World) operation failed", e); 136 | } 137 | } finally { 138 | kafkaCache.close(); 139 | } 140 | 141 | final Map offsets = Collections.singletonMap(new TopicPartition(topic, 0), 1L); 142 | final Map result = readOffsetsCheckpoint(dir.getRoot().toString() + ".bak"); 143 | assertEquals(result, offsets); 144 | } 145 | 146 | private Map readOffsetsCheckpoint() throws IOException { 147 | return readOffsetsCheckpoint(dir.getRoot().toString()); 148 | } 149 | 150 | private Map readOffsetsCheckpoint(String dir) throws IOException { 151 | try (OffsetCheckpoint offsetCheckpoint = new OffsetCheckpoint(dir, 0, topic)) { 152 | return offsetCheckpoint.read(); 153 | } 154 | } 155 | } 156 | -------------------------------------------------------------------------------- /kcache/src/test/java/io/kcache/KafkaReadOnlyCacheTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014-2018 Confluent Inc. 3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | *
10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package io.kcache; 18 | 19 | import io.kcache.exceptions.CacheException; 20 | import io.kcache.exceptions.CacheInitializationException; 21 | import io.kcache.utils.Caches; 22 | import io.kcache.utils.ClusterTestHarness; 23 | import io.kcache.utils.StringUpdateHandler; 24 | import org.apache.kafka.clients.admin.AdminClient; 25 | import org.apache.kafka.clients.admin.AdminClientConfig; 26 | import org.apache.kafka.clients.admin.NewTopic; 27 | import org.apache.kafka.common.config.TopicConfig; 28 | import org.apache.kafka.common.serialization.Serdes; 29 | import org.junit.After; 30 | import org.junit.Before; 31 | import org.junit.Test; 32 | import org.slf4j.Logger; 33 | import org.slf4j.LoggerFactory; 34 | 35 | import java.io.IOException; 36 | import java.util.AbstractMap; 37 | import java.util.Collections; 38 | import java.util.Properties; 39 | import java.util.concurrent.TimeUnit; 40 | 41 | import static io.kcache.KafkaCacheConfig.DEFAULT_KAFKACACHE_TOPIC; 42 | import static org.junit.Assert.assertEquals; 43 | import static org.junit.Assert.fail; 44 | 45 | public class KafkaReadOnlyCacheTest extends ClusterTestHarness { 46 | 47 | private static final Logger log = LoggerFactory.getLogger(KafkaReadOnlyCacheTest.class); 48 | 49 | @Before 50 | public void setup() { 51 | log.debug("bootstrapservers = {}", bootstrapServers); 52 | } 53 | 54 | @After 55 | public void teardown() { 56 | log.debug("Shutting down"); 57 | } 58 | 59 | @Test(expected = CacheInitializationException.class) 60 | public void testInitialization() throws IOException { 61 | try (Cache cache = createKafkaCacheInstance()) { 62 | cache.init(); 63 | } 64 | } 65 | 66 | @Test 67 | public void testInitializationGivenTopicAlreadyExists() throws IOException { 68 | try (Cache cache = createKafkaCacheInstance()) { 69 | createTopic(bootstrapServers); 70 | cache.init(); 71 | } 72 | } 73 | 74 | @Test 75 | public void testSimplePut() throws Exception { 76 | try (Cache kafkaCache = createKafkaCacheInstance()) { 77 | createTopic(bootstrapServers); 78 | kafkaCache.init(); 79 | kafkaCache.put("Kafka", "Rocks"); 80 | fail("Expected put to fail"); 81 | } catch (CacheException e) { 82 | assertEquals("Cache is read-only", e.getMessage()); 83 | } 84 | } 85 | 86 | @Test 87 | public void testSimpleRemove() throws Exception { 88 | try (Cache kafkaCache = createKafkaCacheInstance()) { 89 | createTopic(bootstrapServers); 90 | kafkaCache.init(); 91 | kafkaCache.remove("Kafka"); 92 | fail("Expected remove to fail"); 93 | } catch (CacheException e) { 94 | assertEquals("Cache is read-only", e.getMessage()); 95 | } 96 | } 97 | 98 | @Test(expected = UnsupportedOperationException.class) 99 | public void testKeySetIsImmutable() throws Exception { 100 | try (Cache kafkaCache = createKafkaCacheInstance()) { 101 | createTopic(bootstrapServers); 102 | kafkaCache.init(); 103 | kafkaCache.keySet().remove("Kafka"); 104 | } 105 | } 106 | 107 | @Test(expected = UnsupportedOperationException.class) 108 | public void testEntrySetIsImmutable() throws Exception { 109 | try (Cache kafkaCache = createKafkaCacheInstance()) { 110 | createTopic(bootstrapServers); 111 | kafkaCache.init(); 112 | kafkaCache.entrySet().add(new 
AbstractMap.SimpleEntry<>("Kafka", "Rocks")); 113 | } 114 | } 115 | 116 | @Test(expected = UnsupportedOperationException.class) 117 | public void testValuesIsImmutable() throws Exception { 118 | try (Cache kafkaCache = createKafkaCacheInstance()) { 119 | createTopic(bootstrapServers); 120 | kafkaCache.init(); 121 | kafkaCache.values().add("Kafka"); 122 | } 123 | } 124 | 125 | private void createTopic(String bootstrapServers) throws CacheInitializationException { 126 | Properties props = new Properties(); 127 | props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); 128 | 129 | try (AdminClient admin = AdminClient.create(props)) { 130 | NewTopic topicRequest = new NewTopic(DEFAULT_KAFKACACHE_TOPIC, 1, (short) 1); 131 | topicRequest.configs( 132 | Collections.singletonMap( 133 | TopicConfig.CLEANUP_POLICY_CONFIG, 134 | TopicConfig.CLEANUP_POLICY_COMPACT 135 | ) 136 | ); 137 | admin.createTopics(Collections.singleton(topicRequest)).all().get(1000, TimeUnit.MILLISECONDS); 138 | } catch (Exception e) { 139 | throw new CacheInitializationException("Failed to create topic", e); 140 | } 141 | } 142 | 143 | private Cache createKafkaCacheInstance() { 144 | Properties props = getKafkaCacheProperties(); 145 | KafkaCacheConfig config = new KafkaCacheConfig(props); 146 | Cache kafkaCache = Caches.concurrentCache( 147 | new KafkaCache<>(config, 148 | Serdes.String(), 149 | Serdes.String(), 150 | new StringUpdateHandler(), 151 | null)); 152 | return kafkaCache; 153 | } 154 | 155 | protected Properties getKafkaCacheProperties() { 156 | Properties props = new Properties(); 157 | props.put(KafkaCacheConfig.KAFKACACHE_BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); 158 | props.put(KafkaCacheConfig.KAFKACACHE_TOPIC_READ_ONLY_CONFIG, true); 159 | props.put(KafkaCacheConfig.KAFKACACHE_BACKING_CACHE_CONFIG, CacheType.MEMORY.toString()); 160 | return props; 161 | } 162 | } 163 | -------------------------------------------------------------------------------- /kcache/src/test/java/io/kcache/KafkaCacheOffsetTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014-2018 Confluent Inc. 3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | *
10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package io.kcache; 18 | 19 | import static org.junit.Assert.assertEquals; 20 | import static org.junit.Assert.assertNull; 21 | 22 | import io.kcache.exceptions.CacheException; 23 | import io.kcache.utils.ClusterTestHarness; 24 | import java.util.Properties; 25 | import org.junit.Test; 26 | import org.slf4j.Logger; 27 | import org.slf4j.LoggerFactory; 28 | 29 | public class KafkaCacheOffsetTest extends ClusterTestHarness { 30 | 31 | private static final Logger log = LoggerFactory.getLogger(KafkaCacheOffsetTest.class); 32 | 33 | @Test 34 | public void testDifferentOffsetAfterRestart() throws Exception { 35 | Properties props = getKafkaCacheProperties(); 36 | try (Cache kafkaCache = CacheUtils.createAndInitKafkaCacheInstance(props)) { 37 | try { 38 | kafkaCache.put("Kafka", "Rocks"); 39 | } catch (CacheException e) { 40 | throw new RuntimeException("Kafka store put(Kafka, Rocks) operation failed", e); 41 | } 42 | try { 43 | kafkaCache.put("Kafka2", "Rocks2"); 44 | } catch (CacheException e) { 45 | throw new RuntimeException("Kafka store put(Kafka2, Rocks2) operation failed", e); 46 | } 47 | } 48 | props.put("kafkacache.topic.partitions.offset", "end"); 49 | try (Cache kafkaCache = CacheUtils.createAndInitKafkaCacheInstance(props)) { 50 | String retrievedValue; 51 | try { 52 | retrievedValue = kafkaCache.get("Kafka"); 53 | } catch (CacheException e) { 54 | throw new RuntimeException("Kafka store get(Kafka) operation failed", e); 55 | } 56 | assertNull("Value should have been deleted", retrievedValue); 57 | try { 58 | retrievedValue = kafkaCache.get("Kafka2"); 59 | } catch (CacheException e) { 60 | throw new RuntimeException("Kafka store get(Kafka2) operation failed", e); 61 | } 62 | assertNull("Value should have been deleted", retrievedValue); 63 | } 64 | props.put("kafkacache.topic.partitions.offset", "1"); 65 | try (Cache kafkaCache = CacheUtils.createAndInitKafkaCacheInstance(props)) { 66 | String retrievedValue; 67 | try { 68 | retrievedValue = kafkaCache.get("Kafka"); 69 | } catch (CacheException e) { 70 | throw new RuntimeException("Kafka store get(Kafka) operation failed", e); 71 | } 72 | assertNull("Value should have been deleted", retrievedValue); 73 | try { 74 | retrievedValue = kafkaCache.get("Kafka2"); 75 | } catch (CacheException e) { 76 | throw new RuntimeException("Kafka store get(Kafka2) operation failed", e); 77 | } 78 | assertEquals("Retrieved value should match entered value", "Rocks2", retrievedValue); 79 | } 80 | props.put("kafkacache.topic.partitions.offset", "-1"); 81 | try (Cache kafkaCache = CacheUtils.createAndInitKafkaCacheInstance(props)) { 82 | String retrievedValue; 83 | try { 84 | retrievedValue = kafkaCache.get("Kafka"); 85 | } catch (CacheException e) { 86 | throw new RuntimeException("Kafka store get(Kafka) operation failed", e); 87 | } 88 | assertNull("Value should have been deleted", retrievedValue); 89 | try { 90 | retrievedValue = kafkaCache.get("Kafka2"); 91 | } catch (CacheException e) { 92 | throw new RuntimeException("Kafka store get(Kafka2) operation failed", e); 93 | } 94 | assertEquals("Retrieved value should match entered value", "Rocks2", retrievedValue); 95 | } 96 | 
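// An offset of the form "@<ts>" seeks each partition to the first record whose
// timestamp is >= <ts>, so "@0" effectively replays the topic from the
// beginning, making both previously written entries visible again.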
96 |         props.put("kafkacache.topic.partitions.offset", "@0");
97 |         try (Cache<String, String> kafkaCache = CacheUtils.createAndInitKafkaCacheInstance(props)) {
98 |             String retrievedValue;
99 |             try {
100 |                 retrievedValue = kafkaCache.get("Kafka");
101 |             } catch (CacheException e) {
102 |                 throw new RuntimeException("Kafka store get(Kafka) operation failed", e);
103 |             }
104 |             assertEquals("Retrieved value should match entered value", "Rocks", retrievedValue);
105 |             try {
106 |                 retrievedValue = kafkaCache.get("Kafka2");
107 |             } catch (CacheException e) {
108 |                 throw new RuntimeException("Kafka store get(Kafka2) operation failed", e);
109 |             }
110 |             assertEquals("Retrieved value should match entered value", "Rocks2", retrievedValue);
111 |         }
112 |         props.put("kafkacache.topic.partitions.offset", "@" + Long.MAX_VALUE);
113 |         // Max timestamp causes offsetsForTimes to fail, resulting in seeking to beginning
114 |         try (Cache<String, String> kafkaCache = CacheUtils.createAndInitKafkaCacheInstance(props)) {
115 |             String retrievedValue;
116 |             try {
117 |                 retrievedValue = kafkaCache.get("Kafka");
118 |             } catch (CacheException e) {
119 |                 throw new RuntimeException("Kafka store get(Kafka) operation failed", e);
120 |             }
121 |             assertEquals("Retrieved value should match entered value", "Rocks", retrievedValue);
122 |             try {
123 |                 retrievedValue = kafkaCache.get("Kafka2");
124 |             } catch (CacheException e) {
125 |                 throw new RuntimeException("Kafka store get(Kafka2) operation failed", e);
126 |             }
127 |             assertEquals("Retrieved value should match entered value", "Rocks2", retrievedValue);
128 |         }
129 |     }
130 | 
131 |     protected Properties getKafkaCacheProperties() throws Exception {
132 |         Properties props = new Properties();
133 |         props.put(KafkaCacheConfig.KAFKACACHE_BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
134 |         props.put(KafkaCacheConfig.KAFKACACHE_BACKING_CACHE_CONFIG, CacheType.MEMORY.toString());
135 |         return props;
136 |     }
137 | }
138 | 
--------------------------------------------------------------------------------
/kcache-mapdb/src/main/java/io/kcache/mapdb/MapDBCache.java:
--------------------------------------------------------------------------------
 1 | /*
 2 |  * Licensed to the Apache Software Foundation (ASF) under one or more
 3 |  * contributor license agreements. See the NOTICE file distributed with
 4 |  * this work for additional information regarding copyright ownership.
 5 |  * The ASF licenses this file to You under the Apache License, Version 2.0
 6 |  * (the "License"); you may not use this file except in compliance with
 7 |  * the License. You may obtain a copy of the License at
 8 |  *
 9 |  *      http://www.apache.org/licenses/LICENSE-2.0
10 |  *
11 |  * Unless required by applicable law or agreed to in writing, software
12 |  * distributed under the License is distributed on an "AS IS" BASIS,
13 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 |  * See the License for the specific language governing permissions and
15 |  * limitations under the License.
16 |  */
17 | package io.kcache.mapdb;
18 | 
19 | import io.kcache.KeyValueIterator;
20 | import io.kcache.KeyValueIterators;
21 | import io.kcache.exceptions.CacheInitializationException;
22 | import io.kcache.utils.PersistentCache;
23 | import java.io.File;
24 | import java.util.Comparator;
25 | import java.util.Iterator;
26 | import java.util.Map;
27 | import java.util.Objects;
28 | import org.apache.kafka.common.serialization.Serde;
29 | import org.mapdb.BTreeMap;
30 | import org.mapdb.DB;
31 | import org.mapdb.DBMaker;
32 | import org.mapdb.Serializer;
33 | import org.slf4j.Logger;
34 | import org.slf4j.LoggerFactory;
35 | 
36 | /**
37 |  * A persistent key-value store based on MapDB.
38 |  */
39 | public class MapDBCache<K, V> extends PersistentCache<K, V> {
40 |     private static final Logger log = LoggerFactory.getLogger(MapDBCache.class);
41 | 
42 |     private static final String DB_FILE_DIR = "mapdb";
43 | 
44 |     private DB db;
45 |     private BTreeMap<byte[], byte[]> map;
46 | 
47 |     public MapDBCache(final String name,
48 |                       final String rootDir,
49 |                       Serde<K> keySerde,
50 |                       Serde<V> valueSerde) {
51 |         this(name, DB_FILE_DIR, rootDir, keySerde, valueSerde);
52 |     }
53 | 
54 |     public MapDBCache(final String name,
55 |                       final String rootDir,
56 |                       Serde<K> keySerde,
57 |                       Serde<V> valueSerde,
58 |                       Comparator<K> comparator) {
59 |         this(name, DB_FILE_DIR, rootDir, keySerde, valueSerde, comparator);
60 |     }
61 | 
62 |     public MapDBCache(final String name,
63 |                       final String parentDir,
64 |                       final String rootDir,
65 |                       Serde<K> keySerde,
66 |                       Serde<V> valueSerde) {
67 |         this(name, parentDir, rootDir, keySerde, valueSerde, null);
68 |     }
69 | 
70 |     public MapDBCache(final String name,
71 |                       final String parentDir,
72 |                       final String rootDir,
73 |                       Serde<K> keySerde,
74 |                       Serde<V> valueSerde,
75 |                       Comparator<K> comparator) {
76 |         super(name, parentDir, rootDir, keySerde, valueSerde, comparator);
77 |     }
78 | 
79 |     @Override
80 |     protected void openDB() {
81 |         try {
82 |             db = DBMaker.fileDB(new File(dbDir(), "map.db"))
83 |                 .fileMmapEnableIfSupported()
84 |                 .make();
85 |             map = db.treeMap(name())
86 |                 .keySerializer(new CustomSerializerByteArray<>(keySerde(), comparator()))
87 |                 .valueSerializer(Serializer.BYTE_ARRAY)
88 |                 .counterEnable()
89 |                 .createOrOpen();
90 |         } catch (final Exception e) {
91 |             throw new CacheInitializationException("Error opening store " + name() + " at location " + dbDir(), e);
92 |         }
93 |     }
94 | 
95 |     @Override
96 |     public int size() {
97 |         validateStoreOpen();
98 |         return map.size();
99 |     }
100 | 
101 |     @Override
102 |     public V put(final K key, final V value) {
103 |         Objects.requireNonNull(key, "key cannot be null");
104 |         validateStoreOpen();
105 |         byte[] keyBytes = keySerde().serializer().serialize(null, key);
106 |         byte[] valueBytes = valueSerde().serializer().serialize(null, value);
107 |         byte[] oldValueBytes = map.put(keyBytes, valueBytes);
108 |         db.commit();
109 |         return valueSerde().deserializer().deserialize(null, oldValueBytes);
110 |     }
111 | 
112 |     @Override
113 |     public void putAll(Map<? extends K, ? extends V> entries) {
114 |         validateStoreOpen();
115 |         for (Map.Entry<? extends K, ? extends V> entry : entries.entrySet()) {
116 |             byte[] keyBytes = keySerde().serializer().serialize(null, entry.getKey());
117 |             byte[] valueBytes = valueSerde().serializer().serialize(null, entry.getValue());
118 |             map.put(keyBytes, valueBytes);
119 |         }
120 |         db.commit();
121 |     }
122 | 
123 |     @Override
124 |     @SuppressWarnings("unchecked")
125 |     public V get(final Object key) {
126 |         validateStoreOpen();
127 |         byte[] keyBytes = keySerde().serializer().serialize(null, (K) key);
128 |         byte[] valueBytes = map.get(keyBytes);
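        // map.get returns null for an absent key; the standard Kafka deserializers map a null
        // byte array back to null, so callers simply see null for missing keys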
129 |         return valueSerde().deserializer().deserialize(null, valueBytes);
130 |     }
131 | 
132 |     @Override
133 |     @SuppressWarnings("unchecked")
134 |     public V remove(final Object key) {
135 |         Objects.requireNonNull(key, "key cannot be null");
136 |         byte[] keyBytes = keySerde().serializer().serialize(null, (K) key);
137 |         byte[] oldValueBytes = map.remove(keyBytes);
138 |         db.commit();
139 |         return valueSerde().deserializer().deserialize(null, oldValueBytes);
140 |     }
141 | 
142 |     @Override
143 |     protected KeyValueIterator<K, V> range(K from, boolean fromInclusive, K to, boolean toInclusive, boolean isDescending) {
144 |         validateStoreOpen();
145 |         byte[] fromBytes = keySerde().serializer().serialize(null, from);
146 |         byte[] toBytes = keySerde().serializer().serialize(null, to);
147 |         Iterator<Map.Entry<byte[], byte[]>> iter = isDescending
148 |             ? map.descendingEntryIterator(toBytes, toInclusive, fromBytes, fromInclusive)
149 |             : map.entryIterator(fromBytes, fromInclusive, toBytes, toInclusive);
150 |         return KeyValueIterators.transformRawIterator(keySerde(), valueSerde(), iter);
151 |     }
152 | 
153 |     @Override
154 |     protected KeyValueIterator<K, V> all(boolean isDescending) {
155 |         validateStoreOpen();
156 |         Iterator<Map.Entry<byte[], byte[]>> iter = isDescending
157 |             ? map.descendingEntryIterator()
158 |             : map.entryIterator();
159 |         return KeyValueIterators.transformRawIterator(keySerde(), valueSerde(), iter);
160 |     }
161 | 
162 |     @Override
163 |     public void flush() {
        // no-op: put, putAll and remove above each commit eagerly via db.commit()
164 |     }
165 | 
166 |     @Override
167 |     protected void closeDB() {
168 |         try {
169 |             if (db != null) {
170 |                 db.close();
171 |             }
172 |             db = null;
173 |         } catch (Exception e) {
174 |             log.warn("Error during close", e);
175 |         }
176 |     }
177 | }
178 | 
--------------------------------------------------------------------------------
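As a quick orientation before the next file, here is a minimal usage sketch for the MapDBCache class above. It is a sketch only: the store name and directory are made up, Serdes.String() is the stock Kafka serde, and it assumes PersistentCache.init() opens the store via openDB() as shown above.

    import io.kcache.mapdb.MapDBCache;
    import org.apache.kafka.common.serialization.Serdes;

    public class MapDBCacheExample {
        public static void main(String[] args) throws Exception {
            MapDBCache<String, String> cache = new MapDBCache<>(
                "example",        // store name, also used as the BTreeMap name
                "/tmp/kcache",    // root dir; files land under a "mapdb" subdirectory per DB_FILE_DIR
                Serdes.String(),  // key serde
                Serdes.String()); // value serde
            cache.init();         // assumed to open the underlying MapDB file store
            cache.put("Kafka", "Rocks");
            System.out.println(cache.get("Kafka")); // prints "Rocks"
            cache.close();
        }
    }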
/kcache-benchmark/src/main/java/io/kcache/benchmark/Common.java:
--------------------------------------------------------------------------------
 1 | /*-
 2 |  * #%L
 3 |  * LmdbJava Benchmarks
 4 |  * %%
 5 |  * Copyright (C) 2016 - 2020 The LmdbJava Open Source Project
 6 |  * %%
 7 |  * Licensed under the Apache License, Version 2.0 (the "License");
 8 |  * you may not use this file except in compliance with the License.
 9 |  * You may obtain a copy of the License at
10 |  *
11 |  *      http://www.apache.org/licenses/LICENSE-2.0
12 |  *
13 |  * Unless required by applicable law or agreed to in writing, software
14 |  * distributed under the License is distributed on an "AS IS" BASIS,
15 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 |  * See the License for the specific language governing permissions and
17 |  * limitations under the License.
18 |  * #L%
19 |  */
20 | 
21 | package io.kcache.benchmark;
22 | 
23 | import static java.lang.Integer.BYTES;
24 | import static java.lang.System.getProperty;
25 | import static java.lang.System.out;
26 | import static jnr.posix.POSIXFactory.getPOSIX;
27 | import static org.openjdk.jmh.annotations.Scope.Benchmark;
28 | 
29 | import java.io.File;
30 | import java.io.IOException;
31 | import jnr.posix.FileStat;
32 | import jnr.posix.POSIX;
33 | import org.agrona.collections.IntHashSet;
34 | import org.apache.commons.math3.random.BitsStreamGenerator;
35 | import org.apache.commons.math3.random.MersenneTwister;
36 | import org.openjdk.jmh.annotations.Param;
37 | import org.openjdk.jmh.annotations.State;
38 | import org.openjdk.jmh.infra.BenchmarkParams;
39 | 
40 | /**
41 |  * Common JMH {@link State} superclass for all DB benchmark states.
42 |  *
43 |  * <p>
44 |  * Members do not reflect the typical code standards of the LmdbJava project due to compliance
45 |  * requirements with JMH {@link Param} and {@link State}.
46 |  */
47 | @State(Benchmark)
48 | @SuppressWarnings({"checkstyle:designforextension",
49 |     "checkstyle:visibilitymodifier"})
50 | public class Common {
51 | 
52 |     static final byte[] RND_MB = new byte[1_048_576];
53 |     static final int STRING_KEY_LENGTH = 16;
54 |     private static final POSIX POSIX = getPOSIX();
55 |     private static final BitsStreamGenerator RND = new MersenneTwister();
56 |     private static final int S_BLKSIZE = 512; // from sys/stat.h
57 |     private static final File TMP_BENCH;
58 | 
59 |     /**
60 |      * Keys are always derived from an integer, but are stored either as 4-byte integers or as
61 |      * zero-padded 16-byte strings. Storing keys as integers offers a major performance gain.
62 |      */
63 |     @Param("true")
64 |     boolean intKey;
65 | 
66 |     /**
67 |      * Determined during {@link #setup(BenchmarkParams)} based on the {@link #intKey} value.
68 |      */
69 |     int keySize;
70 |     /**
71 |      * Keys in designated (random/sequential) order.
72 |      */
73 |     int[] keys;
74 | 
75 |     /**
76 |      * Number of entries to read/write to the database.
77 |      */
78 |     @Param("1000")
79 |     int num;
80 | 
81 |     /**
82 |      * Whether the keys are to be inserted into the database in sequential order (and in the
83 |      * "readKeys" case, read back in that order). For LMDB, sequential inserts use {@link
84 |      * org.lmdbjava.PutFlags#MDB_APPEND} and offer a major performance gain. If this field is false,
85 |      * the append flag will not be used and the keys will instead be inserted (and read back via
86 |      * "readKeys") in a random order.
87 |      */
88 |     @Param("true")
89 |     boolean sequential;
90 | 
91 |     File tmp;
92 | 
93 |     /**
94 |      * Whether the values contain random bytes or are simply the same as the key. If true, the
95 |      * random bytes are obtained sequentially from a 1 MB random byte buffer.
96 |      */
97 |     @Param("false")
98 |     boolean valRandom;
99 | 
100 |     /**
101 |      * Number of bytes in each value.
102 |      */
103 |     @Param("100")
104 |     int valSize;
105 | 
106 |     static {
107 |         RND.nextBytes(RND_MB);
108 |         final String tmpParent = getProperty("java.io.tmpdir");
109 |         TMP_BENCH = new File(tmpParent, "lmdbjava-benchmark-scratch");
110 |     }
111 | 
112 |     public void setup(final BenchmarkParams b) throws IOException {
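        // keys are pre-generated here so that key construction stays out of the measured
        // benchmark methods (the usual JMH state-setup pattern)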
113 |         keySize = intKey ? BYTES : STRING_KEY_LENGTH;
114 |         final IntHashSet set = new IntHashSet(num);
115 |         keys = new int[num];
116 |         for (int i = 0; i < num; i++) {
117 |             if (sequential) {
118 |                 keys[i] = i;
119 |             } else {
120 |                 while (true) {
121 |                     int candidateKey = RND.nextInt();
122 |                     if (candidateKey < 0) {
123 |                         candidateKey &= Integer.MAX_VALUE; // unlike negation, this stays non-negative even for Integer.MIN_VALUE
124 |                     }
125 |                     if (!set.contains(candidateKey)) {
126 |                         set.add(candidateKey);
127 |                         keys[i] = candidateKey;
128 |                         break;
129 |                     }
130 |                 }
131 |             }
132 |         }
133 | 
134 |         rmdir(TMP_BENCH);
135 |         tmp = create(b, "");
136 |     }
137 | 
138 |     public void reportSpaceBeforeClose() {
139 |         if (tmp.getName().contains(".readKey-")) {
140 |             reportSpaceUsed(tmp, "before-close");
141 |         }
142 |     }
143 | 
144 |     public void teardown() throws IOException {
145 |         // we only report space for the readKey benchmarks, as all impls offer them and the stored data set is fixed
146 |         if (tmp.getName().contains(".readKey-")) {
147 |             reportSpaceUsed(tmp, "after-close");
148 |         }
149 |         rmdir(TMP_BENCH);
150 |     }
151 | 
152 |     @SuppressWarnings("UseOfSystemOutOrSystemErr")
153 |     protected void reportSpaceUsed(final File dir, final String desc) {
154 |         final File[] files = dir.listFiles();
155 |         if (files == null) {
156 |             return;
157 |         }
158 |         long bytes = 0;
159 |         for (final File f : files) {
160 |             if (f.isDirectory()) {
161 |                 throw new UnsupportedOperationException("impl created directory");
162 |             }
163 |             final FileStat stat = POSIX.stat(f.getAbsolutePath());
164 |             bytes += stat.blocks() * S_BLKSIZE;
165 |         }
166 |         out.println("\nBytes\t" + desc + "\t" + bytes + "\t" + dir.getName());
167 |     }
168 | 
169 |     final String padKey(final int key) {
170 |         final String skey = Integer.toString(key);
171 |         return "0000000000000000".substring(0, STRING_KEY_LENGTH - skey.length()) + skey;
172 |     }
173 | 
174 |     private File create(final BenchmarkParams b, final String suffix) {
175 |         final File f = new File(TMP_BENCH, b.id() + suffix);
176 |         if (!f.mkdirs()) {
177 |             throw new IllegalStateException("Cannot mkdir " + f);
178 |         }
179 |         return f;
180 |     }
181 | 
182 |     @SuppressWarnings("checkstyle:ReturnCount")
183 |     private void rmdir(final File file) {
184 |         if (!file.exists()) {
185 |             return;
186 |         }
187 |         if (file.isDirectory()) {
188 |             final File[] files = file.listFiles();
189 |             if (files == null) {
190 |                 return;
191 |             }
192 |             for (final File f : files) {
193 |                 rmdir(f);
194 |             }
195 |         }
196 |         if (!file.delete()) {
197 |             throw new IllegalStateException("Cannot delete " + file);
198 |         }
199 |     }
200 | }
201 | 
--------------------------------------------------------------------------------
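The Common state above only pre-generates keys and a scratch directory; each concrete benchmark supplies the store under test. A minimal sketch of how a JMH benchmark might build on it (the CacheBench/BenchState names and the use of InMemoryCache are illustrative assumptions, not classes of this module):

    package io.kcache.benchmark;

    import io.kcache.Cache;
    import io.kcache.utils.InMemoryCache;
    import org.openjdk.jmh.annotations.Benchmark;
    import org.openjdk.jmh.annotations.Scope;
    import org.openjdk.jmh.annotations.Setup;
    import org.openjdk.jmh.annotations.State;
    import org.openjdk.jmh.annotations.TearDown;
    import org.openjdk.jmh.infra.BenchmarkParams;

    public class CacheBench {

        @State(Scope.Benchmark)
        public static class BenchState extends Common {
            Cache<String, String> cache;

            @Setup
            public void init(final BenchmarkParams params) throws Exception {
                super.setup(params);           // populates keys[] and the scratch dir
                cache = new InMemoryCache<>(); // assumed no-arg constructor; any backend would do
                cache.init();
            }

            @TearDown
            public void close() throws Exception {
                cache.close();
                super.teardown();
            }
        }

        @Benchmark
        public void write(final BenchState s) {
            for (final int key : s.keys) {
                s.cache.put(s.padKey(key), Integer.toString(key)); // zero-padded 16-byte string keys
            }
        }
    }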
/kcache/src/main/java/io/kcache/Cache.java:
--------------------------------------------------------------------------------
 1 | /*
 2 |  * Copyright 2014-2018 Confluent Inc.
 3 |  *
 4 |  * Licensed under the Apache License, Version 2.0 (the "License");
 5 |  * you may not use this file except in compliance with the License.
 6 |  * You may obtain a copy of the License at
 7 |  *
 8 |  * http://www.apache.org/licenses/LICENSE-2.0
 9 |  *
10 |  * Unless required by applicable law or agreed to in writing, software
11 |  * distributed under the License is distributed on an "AS IS" BASIS,
12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 |  * See the License for the specific language governing permissions and
14 |  * limitations under the License.
15 |  */
16 | 
17 | package io.kcache;
18 | 
19 | import io.kcache.exceptions.CacheInitializationException;
20 | 
21 | import java.io.Closeable;
22 | import java.io.IOException;
23 | import java.util.Map;
24 | import java.util.SortedMap;
25 | import org.apache.kafka.common.Configurable;
26 | 
27 | public interface Cache<K, V> extends SortedMap<K, V>, Configurable, Closeable {
28 | 
29 |     /**
30 |      * Whether the cache is persistent.
31 |      *
32 |      * @return whether the cache is persistent
33 |      */
34 |     default boolean isPersistent() {
35 |         return false;
36 |     }
37 | 
38 |     /**
39 |      * Configures the cache.
40 |      */
41 |     default void configure(Map<String, ?> configs) {
42 |     }
43 | 
44 |     /**
45 |      * Initializes the cache.
46 |      */
47 |     void init() throws CacheInitializationException;
48 | 
49 |     /**
50 |      * Resets the cache, clearing stale data before a sync.
51 |      * This can be used if the leader changes in a cluster.
52 |      */
53 |     void reset();
54 | 
55 |     /**
56 |      * Syncs (or re-initializes) the cache with the backing store.
57 |      */
58 |     void sync();
59 | 
60 |     /**
61 |      * Returns a view of the portion of this cache whose keys range from
62 |      * {@code fromKey} to {@code toKey}. If {@code fromKey} and
63 |      * {@code toKey} are equal, the returned cache is empty unless
64 |      * {@code fromInclusive} and {@code toInclusive} are both true. The
65 |      * returned cache is backed by this cache, so changes in the returned cache are
66 |      * reflected in this cache, and vice-versa. The returned cache supports all
67 |      * optional cache operations that this cache supports.
68 |      *
69 |      * <p>The returned cache will throw an {@code IllegalArgumentException}
70 |      * on an attempt to insert a key outside of its range, or to construct a
71 |      * subcache either of whose endpoints lie outside its range.
72 |      *
73 |      * @param fromKey low endpoint of the keys in the returned cache;
74 |      *        {@code null} indicates the beginning
75 |      * @param fromInclusive {@code true} if the low endpoint
76 |      *        is to be included in the returned view
77 |      * @param toKey high endpoint of the keys in the returned cache;
78 |      *        {@code null} indicates the end
79 |      * @param toInclusive {@code true} if the high endpoint
80 |      *        is to be included in the returned view
81 |      * @return a view of the portion of this cache whose keys range from
82 |      *         {@code fromKey} to {@code toKey}
83 |      * @throws ClassCastException if {@code fromKey} and {@code toKey}
84 |      *         cannot be compared to one another using this cache's comparator
85 |      *         (or, if the cache has no comparator, using natural ordering).
86 |      *         Implementations may, but are not required to, throw this
87 |      *         exception if {@code fromKey} or {@code toKey}
88 |      *         cannot be compared to keys currently in the cache.
89 |      * @throws IllegalArgumentException if {@code fromKey} is greater than
90 |      *         {@code toKey}; or if this cache itself has a restricted
91 |      *         range, and {@code fromKey} or {@code toKey} lies
92 |      *         outside the bounds of the range
93 |      */
94 |     Cache<K, V> subCache(K fromKey, boolean fromInclusive, K toKey, boolean toInclusive);
95 | 
96 |     /**
97 |      * Returns an iterator over the portion of this cache whose keys range from
98 |      * {@code fromKey} to {@code toKey}. If {@code fromKey} and
99 |      * {@code toKey} are equal, the returned iterator is empty unless
100 |      * {@code fromInclusive} and {@code toInclusive} are both true.
101 |      *
102 |      * @param fromKey low endpoint of the keys in the returned iterator;
103 |      *        {@code null} indicates the beginning
104 |      * @param fromInclusive {@code true} if the low endpoint
105 |      *        is to be included in the returned view
106 |      * @param toKey high endpoint of the keys in the returned iterator;
107 |      *        {@code null} indicates the end
108 |      * @param toInclusive {@code true} if the high endpoint
109 |      *        is to be included in the returned view
110 |      * @return an iterator over the portion of this cache whose keys range from
111 |      *         {@code fromKey} to {@code toKey}
112 |      * @throws ClassCastException if {@code fromKey} and {@code toKey}
113 |      *         cannot be compared to one another using this cache's comparator
114 |      *         (or, if the cache has no comparator, using natural ordering).
115 |      *         Implementations may, but are not required to, throw this
116 |      *         exception if {@code fromKey} or {@code toKey}
117 |      *         cannot be compared to keys currently in the cache.
118 |      * @throws IllegalArgumentException if {@code fromKey} is greater than
119 |      *         {@code toKey}; or if this cache itself has a restricted
120 |      *         range, and {@code fromKey} or {@code toKey} lies
121 |      *         outside the bounds of the range
122 |      */
123 |     KeyValueIterator<K, V> range(K fromKey, boolean fromInclusive, K toKey, boolean toInclusive);
124 | 
125 |     /**
126 |      * Returns an iterator over all key-value pairs in this cache.
127 |      *
128 |      * @return a {@link KeyValueIterator} over the elements in this collection
129 |      */
130 |     KeyValueIterator<K, V> all();
131 | 
132 |     /**
133 |      * Returns a reverse order view of the mappings contained in this cache.
134 |      * The descending cache is backed by this cache, so changes to the cache are
135 |      * reflected in the descending cache, and vice-versa. If either cache is
136 |      * modified while an iteration over a collection view of either cache
137 |      * is in progress (except through the iterator's own {@code remove}
138 |      * operation), the results of the iteration are undefined.
139 |      *
140 |      * @return a reverse order view of this cache
141 |      */
142 |     Cache<K, V> descendingCache();
143 | 
144 |     /**
145 |      * Flushes the cache.
146 |      */
147 |     void flush();
148 | 
149 |     /**
150 |      * Destroys the cache, if persistent.
151 |      */
152 |     void destroy() throws IOException;
153 | 
154 |     // Default methods
155 | 
156 |     default SortedMap<K, V> subMap(K fromKey, K toKey) {
157 |         return subCache(fromKey, true, toKey, false);
158 |     }
159 | 
160 |     default SortedMap<K, V> headMap(K toKey) {
161 |         return subCache(null, false, toKey, false);
162 |     }
163 | 
164 |     default SortedMap<K, V> tailMap(K fromKey) {
165 |         return subCache(fromKey, true, null, false);
166 |     }
167 | }
168 | 
--------------------------------------------------------------------------------
/kcache/pom.xml:
--------------------------------------------------------------------------------
1 | 2 | 4.0.0 3 | 4 | 5 | kcache-parent 6 | io.kcache 7 | 5.2.4-SNAPSHOT 8 | 9 | 10 | io.kcache 11 | kcache 12 | jar 13 | kcache 14 | 15 | 16 | 17 | com.google.guava 18 | guava 19 | 20 | 21 | org.apache.kafka 22 | kafka-clients 23 | 24 | 25 | org.slf4j 26 | slf4j-api 27 | 28 | 29 | 30 | org.apache.kafka 31 | kafka-clients 32 | test 33 | test 34 | 35 | 36 | org.apache.kafka 37 | kafka_${kafka.scala.version} 38 | test 39 | 40 | 41 | org.apache.kafka 42 | kafka_${kafka.scala.version} 43 | test 44 | test 45 | 46 | 47 | org.scala-lang 48 | scala-library 49 | test 50 | 51 | 52 | org.bouncycastle 53 | bcpkix-jdk15on 54 | test 55 | 56 | 57 | junit 58 | junit 59 | test 60 | 61 | 62 | org.slf4j 63 | slf4j-log4j12 64 | test 65 | 66 | 69 | 70 | org.apache.directory.api 71 | api-all 72 | test 73 | 74 | 75 | xml-apis 76 | xml-apis 77 | 78 | 79 | org.apache.directory.api 80 | api-ldap-schema-data 81 | 82 | 83 | 84 | 85 | org.apache.directory.server 86 | apacheds-core-api 87 | test 88 | 89 | 90 | org.apache.directory.api 91 | api-ldap-schema-data 92 | 93 | 94 | 95 | 96 | org.apache.directory.server 97 | apacheds-interceptor-kerberos 98 | test 99 | 100 | 101 | org.apache.directory.api 102 | api-ldap-schema-data 103 | 104 | 105 | 106 | 107 | org.apache.directory.server 108 | apacheds-protocol-shared 109 | test 110 | 111 | 112 | org.apache.directory.api 113 | api-ldap-schema-data 114 | 115 | 116 | 117 | 118 | org.apache.directory.server 119 | apacheds-protocol-kerberos 120 | test 121 | 122 | 123 | org.apache.directory.api 124 | api-ldap-schema-data 125 | 126 | 127 | 128 | 129 | org.apache.directory.server 130 | apacheds-protocol-ldap 131 | test 132 | 133 | 134 | org.apache.directory.api 135 | api-ldap-schema-data 136 | 137 | 138 | 139 | 140 | org.apache.directory.server 141 | apacheds-ldif-partition 142 | test 143 | 144 | 145 | org.apache.directory.api 146 | api-ldap-schema-data 147 | 148 | 149 | 150 | 151 | org.apache.directory.server 152 | apacheds-mavibot-partition 153 | test 154 | 155 | 156 | org.apache.directory.api 157 | api-ldap-schema-data 158 | 159 | 160 | 161 | 162 | org.apache.directory.server 163 | apacheds-jdbm-partition 164 | test 165 | 166 | 167 | org.apache.directory.api 168 | api-ldap-schema-data 169 | 170 | 171 | 172 | 173 | 174 | 175 | 176 | 177 | org.apache.maven.plugins 178 | maven-jar-plugin 179 | 3.4.2 180 | 181 | 182 | 183 | test-jar 184 | 185 | test-compile 186 | 187 | 188 | 189 | 190 | 191 | 192 | 
--------------------------------------------------------------------------------
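Finally, a short sketch against the Cache interface defined in Cache.java above, using the in-memory backend from io.kcache.utils. Assumptions: InMemoryCache has a no-arg constructor, KeyValue exposes public key/value fields, and KeyValueIterator is Closeable (all conventional in this code base, but worth verifying):

    import io.kcache.Cache;
    import io.kcache.KeyValue;
    import io.kcache.KeyValueIterator;
    import io.kcache.utils.InMemoryCache;

    public class RangeExample {
        public static void main(String[] args) throws Exception {
            Cache<String, String> cache = new InMemoryCache<>();
            cache.init();
            cache.put("a", "1");
            cache.put("b", "2");
            cache.put("c", "3");
            // per the javadoc above, null endpoints mean "from the beginning" / "to the end"
            try (KeyValueIterator<String, String> iter = cache.range("a", true, "b", true)) {
                while (iter.hasNext()) {
                    KeyValue<String, String> kv = iter.next();
                    System.out.println(kv.key + " -> " + kv.value); // a -> 1, b -> 2
                }
            }
            cache.close();
        }
    }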