├── settings.gradle ├── src ├── main │ ├── AndroidManifest.xml │ └── java │ │ └── com │ │ └── couchbase │ │ └── lite │ │ ├── util │ │ └── NativeLibUtils.java │ │ └── store │ │ ├── ForestBridge.java │ │ ├── ForestDBViewStore.java │ │ └── ForestDBStore.java └── androidTest │ └── java │ └── com │ └── couchbase │ └── cbforest │ ├── C4KeyTest.java │ ├── C4EncryptedDatabaseTest.java │ ├── C4TestCase.java │ ├── C4ViewTest.java │ └── C4DatabaseTest.java ├── gradle └── wrapper │ ├── gradle-wrapper.jar │ └── gradle-wrapper.properties ├── gradle.properties ├── vendor └── sqlite │ ├── README.md │ └── sqlite3ext.h ├── .gitmodules ├── jni ├── Application.mk ├── source │ └── native_forestdbstore.cc └── Android.mk ├── proguard-rules.pro ├── Makefile ├── .gitignore ├── README.md ├── gradlew.bat ├── gradlew └── LICENSE /settings.gradle: -------------------------------------------------------------------------------- 1 | include ':couchbase-lite-java-forestdb', ':libraries:couchbase-lite-java-core' -------------------------------------------------------------------------------- /src/main/AndroidManifest.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | -------------------------------------------------------------------------------- /gradle/wrapper/gradle-wrapper.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/couchbaselabs/couchbase-lite-java-forestdb/HEAD/gradle/wrapper/gradle-wrapper.jar -------------------------------------------------------------------------------- /gradle.properties: -------------------------------------------------------------------------------- 1 | VS_2015_INCLUDE_DIR=C:/Program Files (x86)/Windows Kits/10/Include/10.0.10150.0/ucrt 2 | VS_2015_LIB_DIR=C:/Program Files (x86)/Windows Kits/10/Lib/10.0.10150.0/ucrt 3 | -------------------------------------------------------------------------------- /vendor/sqlite/README.md: -------------------------------------------------------------------------------- 1 | C source code as an amalgamation, version 3.8.11.1. 2 | 3 | Downloaded from https://www.sqlite.org/2015/sqlite-amalgamation-3081101.zip 4 | 5 | This files are for compiling [sqlite3-unicodesn](https://github.com/snej/sqlite3-unicodesn). 
6 | 7 | 8 | -------------------------------------------------------------------------------- /gradle/wrapper/gradle-wrapper.properties: -------------------------------------------------------------------------------- 1 | #Mon Dec 28 10:00:20 PST 2015 2 | distributionBase=GRADLE_USER_HOME 3 | distributionPath=wrapper/dists 4 | zipStoreBase=GRADLE_USER_HOME 5 | zipStorePath=wrapper/dists 6 | distributionUrl=https\://services.gradle.org/distributions/gradle-2.10-all.zip 7 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "vendor/cbforest"] 2 | path = vendor/cbforest 3 | url = https://github.com/couchbaselabs/cbforest.git 4 | [submodule "libraries/couchbase-lite-java-core"] 5 | path = libraries/couchbase-lite-java-core 6 | url = https://github.com/couchbase/couchbase-lite-java-core.git 7 | -------------------------------------------------------------------------------- /jni/Application.mk: -------------------------------------------------------------------------------- 1 | # APP_ABI := armeabi mips armeabi-v7a x86 arm64-v8a x86_64 mips64 2 | APP_ABI := all 3 | APP_PLATFORM := android-19 4 | # it seems no backward compatibility. 5 | # APP_PLATFORM := android-21 6 | NDK_TOOLCHAIN_VERSION := clang 7 | APP_STL := gnustl_static 8 | # APP_OPTIM := debug # default is `release` 9 | -------------------------------------------------------------------------------- /proguard-rules.pro: -------------------------------------------------------------------------------- 1 | # Add project specific ProGuard rules here. 2 | # By default, the flags in this file are appended to flags specified 3 | # in /Users/hideki/Library/Android/sdk/tools/proguard/proguard-android.txt 4 | # You can edit the include path and order by changing the proguardFiles 5 | # directive in build.gradle. 6 | # 7 | # For more details, see 8 | # http://developer.android.com/guide/developing/tools/proguard.html 9 | 10 | # Add any project specific keep options here: 11 | 12 | # If your project uses WebView with JS, uncomment the following 13 | # and specify the fully qualified class name to the JavaScript interface 14 | # class: 15 | #-keepclassmembers class fqcn.of.javascript.interface.for.webview { 16 | # public *; 17 | #} 18 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | all: clean javah ndk-build jar 2 | 3 | JAVA_SRC_DIR=./vendor/cbforest/Java/src 4 | JNI_SRC_DIR=./vendor/cbforest/Java/jni 5 | 6 | clean: 7 | rm -rf $(JNI_SRC_DIR)/com_couchbase_cbforest_*.h 8 | rm -rf libs 9 | rm -rf obj 10 | rm -rf classes 11 | rm -rf cbforest.jar 12 | 13 | javah: 14 | javah -classpath $(JAVA_SRC_DIR) -d $(JNI_SRC_DIR) \ 15 | com.couchbase.cbforest.Database \ 16 | com.couchbase.cbforest.Document \ 17 | com.couchbase.cbforest.DocumentIterator \ 18 | com.couchbase.cbforest.ForestException \ 19 | com.couchbase.cbforest.Logger \ 20 | com.couchbase.cbforest.QueryIterator \ 21 | com.couchbase.cbforest.View 22 | 23 | # Build native liberary by NDK 24 | # NOTE: please modify ndk-build command path below!! 25 | ndk-build: 26 | ndk-build -C jni clean 27 | ndk-build -C jni 28 | 29 | # compile JNI java binding code and make Jar file 30 | jar: 31 | mkdir -p ./classes 32 | javac -source 1.7 -target 1.7 -d ./classes $(JAVA_SRC_DIR)/com/couchbase/cbforest/*.java 33 | jar -cf cbforest.jar -C classes/ . 
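
The `jar` target above packages the `com.couchbase.cbforest` binding classes, while `ndk-build` (or the Gradle `-Pspec=java` build) produces the matching native library that still has to be loaded at runtime. As a hedged illustration only — not code from this repository — the sketch below strings together the same `Database` calls used by the tests under `src/androidTest`, once a platform-appropriate native library has been loaded; the `System.load` path, the class name, and the sample store/key/body strings are placeholder assumptions.

```
// Hedged sketch: paths and names below are assumptions, not repo code.
import com.couchbase.cbforest.Database;

import java.io.File;

public class CBForestSmokeTest {
    public static void main(String[] args) throws Exception {
        // Load a platform-appropriate native library first (placeholder path).
        System.load(new File("libs/x86_64/libcbforest.so").getAbsolutePath());

        // Create or open an unencrypted, auto-compacting ForestDB file.
        Database db = new Database(new File("forest_smoke.fdb").getPath(),
                Database.Create | Database.AutoCompact,
                Database.NoEncryption, null);
        try {
            // Write one raw key/value pair inside a transaction, as the tests do.
            boolean commit = false;
            db.beginTransaction();
            try {
                db.rawPut("test", "key", "meta".getBytes(), "body".getBytes());
                commit = true;
            } finally {
                db.endTransaction(commit);
            }

            // Read it back: rawGet returns { meta, body }.
            byte[][] metaAndBody = db.rawGet("test", "key");
            System.out.println("body = " + new String(metaAndBody[1]));
        } finally {
            db.close();
        }
    }
}
```
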
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | ################################### 2 | ### 3 | ################################### 4 | *.swp 5 | 6 | ################################### 7 | ### from C++.gitignore 8 | ################################### 9 | 10 | # Compiled Object files 11 | *.slo 12 | *.lo 13 | *.o 14 | *.obj 15 | 16 | # Precompiled Headers 17 | *.gch 18 | *.pch 19 | 20 | # Compiled Dynamic libraries 21 | # *.so 22 | *.dylib 23 | *.dll 24 | 25 | # Fortran module files 26 | *.mod 27 | 28 | # Compiled Static libraries 29 | *.lai 30 | *.la 31 | *.a 32 | *.lib 33 | 34 | # Executables 35 | *.exe 36 | *.out 37 | *.app 38 | 39 | *.o.d 40 | 41 | ################################### 42 | ### from C.gitignore 43 | ################################### 44 | # Object files 45 | *.o 46 | *.ko 47 | *.obj 48 | *.elf 49 | 50 | # Precompiled Headers 51 | *.gch 52 | *.pch 53 | 54 | # Libraries 55 | *.lib 56 | *.a 57 | *.la 58 | *.lo 59 | 60 | # Shared objects (inc. Windows DLLs) 61 | *.dll 62 | *.so 63 | *.so.* 64 | *.dylib 65 | 66 | # Executables 67 | *.exe 68 | *.out 69 | *.app 70 | *.i*86 71 | *.x86_64 72 | *.hex 73 | 74 | ################################### 75 | ### from Java.gitignore 76 | ################################### 77 | *.class 78 | 79 | # Mobile Tools for Java (J2ME) 80 | .mtj.tmp/ 81 | 82 | # Package Files # 83 | #*.jar 84 | *.war 85 | *.ear 86 | 87 | # virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml 88 | hs_err_pid* 89 | 90 | ################################### 91 | ### Generated JNI Headers 92 | ################################### 93 | jni/source/*.h 94 | 95 | ################################### 96 | ### Project specific 97 | ################################### 98 | #jni/cbforest_wrap.cc 99 | #jni/cbforest_wrap.h 100 | # libs/ 101 | obj/ 102 | # src/ 103 | classes/ 104 | # copy.sh 105 | 106 | ################################### 107 | ### From couchbase-lite-java-core 108 | ################################### 109 | /build/ 110 | *.iml 111 | .DS_Store 112 | /src/instrumentTest/assets/local-test.properties 113 | *.class 114 | local.properties 115 | /.gradle/ 116 | \#* 117 | *~ 118 | .#* 119 | \#*\# 120 | *.class 121 | .idea 122 | .gradle -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ⚠️ This repo is obsolete. It was used in Couchbase Lite Android / Java 1.x. 2 | 3 | Java wrapper around [CBForest](https://github.com/couchbaselabs/cbforest). Includes SWIG generated java wrappers for CBForest. 4 | 5 | 6 | ## Quick Start 7 | 8 | ``` 9 | $ git clone 10 | $ cd couchbase-lite-java-forestdb 11 | $ git submodule update --init --recursive 12 | ``` 13 | #### Prerequisites 14 | * 1. Install [SWIG](http://www.swig.org/) -- Optional: This is required if you want to generate JNI interface files (C/C++/Java) from SWIG interface file. 15 | 16 | If you are using Mac OSX 17 | ``` 18 | $ brew install swig 19 | ``` 20 | 21 | * 2. 
Install [Android NDK](https://developer.android.com/tools/sdk/ndk/index.html) version r10c 22 | 23 | Note: Please add ANDROID SDK and NDK home directories in the envronment PATH 24 | ``` 25 | export ANDROID_HOME= 26 | export PATH=$ANDROID_HOME/tools:$ANDROID_HOME/platform-tools:$PATH 27 | 28 | #export ANDROID_NDK_HOME= 29 | #export PATH=$ANDROID_NDK_HOME:$PATH 30 | ``` 31 | 32 | ### Build by Gradle 33 | 34 | #### How to generate the AAR file for Android 35 | ``` 36 | $ ./gradlew -Pspec=android assemble 37 | $ cd build/outputs/aar 38 | ``` 39 | #### Run UnitTest for Android 40 | ``` 41 | $ ./gradlew -Pspec=android connectedAndroidTest --debug 42 | ``` 43 | #### How to generate the Jar file for Java Desktop 44 | ``` 45 | $ ./gradlew -Pspec=java assemble 46 | $ cd build/libs 47 | ``` 48 | 49 | ### Build cbforest by make for Android platform 50 | 51 | #### Generate JNI java and native (C/C++) binding codes by SWIG 52 | ``` 53 | $ make swig 54 | ``` 55 | #### Compile JNI native (C/C++) codes by Android NDK 56 | Note: Update Makefile to specify your NDK build command path 57 | ``` 58 | $ make ndk-build 59 | ``` 60 | #### Compile JNI java files and make jar file 61 | ``` 62 | $ make jar 63 | ``` 64 | #### Outcome 65 | * cbforest.jar 66 | * libs/[platform]/libcbforest.so 67 | 68 | ## NOTES 69 | ### Build for Linux 70 | * Needs to install clang: `sudo apt-get install clang` for Ubuntu 71 | * Unable to cross-compile x86 build on 64bit machine. `clang` does not work with `gcc-multilib` and `g++-multilib`. We might needs to switch to GCC, but it requires CBForest code changes. 72 | 73 | 74 | -------------------------------------------------------------------------------- /gradlew.bat: -------------------------------------------------------------------------------- 1 | @if "%DEBUG%" == "" @echo off 2 | @rem ########################################################################## 3 | @rem 4 | @rem Gradle startup script for Windows 5 | @rem 6 | @rem ########################################################################## 7 | 8 | @rem Set local scope for the variables with windows NT shell 9 | if "%OS%"=="Windows_NT" setlocal 10 | 11 | @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 12 | set DEFAULT_JVM_OPTS= 13 | 14 | set DIRNAME=%~dp0 15 | if "%DIRNAME%" == "" set DIRNAME=. 16 | set APP_BASE_NAME=%~n0 17 | set APP_HOME=%DIRNAME% 18 | 19 | @rem Find java.exe 20 | if defined JAVA_HOME goto findJavaFromJavaHome 21 | 22 | set JAVA_EXE=java.exe 23 | %JAVA_EXE% -version >NUL 2>&1 24 | if "%ERRORLEVEL%" == "0" goto init 25 | 26 | echo. 27 | echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 28 | echo. 29 | echo Please set the JAVA_HOME variable in your environment to match the 30 | echo location of your Java installation. 31 | 32 | goto fail 33 | 34 | :findJavaFromJavaHome 35 | set JAVA_HOME=%JAVA_HOME:"=% 36 | set JAVA_EXE=%JAVA_HOME%/bin/java.exe 37 | 38 | if exist "%JAVA_EXE%" goto init 39 | 40 | echo. 41 | echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 42 | echo. 43 | echo Please set the JAVA_HOME variable in your environment to match the 44 | echo location of your Java installation. 45 | 46 | goto fail 47 | 48 | :init 49 | @rem Get command-line arguments, handling Windowz variants 50 | 51 | if not "%OS%" == "Windows_NT" goto win9xME_args 52 | if "%@eval[2+2]" == "4" goto 4NT_args 53 | 54 | :win9xME_args 55 | @rem Slurp the command line arguments. 
56 | set CMD_LINE_ARGS= 57 | set _SKIP=2 58 | 59 | :win9xME_args_slurp 60 | if "x%~1" == "x" goto execute 61 | 62 | set CMD_LINE_ARGS=%* 63 | goto execute 64 | 65 | :4NT_args 66 | @rem Get arguments from the 4NT Shell from JP Software 67 | set CMD_LINE_ARGS=%$ 68 | 69 | :execute 70 | @rem Setup the command line 71 | 72 | set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar 73 | 74 | @rem Execute Gradle 75 | "%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% 76 | 77 | :end 78 | @rem End local scope for the variables with windows NT shell 79 | if "%ERRORLEVEL%"=="0" goto mainEnd 80 | 81 | :fail 82 | rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of 83 | rem the _cmd.exe /c_ return code! 84 | if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 85 | exit /b 1 86 | 87 | :mainEnd 88 | if "%OS%"=="Windows_NT" endlocal 89 | 90 | :omega 91 | -------------------------------------------------------------------------------- /src/androidTest/java/com/couchbase/cbforest/C4KeyTest.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Created by Hideki Itakura on 10/20/2015. 3 | * Copyright (c) 2015 Couchbase, Inc All rights reserved. 4 | * 5 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file 6 | * except in compliance with the License. You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software distributed under the 11 | * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, 12 | * either express or implied. See the License for the specific language governing permissions 13 | * and limitations under the License. 
14 | */ 15 | package com.couchbase.cbforest; 16 | 17 | public class C4KeyTest extends C4TestCase implements Constants { 18 | private long _key = 0; 19 | 20 | @Override 21 | protected void setUp() throws Exception { 22 | super.setUp(); 23 | _key = View.newKey(); 24 | } 25 | 26 | @Override 27 | protected void tearDown() throws Exception { 28 | if(_key !=0) { 29 | View.freeKey(_key); 30 | _key = 0; 31 | } 32 | super.tearDown(); 33 | } 34 | 35 | void populateKey() { 36 | View.keyBeginArray(_key); 37 | View.keyAddNull(_key); 38 | View.keyAdd(_key, false); 39 | View.keyAdd(_key, true); 40 | View.keyAdd(_key, 0); 41 | View.keyAdd(_key, 12345); 42 | View.keyAdd(_key, -2468); 43 | View.keyAdd(_key, "foo"); 44 | View.keyBeginArray(_key); 45 | View.keyEndArray(_key); 46 | View.keyEndArray(_key); 47 | } 48 | 49 | public void testCreateKey() { 50 | populateKey(); 51 | 52 | assertEquals("[null,false,true,0,12345,-2468,\"foo\",[]]", View.keyToJSON(_key)); 53 | } 54 | 55 | public void testReadKey() { 56 | populateKey(); 57 | 58 | long _reader = View.keyReader(_key); 59 | try { 60 | assertEquals(C4KeyToken.kC4Array, View.keyPeek(_reader)); 61 | View.keySkipToken(_reader); 62 | assertEquals(C4KeyToken.kC4Null, View.keyPeek(_reader)); 63 | View.keySkipToken(_reader); 64 | assertEquals(C4KeyToken.kC4Bool, View.keyPeek(_reader)); 65 | assertEquals(false, View.keyReadBool(_reader)); 66 | assertEquals(true, View.keyReadBool(_reader)); 67 | assertEquals(0.0, View.keyReadNumber(_reader)); 68 | assertEquals(12345.0, View.keyReadNumber(_reader)); 69 | assertEquals(-2468.0, View.keyReadNumber(_reader)); 70 | assertEquals("foo", View.keyReadString(_reader)); 71 | assertEquals(C4KeyToken.kC4Array, View.keyPeek(_reader)); 72 | View.keySkipToken(_reader); 73 | assertEquals(C4KeyToken.kC4EndSequence, View.keyPeek(_reader)); 74 | View.keySkipToken(_reader); 75 | assertEquals(C4KeyToken.kC4EndSequence, View.keyPeek(_reader)); 76 | View.keySkipToken(_reader); 77 | }finally { 78 | View.freeKeyReader(_reader); 79 | _reader = 0; 80 | } 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /src/androidTest/java/com/couchbase/cbforest/C4EncryptedDatabaseTest.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Created by Hideki Itakura on 10/20/2015. 3 | * Copyright (c) 2015 Couchbase, Inc All rights reserved. 4 | * 5 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file 6 | * except in compliance with the License. You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software distributed under the 11 | * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, 12 | * either express or implied. See the License for the specific language governing permissions 13 | * and limitations under the License. 14 | */ 15 | package com.couchbase.cbforest; 16 | 17 | import java.io.File; 18 | 19 | public class C4EncryptedDatabaseTest extends C4DatabaseTest{ 20 | 21 | //static final int algorithm = -1; // FAKE encryption 22 | static final int algorithm = Database.AES256Encryption; 23 | /** 24 | * For now, the AES encryption mode (kC4EncryptionAES) is only implemented for Apple platforms. 25 | * It should be easy to hook it up to other platforms’ native crypto APIs, though; see forestdb/utils/crypto_primitives.h. 
26 | * 27 | * For testing, there is a hidden ‘encryption' mode whose constant is -1. 28 | * It’s a trivial and really insecure XOR-based algorithm, but it’ll validate that the right key is being used. 29 | */ 30 | static final byte[] encryptionKey = "this is not a random key at all...".substring(0, 32).getBytes(); 31 | // original key length is 34. It is reason to substirng to 0..32. 32 | 33 | protected int encryptionAlgorithm() { 34 | return algorithm; 35 | } 36 | 37 | protected byte[] encryptionKey() { 38 | return encryptionKey; 39 | } 40 | 41 | public void testRekey() throws ForestException { 42 | testCreateRawDoc(); 43 | 44 | db.rekey(0, null); 45 | 46 | final String store = "test"; 47 | String key = "key"; 48 | byte[][] metaNbody = db.rawGet(store, key); 49 | assertNotNull(metaNbody); 50 | assertEquals(2, metaNbody.length); 51 | } 52 | 53 | public void testUnEncryptedDatabase() throws Exception { 54 | // Create an unencrypted database: 55 | String dbFileName = "forest_temp_unencrypted.fdb"; 56 | deleteDatabaseFile(dbFileName); 57 | File dbFile = new File(mContext.getFilesDir(), dbFileName); 58 | int flag = Database.Create | Database.AutoCompact; 59 | Database unEnDb = new Database(dbFile.getPath(), flag, Database.NoEncryption, null); 60 | 61 | // Add an document: 62 | final String store = "test"; 63 | final String key = "key"; 64 | final String meta = "meta"; 65 | boolean commit = false; 66 | unEnDb.beginTransaction(); 67 | try { 68 | unEnDb.rawPut(store, key, meta.getBytes(), kBody.getBytes()); 69 | commit = true; 70 | } finally { 71 | unEnDb.endTransaction(commit); 72 | } 73 | 74 | // Close database: 75 | unEnDb.close(); 76 | unEnDb = null; 77 | 78 | // Open database with an encryption key: 79 | ForestException error = null; 80 | try { 81 | unEnDb = new Database(dbFile.getPath(), flag, Database.AES256Encryption, encryptionKey); 82 | } catch (ForestException e) { 83 | error = e; 84 | } 85 | assertNull(unEnDb); 86 | assertNotNull(error); 87 | assertEquals(FDBErrors.FDB_RESULT_NO_DB_HEADERS, error.code); 88 | 89 | deleteDatabaseFile(dbFileName); 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /jni/source/native_forestdbstore.cc: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "com_couchbase_lite_store_ForestDBStore.h" 4 | 5 | 6 | #if !defined (_CRYPTO_CC) \ 7 | && !defined (_CRYPTO_OPENSSL) 8 | #define _CRYPTO_OPENSSL 9 | #endif 10 | 11 | #if defined (_CRYPTO_CC) 12 | 13 | #import 14 | 15 | JNIEXPORT jbyteArray JNICALL Java_com_couchbase_lite_store_ForestDBStore_nativeDerivePBKDF2SHA256Key 16 | (JNIEnv* env, jclass clazz, jstring password, jbyteArray salt, jint rounds) { 17 | if (password == NULL || salt == NULL || rounds < 1) 18 | return NULL; 19 | 20 | // Password: 21 | const char* passwordCStr = env->GetStringUTFChars(password, NULL); 22 | int passwordSize = (int)env->GetStringLength (password); 23 | 24 | // Salt: 25 | int saltSize = env->GetArrayLength (salt); 26 | unsigned char* saltBytes = new unsigned char[saltSize]; 27 | env->GetByteArrayRegion (salt, 0, saltSize, reinterpret_cast(saltBytes)); 28 | 29 | // PBKDF2-SHA256 30 | int outputSize = 32; //256 bit 31 | unsigned char* output = new unsigned char[outputSize]; 32 | int status = CCKeyDerivationPBKDF(kCCPBKDF2, 33 | passwordCStr, passwordSize, 34 | saltBytes, saltSize, 35 | kCCPRFHmacAlgSHA256, rounds, 36 | output, outputSize); 37 | 38 | // Release memory: 39 | env->ReleaseStringUTFChars(password, passwordCStr); 
40 | delete[] saltBytes; 41 | 42 | // Return null if not success: 43 | if (status) { 44 | delete[] output; 45 | return NULL; 46 | } 47 | 48 | // Result: 49 | jbyteArray result = env->NewByteArray(outputSize); 50 | env->SetByteArrayRegion(result, 0, outputSize, (jbyte*)output); 51 | 52 | // Release memory: 53 | delete[] output; 54 | 55 | return result; 56 | } 57 | 58 | #elif defined (_CRYPTO_OPENSSL) 59 | 60 | #include "openssl/evp.h" 61 | #include "openssl/sha.h" 62 | 63 | JNIEXPORT jbyteArray JNICALL Java_com_couchbase_lite_store_ForestDBStore_nativeDerivePBKDF2SHA256Key 64 | (JNIEnv* env, jclass clazz, jstring password, jbyteArray salt, jint rounds) { 65 | if (password == NULL || salt == NULL || rounds < 1) 66 | return NULL; 67 | 68 | // Password: 69 | const char* passwordCStr = env->GetStringUTFChars(password, NULL); 70 | int passwordSize = (int)env->GetStringLength (password); 71 | 72 | // Salt: 73 | int saltSize = env->GetArrayLength (salt); 74 | unsigned char* saltBytes = new unsigned char[saltSize]; 75 | env->GetByteArrayRegion (salt, 0, saltSize, reinterpret_cast(saltBytes)); 76 | 77 | // PBKDF2-SHA256 78 | int outputSize = 32; //256 bit 79 | unsigned char* output = new unsigned char[outputSize]; 80 | int status = PKCS5_PBKDF2_HMAC(passwordCStr, passwordSize, saltBytes, saltSize, 81 | (int)rounds, EVP_sha256(), outputSize, output); 82 | // Release memory: 83 | env->ReleaseStringUTFChars(password, passwordCStr); 84 | delete[] saltBytes; 85 | 86 | // Return null if not success: 87 | if (status == 0) { 88 | delete[] output; 89 | return NULL; 90 | } 91 | 92 | // Result: 93 | jbyteArray result = env->NewByteArray(outputSize); 94 | env->SetByteArrayRegion(result, 0, outputSize, (jbyte*)output); 95 | 96 | // Release memory: 97 | delete[] output; 98 | 99 | return result; 100 | } 101 | 102 | #else 103 | #error "NO DEFAULT CRYPTO PROVIDER DEFINED" 104 | #endif 105 | -------------------------------------------------------------------------------- /src/androidTest/java/com/couchbase/cbforest/C4TestCase.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Created by Hideki Itakura on 10/20/2015. 3 | * Copyright (c) 2015 Couchbase, Inc All rights reserved. 4 | * 5 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file 6 | * except in compliance with the License. You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software distributed under the 11 | * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, 12 | * either express or implied. See the License for the specific language governing permissions 13 | * and limitations under the License. 
14 | */ 15 | package com.couchbase.cbforest; 16 | 17 | import android.test.AndroidTestCase; 18 | 19 | import com.couchbase.lite.util.Log; 20 | 21 | import java.io.File; 22 | 23 | /** 24 | * Ported from c4Test.cc 25 | */ 26 | public class C4TestCase extends AndroidTestCase implements Constants{ 27 | public static final String TAG = C4TestCase.class.getSimpleName(); 28 | 29 | public static final String DB_FILENAME = "forest_temp.fdb"; 30 | 31 | static { 32 | try { 33 | System.loadLibrary("CouchbaseLiteJavaForestDB"); 34 | } catch (Exception e) { 35 | Log.e(TAG, "ERROR: Failed to load libCouchbaseLiteJavaForestDB"); 36 | fail("ERROR: Failed to load libCouchbaseLiteJavaForestDB."); 37 | } 38 | } 39 | 40 | protected int encryptionAlgorithm() { 41 | return Database.NoEncryption; 42 | } 43 | 44 | protected byte[] encryptionKey() { 45 | return null; 46 | } 47 | 48 | protected Database db = null; 49 | protected static final String kDocID = "mydoc"; 50 | protected static final String kRevID = "1-abcdef"; 51 | protected static final String kRev2ID= "2-d00d3333"; 52 | protected static final String kBody = "{\"name\":007}"; 53 | 54 | @Override 55 | protected void setUp() throws Exception { 56 | super.setUp(); 57 | deleteDatabaseFile(DB_FILENAME); 58 | File dbFile = new File(mContext.getFilesDir(), DB_FILENAME); 59 | db = new Database(dbFile.getPath(), Database.Create, encryptionAlgorithm(), encryptionKey()); 60 | } 61 | 62 | protected void deleteDatabaseFile(String dbFileName) { 63 | deleteFile(dbFileName); 64 | deleteFile(dbFileName + ".0"); 65 | deleteFile(dbFileName + ".1"); 66 | deleteFile(dbFileName + ".meta"); 67 | } 68 | 69 | private void deleteFile(String filename){ 70 | File file = new File(mContext.getFilesDir(), filename); 71 | if (file.exists()) { 72 | if (!file.delete()) { 73 | Log.e(TAG, "ERROR failed to delete: dbFile=" + file); 74 | } 75 | } 76 | } 77 | 78 | @Override 79 | protected void tearDown() throws Exception { 80 | 81 | if (db != null) { 82 | db.close(); 83 | db = null; 84 | } 85 | 86 | super.tearDown(); 87 | } 88 | 89 | protected void createRev(String docID, String revID, byte[] body) throws ForestException{ 90 | createRev(docID, revID, body, true); // 1 for new 91 | } 92 | 93 | /** 94 | * @param isNew true - new (201), false - not new (200) 95 | */ 96 | protected void createRev(String docID, String revID, byte[] body, boolean isNew) throws ForestException{ 97 | boolean commit = false; 98 | db.beginTransaction(); 99 | try { 100 | Document doc = db.getDocument(docID, false); 101 | assertNotNull(doc); 102 | boolean deleted = body == null; 103 | assertEquals(isNew, doc.insertRevision(revID, body, deleted, false, false)); 104 | doc.save(20); 105 | doc.free(); 106 | commit = true; 107 | }finally { 108 | db.endTransaction(commit); 109 | } 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /gradlew: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ############################################################################## 4 | ## 5 | ## Gradle start up script for UN*X 6 | ## 7 | ############################################################################## 8 | 9 | # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 10 | DEFAULT_JVM_OPTS="" 11 | 12 | APP_NAME="Gradle" 13 | APP_BASE_NAME=`basename "$0"` 14 | 15 | # Use the maximum available, or set MAX_FD != -1 to use that value. 
16 | MAX_FD="maximum" 17 | 18 | warn ( ) { 19 | echo "$*" 20 | } 21 | 22 | die ( ) { 23 | echo 24 | echo "$*" 25 | echo 26 | exit 1 27 | } 28 | 29 | # OS specific support (must be 'true' or 'false'). 30 | cygwin=false 31 | msys=false 32 | darwin=false 33 | case "`uname`" in 34 | CYGWIN* ) 35 | cygwin=true 36 | ;; 37 | Darwin* ) 38 | darwin=true 39 | ;; 40 | MINGW* ) 41 | msys=true 42 | ;; 43 | esac 44 | 45 | # For Cygwin, ensure paths are in UNIX format before anything is touched. 46 | if $cygwin ; then 47 | [ -n "$JAVA_HOME" ] && JAVA_HOME=`cygpath --unix "$JAVA_HOME"` 48 | fi 49 | 50 | # Attempt to set APP_HOME 51 | # Resolve links: $0 may be a link 52 | PRG="$0" 53 | # Need this for relative symlinks. 54 | while [ -h "$PRG" ] ; do 55 | ls=`ls -ld "$PRG"` 56 | link=`expr "$ls" : '.*-> \(.*\)$'` 57 | if expr "$link" : '/.*' > /dev/null; then 58 | PRG="$link" 59 | else 60 | PRG=`dirname "$PRG"`"/$link" 61 | fi 62 | done 63 | SAVED="`pwd`" 64 | cd "`dirname \"$PRG\"`/" >&- 65 | APP_HOME="`pwd -P`" 66 | cd "$SAVED" >&- 67 | 68 | CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar 69 | 70 | # Determine the Java command to use to start the JVM. 71 | if [ -n "$JAVA_HOME" ] ; then 72 | if [ -x "$JAVA_HOME/jre/sh/java" ] ; then 73 | # IBM's JDK on AIX uses strange locations for the executables 74 | JAVACMD="$JAVA_HOME/jre/sh/java" 75 | else 76 | JAVACMD="$JAVA_HOME/bin/java" 77 | fi 78 | if [ ! -x "$JAVACMD" ] ; then 79 | die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME 80 | 81 | Please set the JAVA_HOME variable in your environment to match the 82 | location of your Java installation." 83 | fi 84 | else 85 | JAVACMD="java" 86 | which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 87 | 88 | Please set the JAVA_HOME variable in your environment to match the 89 | location of your Java installation." 90 | fi 91 | 92 | # Increase the maximum file descriptors if we can. 93 | if [ "$cygwin" = "false" -a "$darwin" = "false" ] ; then 94 | MAX_FD_LIMIT=`ulimit -H -n` 95 | if [ $? -eq 0 ] ; then 96 | if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then 97 | MAX_FD="$MAX_FD_LIMIT" 98 | fi 99 | ulimit -n $MAX_FD 100 | if [ $? 
-ne 0 ] ; then 101 | warn "Could not set maximum file descriptor limit: $MAX_FD" 102 | fi 103 | else 104 | warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" 105 | fi 106 | fi 107 | 108 | # For Darwin, add options to specify how the application appears in the dock 109 | if $darwin; then 110 | GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" 111 | fi 112 | 113 | # For Cygwin, switch paths to Windows format before running java 114 | if $cygwin ; then 115 | APP_HOME=`cygpath --path --mixed "$APP_HOME"` 116 | CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` 117 | 118 | # We build the pattern for arguments to be converted via cygpath 119 | ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` 120 | SEP="" 121 | for dir in $ROOTDIRSRAW ; do 122 | ROOTDIRS="$ROOTDIRS$SEP$dir" 123 | SEP="|" 124 | done 125 | OURCYGPATTERN="(^($ROOTDIRS))" 126 | # Add a user-defined pattern to the cygpath arguments 127 | if [ "$GRADLE_CYGPATTERN" != "" ] ; then 128 | OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" 129 | fi 130 | # Now convert the arguments - kludge to limit ourselves to /bin/sh 131 | i=0 132 | for arg in "$@" ; do 133 | CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` 134 | CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option 135 | 136 | if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition 137 | eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` 138 | else 139 | eval `echo args$i`="\"$arg\"" 140 | fi 141 | i=$((i+1)) 142 | done 143 | case $i in 144 | (0) set -- ;; 145 | (1) set -- "$args0" ;; 146 | (2) set -- "$args0" "$args1" ;; 147 | (3) set -- "$args0" "$args1" "$args2" ;; 148 | (4) set -- "$args0" "$args1" "$args2" "$args3" ;; 149 | (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; 150 | (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; 151 | (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; 152 | (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; 153 | (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; 154 | esac 155 | fi 156 | 157 | # Split up the JVM_OPTS And GRADLE_OPTS values into an array, following the shell quoting and substitution rules 158 | function splitJvmOpts() { 159 | JVM_OPTS=("$@") 160 | } 161 | eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS 162 | JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME" 163 | 164 | exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" org.gradle.wrapper.GradleWrapperMain "$@" 165 | -------------------------------------------------------------------------------- /src/main/java/com/couchbase/lite/util/NativeLibUtils.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Created by Hideki Itakura on 10/20/2015. 3 | * Copyright (c) 2015 Couchbase, Inc All rights reserved. 4 | * 5 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file 6 | * except in compliance with the License. You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software distributed under the 11 | * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, 12 | * either express or implied. 
See the License for the specific language governing permissions 13 | * and limitations under the License. 14 | */ 15 | package com.couchbase.lite.util; 16 | 17 | import java.io.File; 18 | import java.io.FileOutputStream; 19 | import java.io.IOException; 20 | import java.io.InputStream; 21 | import java.util.HashMap; 22 | import java.util.Locale; 23 | import java.util.Map; 24 | 25 | public class NativeLibUtils { 26 | public static final String TAG = "Native"; 27 | private static final Map LOADED_LIBRARIES = new HashMap(); 28 | 29 | public static boolean loadLibrary(String libraryName) { 30 | // If the library has already been loaded then no need to reload. 31 | if (LOADED_LIBRARIES.containsKey(libraryName)) return true; 32 | 33 | try { 34 | File libraryFile = null; 35 | 36 | String libraryPath = _getConfiguredLibraryPath(libraryName); 37 | 38 | if (libraryPath != null) { 39 | // If library path is configured then use it. 40 | libraryFile = new File(libraryPath); 41 | } else { 42 | libraryFile = _extractLibrary(libraryName); 43 | } 44 | 45 | System.load(libraryFile.getAbsolutePath()); 46 | 47 | LOADED_LIBRARIES.put(libraryName, true); 48 | } catch (Exception e) { 49 | System.err.println("Error loading library: " + libraryName); 50 | e.printStackTrace(); 51 | return false; 52 | } 53 | 54 | return true; 55 | } 56 | 57 | private static String _getConfiguredLibraryPath(String libraryName) { 58 | String key = String.format(Locale.ENGLISH, "com.couchbase.lite.lib.%s.path", libraryName); 59 | 60 | return System.getProperty(key); 61 | } 62 | 63 | private static String _getLibraryFullName(String libraryName) { 64 | String name = System.mapLibraryName(libraryName); 65 | 66 | // Workaround discrepancy issue between OSX Java6 (.jnilib) 67 | // and Java7 (.dylib) native library file extension. 68 | if (name.endsWith(".jnilib")) { 69 | name = name.replace(".jnilib", ".dylib"); 70 | } 71 | 72 | return name; 73 | } 74 | 75 | private static File _extractLibrary(String libraryName) throws IOException { 76 | String libraryResourcePath = _getLibraryResourcePath(libraryName); 77 | String targetFolder = new File(System.getProperty("java.io.tmpdir")).getAbsolutePath(); 78 | 79 | File targetFile = new File(targetFolder, _getLibraryFullName(libraryName)); 80 | 81 | // If the target already exists, and it's unchanged, then use it, otherwise delete it and 82 | // it will be replaced. 83 | if (targetFile.exists()) { 84 | // Remove old native library file. 85 | if (!targetFile.delete()) { 86 | // If we can't remove the old library file then log a warning and try to use it. 87 | System.err.println("Failed to delete existing library file: " + targetFile.getAbsolutePath()); 88 | return targetFile; 89 | } 90 | } 91 | 92 | // Extract the library to the target directory. 93 | InputStream libraryReader = NativeLibUtils.class.getResourceAsStream(libraryResourcePath); 94 | if (libraryReader == null) { 95 | System.err.println("Library not found: " + libraryResourcePath); 96 | return null; 97 | } 98 | 99 | FileOutputStream libraryWriter = new FileOutputStream(targetFile); 100 | try { 101 | byte[] buffer = new byte[1024]; 102 | int bytesRead = 0; 103 | 104 | while ((bytesRead = libraryReader.read(buffer)) != -1) { 105 | libraryWriter.write(buffer, 0, bytesRead); 106 | } 107 | } finally { 108 | libraryWriter.close(); 109 | libraryReader.close(); 110 | } 111 | 112 | // On non-windows systems set up permissions for the extracted native library. 
113 | if (!System.getProperty("os.name").toLowerCase().contains("windows")) { 114 | try { 115 | Runtime.getRuntime().exec(new String[]{"chmod", "755", targetFile.getAbsolutePath()}).waitFor(); 116 | } catch (Throwable e) { 117 | System.err.println("Error executing 'chmod 755' on extracted native library"); 118 | e.printStackTrace(); 119 | } 120 | } 121 | 122 | return targetFile; 123 | } 124 | 125 | private static String _getLibraryResourcePath(String libraryName) { 126 | // Root native folder. 127 | String path = "/native"; 128 | 129 | // OS part of path. 130 | String osName = System.getProperty("os.name"); 131 | if (osName.contains("Linux")) { 132 | path += "/linux"; 133 | } else if (osName.contains("Mac")) { 134 | path += "/osx"; 135 | } else if (osName.contains("Windows")) { 136 | path += "/windows"; 137 | } else { 138 | path += "/" + osName.replaceAll("\\W", "").toLowerCase(); 139 | } 140 | 141 | // Architecture part of path. 142 | String archName = System.getProperty("os.arch"); 143 | path += "/" + archName.replaceAll("\\W", ""); 144 | 145 | // Platform specific name part of path. 146 | path += "/" + _getLibraryFullName(libraryName); 147 | 148 | return path; 149 | } 150 | } 151 | 152 | -------------------------------------------------------------------------------- /src/main/java/com/couchbase/lite/store/ForestBridge.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Created by Hideki Itakura on 10/20/2015. 3 | * Copyright (c) 2015 Couchbase, Inc All rights reserved. 4 | *
5 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file 6 | * except in compliance with the License. You may obtain a copy of the License at 7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | *
10 | * Unless required by applicable law or agreed to in writing, software distributed under the 11 | * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, 12 | * either express or implied. See the License for the specific language governing permissions 13 | * and limitations under the License. 14 | */ 15 | 16 | package com.couchbase.lite.store; 17 | 18 | import com.couchbase.cbforest.Constants; 19 | import com.couchbase.cbforest.Document; 20 | import com.couchbase.cbforest.ForestException; 21 | import com.couchbase.lite.Manager; 22 | import com.couchbase.lite.Status; 23 | import com.couchbase.lite.internal.RevisionInternal; 24 | import com.couchbase.lite.util.Log; 25 | 26 | import java.io.IOException; 27 | import java.util.ArrayList; 28 | import java.util.List; 29 | import java.util.Map; 30 | 31 | public class ForestBridge implements Constants { 32 | public final static String TAG = ForestBridge.class.getSimpleName(); 33 | 34 | /** 35 | * in CBLForestBridge.m 36 | * + (CBL_MutableRevision*) revisionObjectFromForestDoc: (VersionedDocument&)doc 37 | * revID: (NSString*)revID 38 | * withBody: (BOOL)withBody 39 | */ 40 | public static RevisionInternal revisionObject( 41 | Document doc, 42 | String docID, 43 | String revID, 44 | boolean withBody) { 45 | 46 | boolean deleted = doc.selectedRevDeleted(); 47 | if (revID == null) 48 | revID = doc.getSelectedRevID(); 49 | RevisionInternal rev = new RevisionInternal(docID, revID, deleted); 50 | rev.setSequence(doc.getSelectedSequence()); 51 | if (withBody) { 52 | Status status = loadBodyOfRevisionObject(rev, doc); 53 | if (status.isError() && status.getCode() != Status.GONE) 54 | return null; 55 | } 56 | return rev; 57 | } 58 | 59 | /** 60 | * in CBLForestBridge.m 61 | * + (BOOL) loadBodyOfRevisionObject: (CBL_MutableRevision*)rev 62 | * doc: (VersionedDocument&)doc 63 | */ 64 | public static Status loadBodyOfRevisionObject(RevisionInternal rev, Document doc) { 65 | try { 66 | rev.setSequence(doc.getSelectedSequence()); 67 | doc.selectRevID(rev.getRevID(), true); 68 | rev.setJSON(doc.getSelectedBody()); 69 | return new Status(Status.OK); 70 | } catch (ForestException ex) { 71 | rev.setMissing(true); 72 | return err2status(ex); 73 | } 74 | } 75 | 76 | /** 77 | * in CBLForestBridge.m 78 | * + (BOOL) loadBodyOfRevisionObject: (CBL_MutableRevision*)rev 79 | * doc: (VersionedDocument&)doc 80 | */ 81 | public static Map bodyOfSelectedRevision(Document doc) { 82 | byte[] body; 83 | try { 84 | body = doc.getSelectedBody(); 85 | } catch (ForestException e) { 86 | return null; 87 | } 88 | Map properties = null; 89 | if (body != null && body.length > 0) { 90 | try { 91 | properties = Manager.getObjectMapper().readValue(body, Map.class); 92 | } catch (IOException e) { 93 | Log.w(TAG, "Failed to parse body: [%s]", new String(body)); 94 | } 95 | } 96 | return properties; 97 | } 98 | 99 | 100 | /** 101 | * Not include deleted leaf node 102 | */ 103 | public static List getCurrentRevisionIDs(Document doc) throws ForestException { 104 | List currentRevIDs = new ArrayList(); 105 | do { 106 | currentRevIDs.add(doc.getSelectedRevID()); 107 | } while (doc.selectNextLeaf(false, false)); 108 | return currentRevIDs; 109 | } 110 | 111 | /** 112 | * in CBLForestBridge.m 113 | * CBLStatus err2status(C4Error c4err) 114 | */ 115 | public static Status err2status(ForestException ex) { 116 | return new Status(_err2status(ex)); 117 | } 118 | 119 | /** 120 | * in CBLForestBridge.m 121 | * CBLStatus err2status(C4Error c4err) 122 | */ 123 | public 
static int _err2status(ForestException ex) { 124 | if (ex == null || ex.code == 0) 125 | return Status.OK; 126 | 127 | Log.d(TAG, "[_err2status()] ForestException: domain=%d, code=%d", ex, ex.domain, ex.code); 128 | 129 | switch (ex.domain) { 130 | case C4ErrorDomain.HTTPDomain: 131 | return ex.code; 132 | case C4ErrorDomain.POSIXDomain: 133 | break; 134 | case C4ErrorDomain.ForestDBDomain: 135 | switch (ex.code) { 136 | case FDBErrors.FDB_RESULT_SUCCESS: 137 | return Status.OK; 138 | case FDBErrors.FDB_RESULT_KEY_NOT_FOUND: 139 | case FDBErrors.FDB_RESULT_NO_SUCH_FILE: 140 | return Status.NOT_FOUND; 141 | case FDBErrors.FDB_RESULT_RONLY_VIOLATION: 142 | return Status.FORBIDDEN; 143 | case FDBErrors.FDB_RESULT_NO_DB_HEADERS: 144 | case FDBErrors.FDB_RESULT_CRYPTO_ERROR: 145 | return Status.UNAUTHORIZED; 146 | case FDBErrors.FDB_RESULT_CHECKSUM_ERROR: 147 | case FDBErrors.FDB_RESULT_FILE_CORRUPTION: 148 | return Status.CORRUPT_ERROR; 149 | } 150 | break; 151 | case C4ErrorDomain.C4Domain: 152 | switch (ex.code) { 153 | case C4DomainErrorCode.kC4ErrorCorruptRevisionData: 154 | case C4DomainErrorCode.kC4ErrorCorruptIndexData: 155 | return Status.CORRUPT_ERROR; 156 | case C4DomainErrorCode.kC4ErrorBadRevisionID: 157 | return Status.BAD_ID; 158 | case C4DomainErrorCode.kC4ErrorAssertionFailed: 159 | break; 160 | default: 161 | break; 162 | } 163 | break; 164 | } 165 | return Status.DB_ERROR; 166 | } 167 | } 168 | -------------------------------------------------------------------------------- /src/androidTest/java/com/couchbase/cbforest/C4ViewTest.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Created by Hideki Itakura on 10/20/2015. 3 | * Copyright (c) 2015 Couchbase, Inc All rights reserved. 4 | *
5 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file 6 | * except in compliance with the License. You may obtain a copy of the License at 7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | *
10 | * Unless required by applicable law or agreed to in writing, software distributed under the 11 | * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, 12 | * either express or implied. See the License for the specific language governing permissions 13 | * and limitations under the License. 14 | */ 15 | package com.couchbase.cbforest; 16 | 17 | import java.io.File; 18 | import java.util.Arrays; 19 | 20 | public class C4ViewTest extends C4TestCase { 21 | 22 | public static final String TAG = C4ViewTest.class.getSimpleName(); 23 | 24 | public static final String VIEW_INDEX_FILENAME = "forest_temp.view.index"; 25 | 26 | protected View view = null; 27 | protected File indexFile = null; 28 | protected Indexer indexer = null; 29 | 30 | @Override 31 | protected void setUp() throws Exception { 32 | super.setUp(); 33 | indexFile = new File(mContext.getFilesDir(), VIEW_INDEX_FILENAME); 34 | if (indexFile.exists()) { 35 | if (!indexFile.delete()) { 36 | fail(); 37 | } 38 | } 39 | view = new View(db, indexFile.getPath(), Database.Create, 0, null, "myview", "1"); 40 | assertNotNull(view); 41 | } 42 | 43 | @Override 44 | protected void tearDown() throws Exception { 45 | if (view != null) { 46 | view.delete(); 47 | view = null; 48 | } 49 | super.tearDown(); 50 | } 51 | 52 | public void testEmptyState() { 53 | assertEquals(0, view.getTotalRows()); 54 | assertEquals(0, view.getLastSequenceIndexed()); 55 | assertEquals(0, view.getLastSequenceChangedAt()); 56 | } 57 | 58 | protected void createIndex() throws ForestException { 59 | for (int i = 1; i <= 100; i++) { 60 | String docID = String.format("doc-%03d", i); 61 | createRev(docID, kRevID, kBody.getBytes()); 62 | } 63 | updateIndex(); 64 | } 65 | 66 | protected void updateIndex() throws ForestException { 67 | boolean commit = false; 68 | View[] views = {view}; 69 | indexer = new Indexer(views); 70 | try { 71 | DocumentIterator itr = indexer.iterateDocuments(); 72 | Document doc; 73 | while ((doc = itr.nextDocument()) != null) { 74 | try { 75 | // Index 'doc': 76 | Object[] keys = new Object[2]; 77 | byte[][] values = new byte[2][]; 78 | keys[0] = doc.getDocID(); 79 | keys[1] = doc.getSelectedSequence(); 80 | values[0] = "1234".getBytes(); 81 | values[1] = "1234".getBytes(); 82 | indexer.emit(doc, 0, keys, values); 83 | } finally { 84 | doc.free(); 85 | } 86 | } 87 | commit = true; 88 | } finally { 89 | indexer.endIndex(commit); 90 | } 91 | } 92 | 93 | public void testCreateIndex() throws ForestException { 94 | createIndex(); 95 | 96 | assertEquals(200, view.getTotalRows()); 97 | assertEquals(100, view.getLastSequenceIndexed()); 98 | assertEquals(100, view.getLastSequenceChangedAt()); 99 | } 100 | 101 | public void testQueryIndex() throws ForestException { 102 | createIndex(); 103 | 104 | QueryIterator e = view.query(); 105 | assertNotNull(e); 106 | 107 | int i = 0; 108 | while (e.next()) { 109 | ++i; 110 | String buff; 111 | if (i <= 100) { 112 | buff = String.format("%d", i); 113 | assertEquals(i, e.sequence()); 114 | } else { 115 | buff = String.format("\"doc-%03d\"", i - 100); 116 | assertEquals(i - 100, e.sequence()); 117 | } 118 | assertEquals(buff, new String(e.keyJSON())); 119 | assertTrue(Arrays.equals("1234".getBytes(), e.valueJSON())); 120 | } 121 | assertEquals(200, i); 122 | } 123 | 124 | public void testIndexVersion() throws ForestException { 125 | createIndex(); 126 | 127 | // Reopen view with same version string: 128 | view.close(); 129 | view = null; 130 | view = new View(db, indexFile.getPath(), 
Database.Create, 0, null, "myview", "1"); 131 | assertNotNull(view); 132 | 133 | assertEquals(200, view.getTotalRows()); 134 | assertEquals(100, view.getLastSequenceIndexed()); 135 | assertEquals(100, view.getLastSequenceChangedAt()); 136 | 137 | // Reopen view with different version string: 138 | view.close(); 139 | view = null; 140 | view = new View(db, indexFile.getPath(), Database.Create, 0, null, "myview", "2"); 141 | assertNotNull(view); 142 | 143 | assertEquals(0, view.getTotalRows()); 144 | assertEquals(0, view.getLastSequenceIndexed()); 145 | assertEquals(0, view.getLastSequenceChangedAt()); 146 | } 147 | 148 | public void testDocPurge() throws ForestException { 149 | testDocPurge(false); 150 | } 151 | 152 | public void testDocPurgeWithCompact() throws ForestException { 153 | testDocPurge(true); 154 | } 155 | 156 | protected void testDocPurge(boolean compactAfterPurge) throws ForestException { 157 | createIndex(); 158 | 159 | long lastIndexed = view.getLastSequenceIndexed(); 160 | long lastSeq = db.getLastSequence(); 161 | assertEquals(lastIndexed, lastSeq); 162 | 163 | boolean commit = false; 164 | db.beginTransaction(); 165 | try { 166 | db.purgeDoc("doc-023"); 167 | commit = true; 168 | } finally { 169 | db.endTransaction(commit); 170 | } 171 | 172 | if (compactAfterPurge) 173 | db.compact(); 174 | 175 | // ForestDB assigns sequences to deletions, so the purge bumped the db's sequence, 176 | // invalidating the view index: 177 | lastIndexed = view.getLastSequenceIndexed(); 178 | lastSeq = db.getLastSequence(); 179 | assertTrue(lastIndexed < lastSeq); 180 | 181 | updateIndex(); 182 | 183 | // Verify that the purged doc is no longer in the index: 184 | QueryIterator itr = view.query(); 185 | assertNotNull(itr); 186 | int i = 0; 187 | while (itr.next()) 188 | ++i; 189 | assertEquals(198, i); // 2 rows of doc-023 are gone 190 | } 191 | 192 | /** 193 | * @param odd 0 or 1 194 | */ 195 | protected void createIndex(int odd) throws ForestException { 196 | for (int i = 1; i <= 100; i++) { 197 | String docID = String.format("doc-%03d", i); 198 | createRev(docID, kRevID, kBody.getBytes()); 199 | } 200 | 201 | boolean commit = false; 202 | View[] views = {view}; 203 | indexer = new Indexer(views); 204 | try { 205 | DocumentIterator itr = indexer.iterateDocuments(); 206 | try { 207 | int i = 1; 208 | Document doc; 209 | while ((doc = itr.nextDocument()) != null) { 210 | if (i % 2 == odd) { 211 | Object[] keys = new Object[2]; 212 | byte[][] values = new byte[2][]; 213 | keys[0] = doc.getDocID(); 214 | keys[1] = doc.getSelectedSequence(); 215 | values[0] = "1234".getBytes(); 216 | values[1] = "1234".getBytes(); 217 | indexer.emit(doc, 0, keys, values); 218 | } else { 219 | indexer.emit(doc, 0, new Object[0], null); 220 | } 221 | i++; 222 | } 223 | } finally { 224 | } 225 | commit = true; 226 | } finally { 227 | indexer.endIndex(commit); 228 | } 229 | } 230 | 231 | public void testCreateIndexOdd() throws ForestException { 232 | // Index Odd number document 233 | createIndex(1); 234 | assertEquals(100, view.getTotalRows()); 235 | assertEquals(100, view.getLastSequenceIndexed()); 236 | assertEquals(99, view.getLastSequenceChangedAt()); 237 | } 238 | 239 | public void testCreateIndexEven() throws ForestException { 240 | // Index Even number document 241 | createIndex(0); 242 | assertEquals(100, view.getTotalRows()); 243 | assertEquals(100, view.getLastSequenceIndexed()); 244 | assertEquals(100, view.getLastSequenceChangedAt()); 245 | } 246 | } 247 | 
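
C4ViewTest above exercises the full view lifecycle: open a View over the Database, run an Indexer over the documents changed since the last index, emit key/value rows, commit, then read the rows back through a QueryIterator. The condensed sketch below is a hedged illustration, not repository code; it reuses only the calls exercised by the test, and the class/method names, the index-path argument, and the single emitted key/value per document are assumptions.

```
// Hedged sketch of the index-then-query flow exercised by C4ViewTest above.
import com.couchbase.cbforest.Database;
import com.couchbase.cbforest.Document;
import com.couchbase.cbforest.DocumentIterator;
import com.couchbase.cbforest.ForestException;
import com.couchbase.cbforest.Indexer;
import com.couchbase.cbforest.QueryIterator;
import com.couchbase.cbforest.View;

public class ViewFlowSketch {
    static void indexAndQuery(Database db, String indexPath) throws ForestException {
        // 1. Open (or create) the view's index file, tagged with a map-version string.
        View view = new View(db, indexPath, Database.Create, 0, null, "myview", "1");

        // 2. Re-index: feed each changed document to the indexer and emit rows.
        Indexer indexer = new Indexer(new View[]{view});
        boolean commit = false;
        try {
            DocumentIterator itr = indexer.iterateDocuments();
            Document doc;
            while ((doc = itr.nextDocument()) != null) {
                try {
                    indexer.emit(doc, 0,
                            new Object[]{doc.getDocID()},   // keys
                            new byte[][]{"1".getBytes()});  // values
                } finally {
                    doc.free();
                }
            }
            commit = true;
        } finally {
            indexer.endIndex(commit); // persists the new index entries only on commit
        }

        // 3. Query: iterate the rows that were just emitted, in key order.
        QueryIterator e = view.query();
        while (e.next())
            System.out.println(new String(e.keyJSON()) + " -> " + new String(e.valueJSON()));

        view.close();
    }
}
```

As in updateIndex() above, the commit flag is only set to true once every document has been emitted, and is then handed to endIndex().
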
-------------------------------------------------------------------------------- /jni/Android.mk: -------------------------------------------------------------------------------- 1 | # File: Android.mk 2 | LOCAL_PATH := $(call my-dir) 3 | PARENT_LOCAL_PATH := $(wildcard ..) 4 | 5 | 6 | include $(CLEAR_VARS) 7 | LOCAL_MODULE := libcrypto 8 | LOCAL_SRC_FILES := $(PARENT_LOCAL_PATH)/vendor/cbforest/vendor/openssl/libs/android/$(TARGET_ARCH_ABI)/libcrypto.a 9 | include $(PREBUILT_STATIC_LIBRARY) 10 | 11 | 12 | include $(CLEAR_VARS) 13 | 14 | LOCAL_MODULE := CouchbaseLiteJavaForestDB 15 | 16 | FORESTDB_PATH := $(PARENT_LOCAL_PATH)/vendor/cbforest/vendor/forestdb 17 | SNAPPY_PATH := $(PARENT_LOCAL_PATH)/vendor/cbforest/vendor/snappy 18 | SQLITE3_PATH := $(PARENT_LOCAL_PATH)/vendor/cbforest/vendor/sqlite3-unicodesn 19 | SQLITE_INC_PATH := $(PARENT_LOCAL_PATH)/vendor/sqlite 20 | OPENSSL_PATH := $(PARENT_LOCAL_PATH)/vendor/cbforest/vendor/openssl/libs/include 21 | CBFOREST_PATH := $(PARENT_LOCAL_PATH)/vendor/cbforest/CBForest 22 | CBFOREST_C_PATH := $(PARENT_LOCAL_PATH)/vendor/cbforest/C 23 | CBFOREST_JAVA_PATH := $(PARENT_LOCAL_PATH)/vendor/cbforest/Java 24 | CBFOREST_JNI_PATH := $(PARENT_LOCAL_PATH)/vendor/cbforest/Java/jni 25 | FORESTDB_STORE_PATH := $(PARENT_LOCAL_PATH)/jni/source 26 | 27 | LOCAL_CFLAGS := -I$(SQLITE3_PATH)/libstemmer_c/runtime/ \ 28 | -I$(SQLITE3_PATH)/libstemmer_c/src_c/ \ 29 | -I$(SQLITE3_PATH)/ \ 30 | -I$(SQLITE_INC_PATH)/ \ 31 | -I$(OPENSSL_PATH)/ 32 | 33 | # For sqlite3-unicodesn 34 | LOCAL_CFLAGS += -DSQLITE_ENABLE_FTS4 \ 35 | -DSQLITE_ENABLE_FTS4_UNICODE61 \ 36 | -DWITH_STEMMER_english \ 37 | -DDOC_COMP \ 38 | -D_DOC_COMP \ 39 | -DHAVE_GCC_ATOMICS=1 \ 40 | -D_CRYPTO_OPENSSL 41 | 42 | LOCAL_CPPFLAGS := -I$(FORESTDB_PATH)/include/ \ 43 | -I$(FORESTDB_PATH)/include/libforestdb/ \ 44 | -I$(FORESTDB_PATH)/src/ \ 45 | -I$(FORESTDB_PATH)/utils/ \ 46 | -I$(FORESTDB_PATH)/option/ \ 47 | -I$(SNAPPY_PATH)/ \ 48 | -I$(OPENSSL_PATH)/ \ 49 | -I$(CBFOREST_PATH)/ \ 50 | -I$(CBFOREST_C_PATH)/ \ 51 | -I$(CBFOREST_JNI_PATH)/ \ 52 | -I$(FORESTDB_STORE_PATH)/ 53 | 54 | ifeq ($(TARGET_ARCH),mips) 55 | LOCAL_CFLAGS += -D__mips32__ 56 | LOCAL_CFLAGS += -D_ALIGN_MEM_ACCESS 57 | LOCAL_CPPFLAGS += -D_ALIGN_MEM_ACCESS 58 | endif 59 | ifeq ($(TARGET_ARCH),arm) 60 | LOCAL_CFLAGS += -D_ALIGN_MEM_ACCESS 61 | LOCAL_CPPFLAGS += -D_ALIGN_MEM_ACCESS 62 | endif 63 | 64 | # https://github.com/couchbase/couchbase-lite-java-core/issues/1437 65 | ifeq ($(TARGET_ARCH),x86) 66 | LOCAL_CFLAGS += -mstackrealign 67 | LOCAL_CPPFLAGS += -mstackrealign 68 | endif 69 | ifeq ($(TARGET_ARCH),x86_64) 70 | LOCAL_CFLAGS += -mstackrealign 71 | LOCAL_CPPFLAGS += -mstackrealign 72 | endif 73 | 74 | LOCAL_CPPFLAGS += -std=c++11 75 | LOCAL_CPPFLAGS += -fexceptions 76 | LOCAL_CPPFLAGS += -fpermissive 77 | LOCAL_CPPFLAGS += -frtti 78 | LOCAL_CPPFLAGS += -D__ANDROID__ 79 | LOCAL_CPPFLAGS += -DC4DB_THREADSAFE 80 | LOCAL_CPPFLAGS += -DFORESTDB_VERSION=\"internal\" 81 | #LOCAL_CPPFLAGS += -DNO_CBFOREST_ENCRYPTION 82 | 83 | # this requires for stdint.h active if android sdk is lower than or equal to android-19 84 | # With android-21, it seems no longer necessary. 
85 | # http://stackoverflow.com/questions/986426/what-do-stdc-limit-macros-and-stdc-constant-macros-mean 86 | LOCAL_CPPFLAGS += -D__STDC_LIMIT_MACROS 87 | #LOCAL_CPPFLAGS += -g -O0 # for debugging 88 | LOCAL_CPPFLAGS += -O2 89 | LOCAL_CPPFLAGS += -Wno-unused-value 90 | LOCAL_CPPFLAGS += -Wno-deprecated-register 91 | LOCAL_CPPFLAGS += -fexceptions 92 | 93 | LOCAL_CPP_FEATURES += rtti 94 | LOCAL_CPP_FEATURES += exceptions 95 | 96 | # 97 | PCH_FILE := $(CBFOREST_PATH)/CBForest-Prefix.pch 98 | LOCAL_CPPFLAGS += -include $(PCH_FILE) 99 | 100 | LOCAL_LDLIBS := -llog 101 | LOCAL_LDLIBS += -latomic 102 | 103 | LOCAL_SRC_FILES := $(SQLITE3_PATH)/fts3_unicode2.c \ 104 | $(SQLITE3_PATH)/fts3_unicodesn.c \ 105 | $(SQLITE3_PATH)/libstemmer_c/runtime/api_sq3.c \ 106 | $(SQLITE3_PATH)/libstemmer_c/runtime/utilities_sq3.c \ 107 | $(SQLITE3_PATH)/libstemmer_c/libstemmer/libstemmer_utf8.c \ 108 | $(SQLITE3_PATH)/libstemmer_c/src_c/stem_ISO_8859_1_danish.c \ 109 | $(SQLITE3_PATH)/libstemmer_c/src_c/stem_ISO_8859_1_dutch.c \ 110 | $(SQLITE3_PATH)/libstemmer_c/src_c/stem_ISO_8859_1_english.c \ 111 | $(SQLITE3_PATH)/libstemmer_c/src_c/stem_ISO_8859_1_finnish.c \ 112 | $(SQLITE3_PATH)/libstemmer_c/src_c/stem_ISO_8859_1_french.c \ 113 | $(SQLITE3_PATH)/libstemmer_c/src_c/stem_ISO_8859_1_german.c \ 114 | $(SQLITE3_PATH)/libstemmer_c/src_c/stem_ISO_8859_1_hungarian.c \ 115 | $(SQLITE3_PATH)/libstemmer_c/src_c/stem_ISO_8859_1_italian.c \ 116 | $(SQLITE3_PATH)/libstemmer_c/src_c/stem_ISO_8859_1_norwegian.c \ 117 | $(SQLITE3_PATH)/libstemmer_c/src_c/stem_ISO_8859_1_porter.c \ 118 | $(SQLITE3_PATH)/libstemmer_c/src_c/stem_ISO_8859_1_portuguese.c \ 119 | $(SQLITE3_PATH)/libstemmer_c/src_c/stem_ISO_8859_1_spanish.c \ 120 | $(SQLITE3_PATH)/libstemmer_c/src_c/stem_ISO_8859_1_swedish.c \ 121 | $(SQLITE3_PATH)/libstemmer_c/src_c/stem_ISO_8859_2_romanian.c \ 122 | $(SQLITE3_PATH)/libstemmer_c/src_c/stem_KOI8_R_russian.c \ 123 | $(SQLITE3_PATH)/libstemmer_c/src_c/stem_UTF_8_danish.c \ 124 | $(SQLITE3_PATH)/libstemmer_c/src_c/stem_UTF_8_dutch.c \ 125 | $(SQLITE3_PATH)/libstemmer_c/src_c/stem_UTF_8_english.c \ 126 | $(SQLITE3_PATH)/libstemmer_c/src_c/stem_UTF_8_finnish.c \ 127 | $(SQLITE3_PATH)/libstemmer_c/src_c/stem_UTF_8_french.c \ 128 | $(SQLITE3_PATH)/libstemmer_c/src_c/stem_UTF_8_german.c \ 129 | $(SQLITE3_PATH)/libstemmer_c/src_c/stem_UTF_8_hungarian.c \ 130 | $(SQLITE3_PATH)/libstemmer_c/src_c/stem_UTF_8_italian.c \ 131 | $(SQLITE3_PATH)/libstemmer_c/src_c/stem_UTF_8_norwegian.c \ 132 | $(SQLITE3_PATH)/libstemmer_c/src_c/stem_UTF_8_porter.c \ 133 | $(SQLITE3_PATH)/libstemmer_c/src_c/stem_UTF_8_portuguese.c \ 134 | $(SQLITE3_PATH)/libstemmer_c/src_c/stem_UTF_8_romanian.c \ 135 | $(SQLITE3_PATH)/libstemmer_c/src_c/stem_UTF_8_russian.c \ 136 | $(SQLITE3_PATH)/libstemmer_c/src_c/stem_UTF_8_spanish.c \ 137 | $(SQLITE3_PATH)/libstemmer_c/src_c/stem_UTF_8_swedish.c \ 138 | $(SQLITE3_PATH)/libstemmer_c/src_c/stem_UTF_8_turkish.c \ 139 | $(FORESTDB_PATH)/utils/crc32.cc \ 140 | $(FORESTDB_PATH)/utils/debug.cc \ 141 | $(FORESTDB_PATH)/utils/iniparser.cc \ 142 | $(FORESTDB_PATH)/utils/memleak.cc \ 143 | $(FORESTDB_PATH)/utils/partiallock.cc \ 144 | $(FORESTDB_PATH)/utils/system_resource_stats.cc \ 145 | $(FORESTDB_PATH)/utils/time_utils.cc \ 146 | $(FORESTDB_PATH)/src/api_wrapper.cc \ 147 | $(FORESTDB_PATH)/src/avltree.cc \ 148 | $(FORESTDB_PATH)/src/bgflusher.cc \ 149 | $(FORESTDB_PATH)/src/blockcache.cc \ 150 | $(FORESTDB_PATH)/src/breakpad_dummy.cc \ 151 | $(FORESTDB_PATH)/src/btree.cc \ 152 | 
$(FORESTDB_PATH)/src/btree_fast_str_kv.cc \ 153 | $(FORESTDB_PATH)/src/btree_kv.cc \ 154 | $(FORESTDB_PATH)/src/btree_str_kv.cc \ 155 | $(FORESTDB_PATH)/src/btreeblock.cc \ 156 | $(FORESTDB_PATH)/src/checksum.cc \ 157 | $(FORESTDB_PATH)/src/compactor.cc \ 158 | $(FORESTDB_PATH)/src/configuration.cc \ 159 | $(FORESTDB_PATH)/src/docio.cc \ 160 | $(FORESTDB_PATH)/src/encryption_aes.cc \ 161 | $(FORESTDB_PATH)/src/encryption_bogus.cc \ 162 | $(FORESTDB_PATH)/src/encryption.cc \ 163 | $(FORESTDB_PATH)/src/fdb_errors.cc \ 164 | $(FORESTDB_PATH)/src/filemgr.cc \ 165 | $(FORESTDB_PATH)/src/filemgr_ops.cc \ 166 | $(FORESTDB_PATH)/src/filemgr_ops_linux.cc \ 167 | $(FORESTDB_PATH)/src/filemgr_ops_windows.cc \ 168 | $(FORESTDB_PATH)/src/forestdb.cc \ 169 | $(FORESTDB_PATH)/src/hash.cc \ 170 | $(FORESTDB_PATH)/src/hash_functions.cc \ 171 | $(FORESTDB_PATH)/src/hbtrie.cc \ 172 | $(FORESTDB_PATH)/src/iterator.cc \ 173 | $(FORESTDB_PATH)/src/kv_instance.cc \ 174 | $(FORESTDB_PATH)/src/list.cc \ 175 | $(FORESTDB_PATH)/src/staleblock.cc \ 176 | $(FORESTDB_PATH)/src/superblock.cc \ 177 | $(FORESTDB_PATH)/src/transaction.cc \ 178 | $(FORESTDB_PATH)/src/version.cc \ 179 | $(FORESTDB_PATH)/src/wal.cc \ 180 | $(SNAPPY_PATH)/snappy.cc \ 181 | $(SNAPPY_PATH)/snappy-c.cc \ 182 | $(SNAPPY_PATH)/snappy-sinksource.cc \ 183 | $(SNAPPY_PATH)/snappy-stubs-internal.cc \ 184 | $(CBFOREST_PATH)/slice.cc \ 185 | $(CBFOREST_PATH)/sqlite_glue.c \ 186 | $(CBFOREST_PATH)/varint.cc \ 187 | $(CBFOREST_PATH)/Collatable.cc \ 188 | $(CBFOREST_PATH)/Database.cc \ 189 | $(CBFOREST_PATH)/DocEnumerator.cc \ 190 | $(CBFOREST_PATH)/Document.cc \ 191 | $(CBFOREST_PATH)/Error.cc \ 192 | $(CBFOREST_PATH)/FullTextIndex.cc \ 193 | $(CBFOREST_PATH)/Geohash.cc \ 194 | $(CBFOREST_PATH)/GeoIndex.cc \ 195 | $(CBFOREST_PATH)/Index.cc \ 196 | $(CBFOREST_PATH)/KeyStore.cc \ 197 | $(CBFOREST_PATH)/RevID.cc \ 198 | $(CBFOREST_PATH)/RevTree.cc \ 199 | $(CBFOREST_PATH)/VersionedDocument.cc \ 200 | $(CBFOREST_PATH)/MapReduceIndex.cc \ 201 | $(CBFOREST_PATH)/Tokenizer.cc \ 202 | $(CBFOREST_C_PATH)/c4.c \ 203 | $(CBFOREST_C_PATH)/c4Database.cc \ 204 | $(CBFOREST_C_PATH)/c4DocEnumerator.cc \ 205 | $(CBFOREST_C_PATH)/c4Document.cc \ 206 | $(CBFOREST_C_PATH)/c4ExpiryEnumerator.cc \ 207 | $(CBFOREST_C_PATH)/c4Key.cc \ 208 | $(CBFOREST_C_PATH)/c4View.cc \ 209 | $(CBFOREST_JNI_PATH)/native_database.cc \ 210 | $(CBFOREST_JNI_PATH)/native_document.cc \ 211 | $(CBFOREST_JNI_PATH)/native_documentiterator.cc \ 212 | $(CBFOREST_JNI_PATH)/native_glue.cc \ 213 | $(CBFOREST_JNI_PATH)/native_indexer.cc \ 214 | $(CBFOREST_JNI_PATH)/native_queryIterator.cc \ 215 | $(CBFOREST_JNI_PATH)/native_view.cc \ 216 | $(FORESTDB_STORE_PATH)/native_forestdbstore.cc 217 | 218 | LOCAL_STATIC_LIBRARIES := libcrypto 219 | 220 | include $(BUILD_SHARED_LIBRARY) -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 
15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. 
Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 
135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 
194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. -------------------------------------------------------------------------------- /src/androidTest/java/com/couchbase/cbforest/C4DatabaseTest.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Created by Hideki Itakura on 10/20/2015. 3 | * Copyright (c) 2015 Couchbase, Inc All rights reserved. 4 | *
5 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file 6 | * except in compliance with the License. You may obtain a copy of the License at 7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | *
10 | * Unless required by applicable law or agreed to in writing, software distributed under the 11 | * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, 12 | * either express or implied. See the License for the specific language governing permissions 13 | * and limitations under the License. 14 | */ 15 | package com.couchbase.cbforest; 16 | 17 | import java.util.ArrayList; 18 | import java.util.Arrays; 19 | import java.util.List; 20 | import java.util.Random; 21 | 22 | /** 23 | * Ported from c4DatabaseTest.cc 24 | */ 25 | public class C4DatabaseTest extends C4TestCase { 26 | 27 | public void testForestException() { 28 | try { 29 | new Database("", 0, 0, null); 30 | fail(); 31 | } catch (ForestException e) { 32 | assertEquals(Constants.C4ErrorDomain.ForestDBDomain, e.domain); 33 | assertEquals(Constants.FDBErrors.FDB_RESULT_NO_SUCH_FILE, e.code); 34 | assertEquals("no such file", e.getMessage()); 35 | } 36 | 37 | try { 38 | db.getDocument("a", true); 39 | fail(); 40 | } catch (ForestException e) { 41 | assertEquals(Constants.C4ErrorDomain.ForestDBDomain, e.domain); 42 | assertEquals(Constants.FDBErrors.FDB_RESULT_KEY_NOT_FOUND, e.code); 43 | assertEquals("key not found", e.getMessage()); 44 | } 45 | 46 | try { 47 | db.getDocument(null, true); 48 | fail(); 49 | } catch (ForestException e) { 50 | assertEquals(Constants.C4ErrorDomain.ForestDBDomain, e.domain); 51 | assertEquals(Constants.FDBErrors.FDB_RESULT_INVALID_ARGS, e.code); 52 | assertEquals("invalid arguments", e.getMessage()); 53 | } 54 | } 55 | 56 | public void testTransaction() throws ForestException { 57 | assertEquals(0, db.getDocumentCount()); 58 | assertFalse(db.isInTransaction()); 59 | db.beginTransaction(); 60 | assertTrue(db.isInTransaction()); 61 | db.beginTransaction(); 62 | assertTrue(db.isInTransaction()); 63 | db.endTransaction(true); 64 | assertTrue(db.isInTransaction()); 65 | db.endTransaction(true); 66 | assertFalse(db.isInTransaction()); 67 | } 68 | 69 | public void testCreateRawDoc() throws ForestException { 70 | // 1. normal case 71 | final String store = "test"; 72 | final String key = "key"; 73 | final String meta = "meta"; 74 | boolean commit = false; 75 | db.beginTransaction(); 76 | try { 77 | db.rawPut(store, key, meta.getBytes(), kBody.getBytes()); 78 | commit = true; 79 | } finally { 80 | db.endTransaction(commit); 81 | } 82 | byte[][] metaNbody = db.rawGet(store, key); 83 | assertNotNull(metaNbody); 84 | assertEquals(2, metaNbody.length); 85 | assertTrue(Arrays.equals(meta.getBytes(), metaNbody[0])); 86 | assertTrue(Arrays.equals(kBody.getBytes(), metaNbody[1])); 87 | } 88 | 89 | public void testCreateRawDocWithNullValue() throws ForestException { 90 | final String store = "test"; 91 | final String key = "key"; 92 | final String meta = "meta"; 93 | 94 | // 2. null meta 95 | boolean commit = false; 96 | db.beginTransaction(); 97 | try { 98 | db.rawPut(store, key, null, kBody.getBytes()); 99 | commit = true; 100 | } finally { 101 | db.endTransaction(commit); 102 | } 103 | byte[][] metaNbody = db.rawGet(store, key); 104 | assertNotNull(metaNbody); 105 | assertEquals(2, metaNbody.length); 106 | assertTrue(Arrays.equals(null, metaNbody[0])); 107 | assertTrue(Arrays.equals(kBody.getBytes(), metaNbody[1])); 108 | 109 | // 3. 
null body 110 | commit = false; 111 | db.beginTransaction(); 112 | try { 113 | db.rawPut(store, key, meta.getBytes(), null); 114 | commit = true; 115 | } finally { 116 | db.endTransaction(commit); 117 | } 118 | metaNbody = db.rawGet(store, key); 119 | assertNotNull(metaNbody); 120 | assertEquals(2, metaNbody.length); 121 | assertTrue(Arrays.equals(meta.getBytes(), metaNbody[0])); 122 | assertTrue(Arrays.equals(null, metaNbody[1])); 123 | 124 | // null meta and null body -> it delete 125 | commit = false; 126 | db.beginTransaction(); 127 | try { 128 | db.rawPut(store, key, null, null); 129 | commit = true; 130 | } finally { 131 | db.endTransaction(commit); 132 | } 133 | 134 | try { 135 | db.rawGet(store, key); 136 | fail("ForestException should be thrown"); 137 | } catch (ForestException e) { 138 | assertEquals(Constants.C4ErrorDomain.ForestDBDomain, e.domain); 139 | assertEquals(FDBErrors.FDB_RESULT_KEY_NOT_FOUND, e.code); 140 | } 141 | } 142 | 143 | public void testCreateVersionedDoc() throws ForestException { 144 | // Try reading doc with mustExist=true, which should fail: 145 | Document doc = null; 146 | try { 147 | doc = db.getDocument(kDocID, true); 148 | fail("Should be thrown ForestException"); 149 | } catch (ForestException e) { 150 | assertEquals(FDBErrors.FDB_RESULT_KEY_NOT_FOUND, e.code); 151 | assertEquals(C4ErrorDomain.ForestDBDomain, e.domain); 152 | } 153 | assertNull(doc); 154 | 155 | // Now get the doc with mustExist=false, which returns an empty doc: 156 | doc = db.getDocument(kDocID, false); 157 | assertNotNull(doc); 158 | assertEquals(0, doc.getFlags()); 159 | assertEquals(kDocID, doc.getDocID()); 160 | assertNull(doc.getRevID()); 161 | { 162 | boolean commit = false; 163 | db.beginTransaction(); 164 | try { 165 | doc.insertRevision(kRevID, kBody.getBytes(), false, false, false); 166 | assertEquals(kRevID, doc.getRevID()); 167 | assertEquals(kRevID, doc.getSelectedRevID()); 168 | assertEquals(C4RevisionFlags.kRevNew | C4RevisionFlags.kRevLeaf, doc.getSelectedRevFlags()); 169 | assertTrue(Arrays.equals(kBody.getBytes(), doc.getSelectedBody())); 170 | doc.save(20); // 20 is default value of maxRevTreeDepth which is defined in Database.java and ForestDBStore.java 171 | commit = true; 172 | } finally { 173 | db.endTransaction(commit); 174 | } 175 | } 176 | doc.free(); 177 | 178 | // Reload the doc: 179 | doc = db.getDocument(kDocID, true); 180 | assertNotNull(doc); 181 | assertEquals(C4DocumentFlags.kExists, doc.getFlags()); 182 | assertEquals(kDocID, doc.getDocID()); 183 | assertEquals(kRevID, doc.getRevID()); 184 | assertEquals(kRevID, doc.getSelectedRevID()); 185 | assertEquals(1, doc.getSelectedSequence()); 186 | assertTrue(Arrays.equals(kBody.getBytes(), doc.getSelectedBody())); 187 | doc.free(); 188 | 189 | // Get the doc by its sequence 190 | doc = db.getDocumentBySequence(1); 191 | assertNotNull(doc); 192 | assertEquals(C4DocumentFlags.kExists, doc.getFlags()); 193 | assertEquals(kDocID, doc.getDocID()); 194 | assertEquals(kRevID, doc.getRevID()); 195 | assertEquals(kRevID, doc.getSelectedRevID()); 196 | assertEquals(1, doc.getSelectedSequence()); 197 | assertTrue(Arrays.equals(kBody.getBytes(), doc.getSelectedBody())); 198 | } 199 | 200 | public void testCreateMultipleRevisions() throws ForestException { 201 | final String kBody2 = "{\"ok\":\"go\"}"; 202 | createRev(kDocID, kRevID, kBody.getBytes()); 203 | createRev(kDocID, kRev2ID, kBody2.getBytes()); 204 | createRev(kDocID, kRev2ID, kBody2.getBytes(), false);// test redundant insert 205 | 206 | // Reload the doc: 
207 | Document doc = db.getDocument(kDocID, true); 208 | assertNotNull(doc); 209 | assertEquals(C4DocumentFlags.kExists, doc.getFlags()); 210 | assertEquals(kDocID, doc.getDocID()); 211 | assertEquals(kRev2ID, doc.getRevID()); 212 | assertEquals(kRev2ID, doc.getSelectedRevID()); 213 | assertEquals(2, doc.getSelectedSequence()); 214 | assertTrue(Arrays.equals(kBody2.getBytes(), doc.getSelectedBody())); 215 | 216 | // Select 1st revision: 217 | assertTrue(doc.selectParentRev()); 218 | assertEquals(kRevID, doc.getSelectedRevID()); 219 | assertEquals(1, doc.getSelectedSequence()); 220 | assertNull(doc.getSelectedBodyTest()); 221 | assertTrue(Arrays.equals(kBody.getBytes(), doc.getSelectedBody())); 222 | assertFalse(doc.selectParentRev()); 223 | 224 | // Compact database: 225 | db.compact(); 226 | 227 | // Reload the doc: 228 | doc = db.getDocument(kDocID, true); 229 | assertNotNull(doc); 230 | assertTrue(doc.selectParentRev()); 231 | assertEquals(kRevID, doc.getSelectedRevID()); 232 | assertEquals(1, doc.getSelectedSequence()); 233 | assertNull(doc.getSelectedBodyTest()); 234 | assertFalse(doc.hasRevisionBody()); 235 | try { 236 | doc.getSelectedBody(); 237 | fail("should be thrown exception"); 238 | } catch (ForestException e) { 239 | assertEquals(Constants.C4ErrorDomain.HTTPDomain, e.domain); 240 | assertEquals(410, e.code); 241 | } 242 | 243 | // Purge doc 244 | { 245 | boolean commit = false; 246 | db.beginTransaction(); 247 | try { 248 | int nPurged = doc.purgeRevision(kRev2ID); 249 | assertEquals(2, nPurged); 250 | doc.save(20); 251 | commit = true; 252 | } finally { 253 | db.endTransaction(commit); 254 | } 255 | } 256 | } 257 | 258 | // JNI does not wrap c4doc_getForPut 259 | //public void testGetForPut() throws ForestException { 260 | //} 261 | 262 | public void testInsertRevisionWithHistory() throws ForestException { 263 | _testInsertRevisionWithHistory(20); 264 | } 265 | 266 | public void testInsertRevisionWith512History() throws ForestException { 267 | _testInsertRevisionWithHistory(512); 268 | } 269 | 270 | public void testInsertRevisionWith1024History() throws ForestException { 271 | _testInsertRevisionWithHistory(1024); 272 | } 273 | 274 | public void _testInsertRevisionWithHistory(int kHistoryCount) throws ForestException { 275 | String kBody2 = "{\"ok\":\"go\"}"; 276 | createRev(kDocID, kRevID, kBody.getBytes()); 277 | createRev(kDocID, kRev2ID, kBody2.getBytes()); 278 | 279 | // Reload the doc: 280 | Document doc = db.getDocument(kDocID, true); 281 | 282 | // Add 18 revisions; the last two entries in the history repeat the two existing revs: 283 | Random r = new Random(); 284 | List revIDs = new ArrayList(); 285 | for (int i = kHistoryCount - 1; i >= 2; i--) { 286 | String revID = String.format("%d-%08x", i + 1, r.nextInt()); 287 | revIDs.add(revID); 288 | } 289 | revIDs.add(kRev2ID); 290 | revIDs.add(kRevID); 291 | 292 | int n; 293 | { 294 | boolean commit = false; 295 | db.beginTransaction(); 296 | try { 297 | n = doc.insertRevisionWithHistory("{\"foo\":true}".getBytes(), 298 | false, false, 299 | revIDs.toArray(new String[revIDs.size()])); 300 | commit = true; 301 | } finally { 302 | db.endTransaction(commit); 303 | } 304 | } 305 | assertEquals(kHistoryCount - 2, n); 306 | } 307 | 308 | // JNI does not wrap c4doc_put 309 | //public void testPut() throws ForestException { 310 | //} 311 | 312 | private void setupAllDocs() throws ForestException { 313 | for (int i = 1; i < 100; i++) { 314 | String docID = String.format("doc-%03d", i); 315 | createRev(docID, kRevID, 
kBody.getBytes()); 316 | } 317 | 318 | // Add a deleted doc to make sure it's skipped by default: 319 | createRev("doc-005DEL", kRevID, null); 320 | } 321 | 322 | public void testAllDocs() throws ForestException { 323 | setupAllDocs(); 324 | 325 | // No start or end ID: 326 | int iteratorFlags = IteratorFlags.kDefault; 327 | iteratorFlags &= ~IteratorFlags.kIncludeBodies; 328 | DocumentIterator itr = db.iterator(null, null, 0, iteratorFlags); 329 | assertNotNull(itr); 330 | Document doc; 331 | int i = 1; 332 | while ((doc = itr.nextDocument()) != null) { 333 | try { 334 | String docID = String.format("doc-%03d", i); 335 | assertEquals(docID, doc.getDocID()); 336 | assertEquals(kRevID, doc.getRevID()); 337 | assertEquals(kRevID, doc.getSelectedRevID()); 338 | assertEquals(i, doc.getSelectedSequence()); 339 | assertNull(doc.getSelectedBodyTest()); 340 | // Doc was loaded without its body, but it should load on demand: 341 | assertTrue(Arrays.equals(kBody.getBytes(), doc.getSelectedBody())); 342 | i++; 343 | } finally { 344 | doc.free(); 345 | } 346 | } 347 | assertEquals(100, i); 348 | 349 | // Start and end ID: 350 | itr = db.iterator("doc-007", "doc-090", 0, IteratorFlags.kDefault); 351 | assertNotNull(itr); 352 | i = 7; 353 | while ((doc = itr.nextDocument()) != null) { 354 | try { 355 | String docID = String.format("doc-%03d", i); 356 | assertEquals(docID, doc.getDocID()); 357 | i++; 358 | } finally { 359 | doc.free(); 360 | } 361 | } 362 | assertEquals(91, i); 363 | 364 | // Some docs, by ID: 365 | String[] docIDs = {"doc-042", "doc-007", "bogus", "doc-001"}; 366 | iteratorFlags = IteratorFlags.kDefault; 367 | iteratorFlags |= IteratorFlags.kIncludeDeleted; 368 | itr = db.iterator(docIDs, iteratorFlags); 369 | assertNotNull(itr); 370 | i = 0; 371 | while ((doc = itr.nextDocument()) != null) { 372 | try { 373 | assertEquals(docIDs[i], doc.getDocID()); 374 | assertEquals(i != 2, doc.getSelectedSequence() != 0); 375 | i++; 376 | } finally { 377 | doc.free(); 378 | } 379 | } 380 | assertEquals(4, i); 381 | } 382 | 383 | public void testAllDocsIncludeDeleted() throws ForestException { 384 | setupAllDocs(); 385 | int iteratorFlags = IteratorFlags.kDefault; 386 | iteratorFlags |= IteratorFlags.kIncludeDeleted; 387 | DocumentIterator itr = db.iterator("doc-004", "doc-007", 0, iteratorFlags); 388 | assertNotNull(itr); 389 | Document doc; 390 | int i = 4; 391 | while ((doc = itr.nextDocument()) != null) { 392 | try { 393 | String docID; 394 | if (i == 6) 395 | docID = "doc-005DEL"; 396 | else 397 | docID = String.format("doc-%03d", i >= 6 ? 
i - 1 : i); 398 | assertEquals(docID, doc.getDocID()); 399 | i++; 400 | } finally { 401 | doc.free(); 402 | } 403 | } 404 | assertEquals(9, i); 405 | } 406 | 407 | public void testAllDocsInfo() throws ForestException { 408 | setupAllDocs(); 409 | 410 | // No start or end ID: 411 | int iteratorFlags = IteratorFlags.kDefault; 412 | DocumentIterator itr = db.iterator(null, null, 0, iteratorFlags); 413 | assertNotNull(itr); 414 | Document doc; 415 | int i = 1; 416 | while ((doc = itr.nextDocument()) != null) { 417 | try { 418 | String docID = String.format("doc-%03d", i); 419 | assertEquals(docID, doc.getDocID()); 420 | assertEquals(kRevID, doc.getRevID()); 421 | assertEquals(kRevID, doc.getSelectedRevID()); 422 | assertEquals(i, doc.getSequence()); 423 | assertEquals(i, doc.getSelectedSequence()); 424 | assertEquals(C4DocumentFlags.kExists, doc.getFlags()); 425 | i++; 426 | } finally { 427 | doc.free(); 428 | } 429 | } 430 | assertEquals(100, i); 431 | } 432 | 433 | public void testChanges() throws ForestException { 434 | for (int i = 1; i < 100; i++) { 435 | String docID = String.format("doc-%03d", i); 436 | createRev(docID, kRevID, kBody.getBytes()); 437 | } 438 | 439 | // Since start: 440 | int iteratorFlags = IteratorFlags.kDefault; 441 | iteratorFlags &= ~IteratorFlags.kIncludeBodies; 442 | DocumentIterator itr = new DocumentIterator(db._handle, 0, iteratorFlags); 443 | assertNotNull(itr); 444 | Document doc; 445 | long seq = 1; 446 | while ((doc = itr.nextDocument()) != null) { 447 | try { 448 | String docID = String.format("doc-%03d", seq); 449 | assertEquals(docID, doc.getDocID()); 450 | assertEquals(seq, doc.getSelectedSequence()); 451 | seq++; 452 | } finally { 453 | doc.free(); 454 | } 455 | } 456 | assertEquals(100L, seq); 457 | 458 | // Since 6: 459 | itr = new DocumentIterator(db._handle, 6, iteratorFlags); 460 | assertNotNull(itr); 461 | seq = 7; 462 | while ((doc = itr.nextDocument()) != null) { 463 | try { 464 | String docID = String.format("doc-%03d", seq); 465 | assertEquals(docID, doc.getDocID()); 466 | assertEquals(seq, doc.getSelectedSequence()); 467 | seq++; 468 | } finally { 469 | doc.free(); 470 | } 471 | } 472 | assertEquals(100L, seq); 473 | } 474 | } 475 | -------------------------------------------------------------------------------- /vendor/sqlite/sqlite3ext.h: -------------------------------------------------------------------------------- 1 | /* 2 | ** 2006 June 7 3 | ** 4 | ** The author disclaims copyright to this source code. In place of 5 | ** a legal notice, here is a blessing: 6 | ** 7 | ** May you do good and not evil. 8 | ** May you find forgiveness for yourself and forgive others. 9 | ** May you share freely, never taking more than you give. 10 | ** 11 | ************************************************************************* 12 | ** This header file defines the SQLite interface for use by 13 | ** shared libraries that want to be imported as extensions into 14 | ** an SQLite instance. Shared libraries that intend to be loaded 15 | ** as extensions by SQLite should #include this file instead of 16 | ** sqlite3.h. 17 | */ 18 | #ifndef _SQLITE3EXT_H_ 19 | #define _SQLITE3EXT_H_ 20 | #include "sqlite3.h" 21 | 22 | typedef struct sqlite3_api_routines sqlite3_api_routines; 23 | 24 | /* 25 | ** The following structure holds pointers to all of the SQLite API 26 | ** routines. 27 | ** 28 | ** WARNING: In order to maintain backwards compatibility, add new 29 | ** interfaces to the end of this structure only. 
If you insert new 30 | ** interfaces in the middle of this structure, then older different 31 | ** versions of SQLite will not be able to load each other's shared 32 | ** libraries! 33 | */ 34 | struct sqlite3_api_routines { 35 | void * (*aggregate_context)(sqlite3_context*,int nBytes); 36 | int (*aggregate_count)(sqlite3_context*); 37 | int (*bind_blob)(sqlite3_stmt*,int,const void*,int n,void(*)(void*)); 38 | int (*bind_double)(sqlite3_stmt*,int,double); 39 | int (*bind_int)(sqlite3_stmt*,int,int); 40 | int (*bind_int64)(sqlite3_stmt*,int,sqlite_int64); 41 | int (*bind_null)(sqlite3_stmt*,int); 42 | int (*bind_parameter_count)(sqlite3_stmt*); 43 | int (*bind_parameter_index)(sqlite3_stmt*,const char*zName); 44 | const char * (*bind_parameter_name)(sqlite3_stmt*,int); 45 | int (*bind_text)(sqlite3_stmt*,int,const char*,int n,void(*)(void*)); 46 | int (*bind_text16)(sqlite3_stmt*,int,const void*,int,void(*)(void*)); 47 | int (*bind_value)(sqlite3_stmt*,int,const sqlite3_value*); 48 | int (*busy_handler)(sqlite3*,int(*)(void*,int),void*); 49 | int (*busy_timeout)(sqlite3*,int ms); 50 | int (*changes)(sqlite3*); 51 | int (*close)(sqlite3*); 52 | int (*collation_needed)(sqlite3*,void*,void(*)(void*,sqlite3*, 53 | int eTextRep,const char*)); 54 | int (*collation_needed16)(sqlite3*,void*,void(*)(void*,sqlite3*, 55 | int eTextRep,const void*)); 56 | const void * (*column_blob)(sqlite3_stmt*,int iCol); 57 | int (*column_bytes)(sqlite3_stmt*,int iCol); 58 | int (*column_bytes16)(sqlite3_stmt*,int iCol); 59 | int (*column_count)(sqlite3_stmt*pStmt); 60 | const char * (*column_database_name)(sqlite3_stmt*,int); 61 | const void * (*column_database_name16)(sqlite3_stmt*,int); 62 | const char * (*column_decltype)(sqlite3_stmt*,int i); 63 | const void * (*column_decltype16)(sqlite3_stmt*,int); 64 | double (*column_double)(sqlite3_stmt*,int iCol); 65 | int (*column_int)(sqlite3_stmt*,int iCol); 66 | sqlite_int64 (*column_int64)(sqlite3_stmt*,int iCol); 67 | const char * (*column_name)(sqlite3_stmt*,int); 68 | const void * (*column_name16)(sqlite3_stmt*,int); 69 | const char * (*column_origin_name)(sqlite3_stmt*,int); 70 | const void * (*column_origin_name16)(sqlite3_stmt*,int); 71 | const char * (*column_table_name)(sqlite3_stmt*,int); 72 | const void * (*column_table_name16)(sqlite3_stmt*,int); 73 | const unsigned char * (*column_text)(sqlite3_stmt*,int iCol); 74 | const void * (*column_text16)(sqlite3_stmt*,int iCol); 75 | int (*column_type)(sqlite3_stmt*,int iCol); 76 | sqlite3_value* (*column_value)(sqlite3_stmt*,int iCol); 77 | void * (*commit_hook)(sqlite3*,int(*)(void*),void*); 78 | int (*complete)(const char*sql); 79 | int (*complete16)(const void*sql); 80 | int (*create_collation)(sqlite3*,const char*,int,void*, 81 | int(*)(void*,int,const void*,int,const void*)); 82 | int (*create_collation16)(sqlite3*,const void*,int,void*, 83 | int(*)(void*,int,const void*,int,const void*)); 84 | int (*create_function)(sqlite3*,const char*,int,int,void*, 85 | void (*xFunc)(sqlite3_context*,int,sqlite3_value**), 86 | void (*xStep)(sqlite3_context*,int,sqlite3_value**), 87 | void (*xFinal)(sqlite3_context*)); 88 | int (*create_function16)(sqlite3*,const void*,int,int,void*, 89 | void (*xFunc)(sqlite3_context*,int,sqlite3_value**), 90 | void (*xStep)(sqlite3_context*,int,sqlite3_value**), 91 | void (*xFinal)(sqlite3_context*)); 92 | int (*create_module)(sqlite3*,const char*,const sqlite3_module*,void*); 93 | int (*data_count)(sqlite3_stmt*pStmt); 94 | sqlite3 * (*db_handle)(sqlite3_stmt*); 95 | int 
(*declare_vtab)(sqlite3*,const char*); 96 | int (*enable_shared_cache)(int); 97 | int (*errcode)(sqlite3*db); 98 | const char * (*errmsg)(sqlite3*); 99 | const void * (*errmsg16)(sqlite3*); 100 | int (*exec)(sqlite3*,const char*,sqlite3_callback,void*,char**); 101 | int (*expired)(sqlite3_stmt*); 102 | int (*finalize)(sqlite3_stmt*pStmt); 103 | void (*free)(void*); 104 | void (*free_table)(char**result); 105 | int (*get_autocommit)(sqlite3*); 106 | void * (*get_auxdata)(sqlite3_context*,int); 107 | int (*get_table)(sqlite3*,const char*,char***,int*,int*,char**); 108 | int (*global_recover)(void); 109 | void (*interruptx)(sqlite3*); 110 | sqlite_int64 (*last_insert_rowid)(sqlite3*); 111 | const char * (*libversion)(void); 112 | int (*libversion_number)(void); 113 | void *(*malloc)(int); 114 | char * (*mprintf)(const char*,...); 115 | int (*open)(const char*,sqlite3**); 116 | int (*open16)(const void*,sqlite3**); 117 | int (*prepare)(sqlite3*,const char*,int,sqlite3_stmt**,const char**); 118 | int (*prepare16)(sqlite3*,const void*,int,sqlite3_stmt**,const void**); 119 | void * (*profile)(sqlite3*,void(*)(void*,const char*,sqlite_uint64),void*); 120 | void (*progress_handler)(sqlite3*,int,int(*)(void*),void*); 121 | void *(*realloc)(void*,int); 122 | int (*reset)(sqlite3_stmt*pStmt); 123 | void (*result_blob)(sqlite3_context*,const void*,int,void(*)(void*)); 124 | void (*result_double)(sqlite3_context*,double); 125 | void (*result_error)(sqlite3_context*,const char*,int); 126 | void (*result_error16)(sqlite3_context*,const void*,int); 127 | void (*result_int)(sqlite3_context*,int); 128 | void (*result_int64)(sqlite3_context*,sqlite_int64); 129 | void (*result_null)(sqlite3_context*); 130 | void (*result_text)(sqlite3_context*,const char*,int,void(*)(void*)); 131 | void (*result_text16)(sqlite3_context*,const void*,int,void(*)(void*)); 132 | void (*result_text16be)(sqlite3_context*,const void*,int,void(*)(void*)); 133 | void (*result_text16le)(sqlite3_context*,const void*,int,void(*)(void*)); 134 | void (*result_value)(sqlite3_context*,sqlite3_value*); 135 | void * (*rollback_hook)(sqlite3*,void(*)(void*),void*); 136 | int (*set_authorizer)(sqlite3*,int(*)(void*,int,const char*,const char*, 137 | const char*,const char*),void*); 138 | void (*set_auxdata)(sqlite3_context*,int,void*,void (*)(void*)); 139 | char * (*snprintf)(int,char*,const char*,...); 140 | int (*step)(sqlite3_stmt*); 141 | int (*table_column_metadata)(sqlite3*,const char*,const char*,const char*, 142 | char const**,char const**,int*,int*,int*); 143 | void (*thread_cleanup)(void); 144 | int (*total_changes)(sqlite3*); 145 | void * (*trace)(sqlite3*,void(*xTrace)(void*,const char*),void*); 146 | int (*transfer_bindings)(sqlite3_stmt*,sqlite3_stmt*); 147 | void * (*update_hook)(sqlite3*,void(*)(void*,int ,char const*,char const*, 148 | sqlite_int64),void*); 149 | void * (*user_data)(sqlite3_context*); 150 | const void * (*value_blob)(sqlite3_value*); 151 | int (*value_bytes)(sqlite3_value*); 152 | int (*value_bytes16)(sqlite3_value*); 153 | double (*value_double)(sqlite3_value*); 154 | int (*value_int)(sqlite3_value*); 155 | sqlite_int64 (*value_int64)(sqlite3_value*); 156 | int (*value_numeric_type)(sqlite3_value*); 157 | const unsigned char * (*value_text)(sqlite3_value*); 158 | const void * (*value_text16)(sqlite3_value*); 159 | const void * (*value_text16be)(sqlite3_value*); 160 | const void * (*value_text16le)(sqlite3_value*); 161 | int (*value_type)(sqlite3_value*); 162 | char *(*vmprintf)(const char*,va_list); 163 | /* 
Added ??? */ 164 | int (*overload_function)(sqlite3*, const char *zFuncName, int nArg); 165 | /* Added by 3.3.13 */ 166 | int (*prepare_v2)(sqlite3*,const char*,int,sqlite3_stmt**,const char**); 167 | int (*prepare16_v2)(sqlite3*,const void*,int,sqlite3_stmt**,const void**); 168 | int (*clear_bindings)(sqlite3_stmt*); 169 | /* Added by 3.4.1 */ 170 | int (*create_module_v2)(sqlite3*,const char*,const sqlite3_module*,void*, 171 | void (*xDestroy)(void *)); 172 | /* Added by 3.5.0 */ 173 | int (*bind_zeroblob)(sqlite3_stmt*,int,int); 174 | int (*blob_bytes)(sqlite3_blob*); 175 | int (*blob_close)(sqlite3_blob*); 176 | int (*blob_open)(sqlite3*,const char*,const char*,const char*,sqlite3_int64, 177 | int,sqlite3_blob**); 178 | int (*blob_read)(sqlite3_blob*,void*,int,int); 179 | int (*blob_write)(sqlite3_blob*,const void*,int,int); 180 | int (*create_collation_v2)(sqlite3*,const char*,int,void*, 181 | int(*)(void*,int,const void*,int,const void*), 182 | void(*)(void*)); 183 | int (*file_control)(sqlite3*,const char*,int,void*); 184 | sqlite3_int64 (*memory_highwater)(int); 185 | sqlite3_int64 (*memory_used)(void); 186 | sqlite3_mutex *(*mutex_alloc)(int); 187 | void (*mutex_enter)(sqlite3_mutex*); 188 | void (*mutex_free)(sqlite3_mutex*); 189 | void (*mutex_leave)(sqlite3_mutex*); 190 | int (*mutex_try)(sqlite3_mutex*); 191 | int (*open_v2)(const char*,sqlite3**,int,const char*); 192 | int (*release_memory)(int); 193 | void (*result_error_nomem)(sqlite3_context*); 194 | void (*result_error_toobig)(sqlite3_context*); 195 | int (*sleep)(int); 196 | void (*soft_heap_limit)(int); 197 | sqlite3_vfs *(*vfs_find)(const char*); 198 | int (*vfs_register)(sqlite3_vfs*,int); 199 | int (*vfs_unregister)(sqlite3_vfs*); 200 | int (*xthreadsafe)(void); 201 | void (*result_zeroblob)(sqlite3_context*,int); 202 | void (*result_error_code)(sqlite3_context*,int); 203 | int (*test_control)(int, ...); 204 | void (*randomness)(int,void*); 205 | sqlite3 *(*context_db_handle)(sqlite3_context*); 206 | int (*extended_result_codes)(sqlite3*,int); 207 | int (*limit)(sqlite3*,int,int); 208 | sqlite3_stmt *(*next_stmt)(sqlite3*,sqlite3_stmt*); 209 | const char *(*sql)(sqlite3_stmt*); 210 | int (*status)(int,int*,int*,int); 211 | int (*backup_finish)(sqlite3_backup*); 212 | sqlite3_backup *(*backup_init)(sqlite3*,const char*,sqlite3*,const char*); 213 | int (*backup_pagecount)(sqlite3_backup*); 214 | int (*backup_remaining)(sqlite3_backup*); 215 | int (*backup_step)(sqlite3_backup*,int); 216 | const char *(*compileoption_get)(int); 217 | int (*compileoption_used)(const char*); 218 | int (*create_function_v2)(sqlite3*,const char*,int,int,void*, 219 | void (*xFunc)(sqlite3_context*,int,sqlite3_value**), 220 | void (*xStep)(sqlite3_context*,int,sqlite3_value**), 221 | void (*xFinal)(sqlite3_context*), 222 | void(*xDestroy)(void*)); 223 | int (*db_config)(sqlite3*,int,...); 224 | sqlite3_mutex *(*db_mutex)(sqlite3*); 225 | int (*db_status)(sqlite3*,int,int*,int*,int); 226 | int (*extended_errcode)(sqlite3*); 227 | void (*log)(int,const char*,...); 228 | sqlite3_int64 (*soft_heap_limit64)(sqlite3_int64); 229 | const char *(*sourceid)(void); 230 | int (*stmt_status)(sqlite3_stmt*,int,int); 231 | int (*strnicmp)(const char*,const char*,int); 232 | int (*unlock_notify)(sqlite3*,void(*)(void**,int),void*); 233 | int (*wal_autocheckpoint)(sqlite3*,int); 234 | int (*wal_checkpoint)(sqlite3*,const char*); 235 | void *(*wal_hook)(sqlite3*,int(*)(void*,sqlite3*,const char*,int),void*); 236 | int 
(*blob_reopen)(sqlite3_blob*,sqlite3_int64); 237 | int (*vtab_config)(sqlite3*,int op,...); 238 | int (*vtab_on_conflict)(sqlite3*); 239 | /* Version 3.7.16 and later */ 240 | int (*close_v2)(sqlite3*); 241 | const char *(*db_filename)(sqlite3*,const char*); 242 | int (*db_readonly)(sqlite3*,const char*); 243 | int (*db_release_memory)(sqlite3*); 244 | const char *(*errstr)(int); 245 | int (*stmt_busy)(sqlite3_stmt*); 246 | int (*stmt_readonly)(sqlite3_stmt*); 247 | int (*stricmp)(const char*,const char*); 248 | int (*uri_boolean)(const char*,const char*,int); 249 | sqlite3_int64 (*uri_int64)(const char*,const char*,sqlite3_int64); 250 | const char *(*uri_parameter)(const char*,const char*); 251 | char *(*vsnprintf)(int,char*,const char*,va_list); 252 | int (*wal_checkpoint_v2)(sqlite3*,const char*,int,int*,int*); 253 | /* Version 3.8.7 and later */ 254 | int (*auto_extension)(void(*)(void)); 255 | int (*bind_blob64)(sqlite3_stmt*,int,const void*,sqlite3_uint64, 256 | void(*)(void*)); 257 | int (*bind_text64)(sqlite3_stmt*,int,const char*,sqlite3_uint64, 258 | void(*)(void*),unsigned char); 259 | int (*cancel_auto_extension)(void(*)(void)); 260 | int (*load_extension)(sqlite3*,const char*,const char*,char**); 261 | void *(*malloc64)(sqlite3_uint64); 262 | sqlite3_uint64 (*msize)(void*); 263 | void *(*realloc64)(void*,sqlite3_uint64); 264 | void (*reset_auto_extension)(void); 265 | void (*result_blob64)(sqlite3_context*,const void*,sqlite3_uint64, 266 | void(*)(void*)); 267 | void (*result_text64)(sqlite3_context*,const char*,sqlite3_uint64, 268 | void(*)(void*), unsigned char); 269 | int (*strglob)(const char*,const char*); 270 | /* Version 3.8.11 and later */ 271 | sqlite3_value *(*value_dup)(const sqlite3_value*); 272 | void (*value_free)(sqlite3_value*); 273 | int (*result_zeroblob64)(sqlite3_context*,sqlite3_uint64); 274 | int (*bind_zeroblob64)(sqlite3_stmt*, int, sqlite3_uint64); 275 | }; 276 | 277 | /* 278 | ** The following macros redefine the API routines so that they are 279 | ** redirected through the global sqlite3_api structure. 280 | ** 281 | ** This header file is also used by the loadext.c source file 282 | ** (part of the main SQLite library - not an extension) so that 283 | ** it can get access to the sqlite3_api_routines structure 284 | ** definition. But the main library does not want to redefine 285 | ** the API. So the redefinition macros are only valid if the 286 | ** SQLITE_CORE macros is undefined. 
287 | */ 288 | #ifndef SQLITE_CORE 289 | #define sqlite3_aggregate_context sqlite3_api->aggregate_context 290 | #ifndef SQLITE_OMIT_DEPRECATED 291 | #define sqlite3_aggregate_count sqlite3_api->aggregate_count 292 | #endif 293 | #define sqlite3_bind_blob sqlite3_api->bind_blob 294 | #define sqlite3_bind_double sqlite3_api->bind_double 295 | #define sqlite3_bind_int sqlite3_api->bind_int 296 | #define sqlite3_bind_int64 sqlite3_api->bind_int64 297 | #define sqlite3_bind_null sqlite3_api->bind_null 298 | #define sqlite3_bind_parameter_count sqlite3_api->bind_parameter_count 299 | #define sqlite3_bind_parameter_index sqlite3_api->bind_parameter_index 300 | #define sqlite3_bind_parameter_name sqlite3_api->bind_parameter_name 301 | #define sqlite3_bind_text sqlite3_api->bind_text 302 | #define sqlite3_bind_text16 sqlite3_api->bind_text16 303 | #define sqlite3_bind_value sqlite3_api->bind_value 304 | #define sqlite3_busy_handler sqlite3_api->busy_handler 305 | #define sqlite3_busy_timeout sqlite3_api->busy_timeout 306 | #define sqlite3_changes sqlite3_api->changes 307 | #define sqlite3_close sqlite3_api->close 308 | #define sqlite3_collation_needed sqlite3_api->collation_needed 309 | #define sqlite3_collation_needed16 sqlite3_api->collation_needed16 310 | #define sqlite3_column_blob sqlite3_api->column_blob 311 | #define sqlite3_column_bytes sqlite3_api->column_bytes 312 | #define sqlite3_column_bytes16 sqlite3_api->column_bytes16 313 | #define sqlite3_column_count sqlite3_api->column_count 314 | #define sqlite3_column_database_name sqlite3_api->column_database_name 315 | #define sqlite3_column_database_name16 sqlite3_api->column_database_name16 316 | #define sqlite3_column_decltype sqlite3_api->column_decltype 317 | #define sqlite3_column_decltype16 sqlite3_api->column_decltype16 318 | #define sqlite3_column_double sqlite3_api->column_double 319 | #define sqlite3_column_int sqlite3_api->column_int 320 | #define sqlite3_column_int64 sqlite3_api->column_int64 321 | #define sqlite3_column_name sqlite3_api->column_name 322 | #define sqlite3_column_name16 sqlite3_api->column_name16 323 | #define sqlite3_column_origin_name sqlite3_api->column_origin_name 324 | #define sqlite3_column_origin_name16 sqlite3_api->column_origin_name16 325 | #define sqlite3_column_table_name sqlite3_api->column_table_name 326 | #define sqlite3_column_table_name16 sqlite3_api->column_table_name16 327 | #define sqlite3_column_text sqlite3_api->column_text 328 | #define sqlite3_column_text16 sqlite3_api->column_text16 329 | #define sqlite3_column_type sqlite3_api->column_type 330 | #define sqlite3_column_value sqlite3_api->column_value 331 | #define sqlite3_commit_hook sqlite3_api->commit_hook 332 | #define sqlite3_complete sqlite3_api->complete 333 | #define sqlite3_complete16 sqlite3_api->complete16 334 | #define sqlite3_create_collation sqlite3_api->create_collation 335 | #define sqlite3_create_collation16 sqlite3_api->create_collation16 336 | #define sqlite3_create_function sqlite3_api->create_function 337 | #define sqlite3_create_function16 sqlite3_api->create_function16 338 | #define sqlite3_create_module sqlite3_api->create_module 339 | #define sqlite3_create_module_v2 sqlite3_api->create_module_v2 340 | #define sqlite3_data_count sqlite3_api->data_count 341 | #define sqlite3_db_handle sqlite3_api->db_handle 342 | #define sqlite3_declare_vtab sqlite3_api->declare_vtab 343 | #define sqlite3_enable_shared_cache sqlite3_api->enable_shared_cache 344 | #define sqlite3_errcode sqlite3_api->errcode 345 | #define 
sqlite3_errmsg sqlite3_api->errmsg 346 | #define sqlite3_errmsg16 sqlite3_api->errmsg16 347 | #define sqlite3_exec sqlite3_api->exec 348 | #ifndef SQLITE_OMIT_DEPRECATED 349 | #define sqlite3_expired sqlite3_api->expired 350 | #endif 351 | #define sqlite3_finalize sqlite3_api->finalize 352 | #define sqlite3_free sqlite3_api->free 353 | #define sqlite3_free_table sqlite3_api->free_table 354 | #define sqlite3_get_autocommit sqlite3_api->get_autocommit 355 | #define sqlite3_get_auxdata sqlite3_api->get_auxdata 356 | #define sqlite3_get_table sqlite3_api->get_table 357 | #ifndef SQLITE_OMIT_DEPRECATED 358 | #define sqlite3_global_recover sqlite3_api->global_recover 359 | #endif 360 | #define sqlite3_interrupt sqlite3_api->interruptx 361 | #define sqlite3_last_insert_rowid sqlite3_api->last_insert_rowid 362 | #define sqlite3_libversion sqlite3_api->libversion 363 | #define sqlite3_libversion_number sqlite3_api->libversion_number 364 | #define sqlite3_malloc sqlite3_api->malloc 365 | #define sqlite3_mprintf sqlite3_api->mprintf 366 | #define sqlite3_open sqlite3_api->open 367 | #define sqlite3_open16 sqlite3_api->open16 368 | #define sqlite3_prepare sqlite3_api->prepare 369 | #define sqlite3_prepare16 sqlite3_api->prepare16 370 | #define sqlite3_prepare_v2 sqlite3_api->prepare_v2 371 | #define sqlite3_prepare16_v2 sqlite3_api->prepare16_v2 372 | #define sqlite3_profile sqlite3_api->profile 373 | #define sqlite3_progress_handler sqlite3_api->progress_handler 374 | #define sqlite3_realloc sqlite3_api->realloc 375 | #define sqlite3_reset sqlite3_api->reset 376 | #define sqlite3_result_blob sqlite3_api->result_blob 377 | #define sqlite3_result_double sqlite3_api->result_double 378 | #define sqlite3_result_error sqlite3_api->result_error 379 | #define sqlite3_result_error16 sqlite3_api->result_error16 380 | #define sqlite3_result_int sqlite3_api->result_int 381 | #define sqlite3_result_int64 sqlite3_api->result_int64 382 | #define sqlite3_result_null sqlite3_api->result_null 383 | #define sqlite3_result_text sqlite3_api->result_text 384 | #define sqlite3_result_text16 sqlite3_api->result_text16 385 | #define sqlite3_result_text16be sqlite3_api->result_text16be 386 | #define sqlite3_result_text16le sqlite3_api->result_text16le 387 | #define sqlite3_result_value sqlite3_api->result_value 388 | #define sqlite3_rollback_hook sqlite3_api->rollback_hook 389 | #define sqlite3_set_authorizer sqlite3_api->set_authorizer 390 | #define sqlite3_set_auxdata sqlite3_api->set_auxdata 391 | #define sqlite3_snprintf sqlite3_api->snprintf 392 | #define sqlite3_step sqlite3_api->step 393 | #define sqlite3_table_column_metadata sqlite3_api->table_column_metadata 394 | #define sqlite3_thread_cleanup sqlite3_api->thread_cleanup 395 | #define sqlite3_total_changes sqlite3_api->total_changes 396 | #define sqlite3_trace sqlite3_api->trace 397 | #ifndef SQLITE_OMIT_DEPRECATED 398 | #define sqlite3_transfer_bindings sqlite3_api->transfer_bindings 399 | #endif 400 | #define sqlite3_update_hook sqlite3_api->update_hook 401 | #define sqlite3_user_data sqlite3_api->user_data 402 | #define sqlite3_value_blob sqlite3_api->value_blob 403 | #define sqlite3_value_bytes sqlite3_api->value_bytes 404 | #define sqlite3_value_bytes16 sqlite3_api->value_bytes16 405 | #define sqlite3_value_double sqlite3_api->value_double 406 | #define sqlite3_value_int sqlite3_api->value_int 407 | #define sqlite3_value_int64 sqlite3_api->value_int64 408 | #define sqlite3_value_numeric_type sqlite3_api->value_numeric_type 409 | #define sqlite3_value_text 
sqlite3_api->value_text 410 | #define sqlite3_value_text16 sqlite3_api->value_text16 411 | #define sqlite3_value_text16be sqlite3_api->value_text16be 412 | #define sqlite3_value_text16le sqlite3_api->value_text16le 413 | #define sqlite3_value_type sqlite3_api->value_type 414 | #define sqlite3_vmprintf sqlite3_api->vmprintf 415 | #define sqlite3_overload_function sqlite3_api->overload_function 416 | #define sqlite3_prepare_v2 sqlite3_api->prepare_v2 417 | #define sqlite3_prepare16_v2 sqlite3_api->prepare16_v2 418 | #define sqlite3_clear_bindings sqlite3_api->clear_bindings 419 | #define sqlite3_bind_zeroblob sqlite3_api->bind_zeroblob 420 | #define sqlite3_blob_bytes sqlite3_api->blob_bytes 421 | #define sqlite3_blob_close sqlite3_api->blob_close 422 | #define sqlite3_blob_open sqlite3_api->blob_open 423 | #define sqlite3_blob_read sqlite3_api->blob_read 424 | #define sqlite3_blob_write sqlite3_api->blob_write 425 | #define sqlite3_create_collation_v2 sqlite3_api->create_collation_v2 426 | #define sqlite3_file_control sqlite3_api->file_control 427 | #define sqlite3_memory_highwater sqlite3_api->memory_highwater 428 | #define sqlite3_memory_used sqlite3_api->memory_used 429 | #define sqlite3_mutex_alloc sqlite3_api->mutex_alloc 430 | #define sqlite3_mutex_enter sqlite3_api->mutex_enter 431 | #define sqlite3_mutex_free sqlite3_api->mutex_free 432 | #define sqlite3_mutex_leave sqlite3_api->mutex_leave 433 | #define sqlite3_mutex_try sqlite3_api->mutex_try 434 | #define sqlite3_open_v2 sqlite3_api->open_v2 435 | #define sqlite3_release_memory sqlite3_api->release_memory 436 | #define sqlite3_result_error_nomem sqlite3_api->result_error_nomem 437 | #define sqlite3_result_error_toobig sqlite3_api->result_error_toobig 438 | #define sqlite3_sleep sqlite3_api->sleep 439 | #define sqlite3_soft_heap_limit sqlite3_api->soft_heap_limit 440 | #define sqlite3_vfs_find sqlite3_api->vfs_find 441 | #define sqlite3_vfs_register sqlite3_api->vfs_register 442 | #define sqlite3_vfs_unregister sqlite3_api->vfs_unregister 443 | #define sqlite3_threadsafe sqlite3_api->xthreadsafe 444 | #define sqlite3_result_zeroblob sqlite3_api->result_zeroblob 445 | #define sqlite3_result_error_code sqlite3_api->result_error_code 446 | #define sqlite3_test_control sqlite3_api->test_control 447 | #define sqlite3_randomness sqlite3_api->randomness 448 | #define sqlite3_context_db_handle sqlite3_api->context_db_handle 449 | #define sqlite3_extended_result_codes sqlite3_api->extended_result_codes 450 | #define sqlite3_limit sqlite3_api->limit 451 | #define sqlite3_next_stmt sqlite3_api->next_stmt 452 | #define sqlite3_sql sqlite3_api->sql 453 | #define sqlite3_status sqlite3_api->status 454 | #define sqlite3_backup_finish sqlite3_api->backup_finish 455 | #define sqlite3_backup_init sqlite3_api->backup_init 456 | #define sqlite3_backup_pagecount sqlite3_api->backup_pagecount 457 | #define sqlite3_backup_remaining sqlite3_api->backup_remaining 458 | #define sqlite3_backup_step sqlite3_api->backup_step 459 | #define sqlite3_compileoption_get sqlite3_api->compileoption_get 460 | #define sqlite3_compileoption_used sqlite3_api->compileoption_used 461 | #define sqlite3_create_function_v2 sqlite3_api->create_function_v2 462 | #define sqlite3_db_config sqlite3_api->db_config 463 | #define sqlite3_db_mutex sqlite3_api->db_mutex 464 | #define sqlite3_db_status sqlite3_api->db_status 465 | #define sqlite3_extended_errcode sqlite3_api->extended_errcode 466 | #define sqlite3_log sqlite3_api->log 467 | #define sqlite3_soft_heap_limit64 
sqlite3_api->soft_heap_limit64 468 | #define sqlite3_sourceid sqlite3_api->sourceid 469 | #define sqlite3_stmt_status sqlite3_api->stmt_status 470 | #define sqlite3_strnicmp sqlite3_api->strnicmp 471 | #define sqlite3_unlock_notify sqlite3_api->unlock_notify 472 | #define sqlite3_wal_autocheckpoint sqlite3_api->wal_autocheckpoint 473 | #define sqlite3_wal_checkpoint sqlite3_api->wal_checkpoint 474 | #define sqlite3_wal_hook sqlite3_api->wal_hook 475 | #define sqlite3_blob_reopen sqlite3_api->blob_reopen 476 | #define sqlite3_vtab_config sqlite3_api->vtab_config 477 | #define sqlite3_vtab_on_conflict sqlite3_api->vtab_on_conflict 478 | /* Version 3.7.16 and later */ 479 | #define sqlite3_close_v2 sqlite3_api->close_v2 480 | #define sqlite3_db_filename sqlite3_api->db_filename 481 | #define sqlite3_db_readonly sqlite3_api->db_readonly 482 | #define sqlite3_db_release_memory sqlite3_api->db_release_memory 483 | #define sqlite3_errstr sqlite3_api->errstr 484 | #define sqlite3_stmt_busy sqlite3_api->stmt_busy 485 | #define sqlite3_stmt_readonly sqlite3_api->stmt_readonly 486 | #define sqlite3_stricmp sqlite3_api->stricmp 487 | #define sqlite3_uri_boolean sqlite3_api->uri_boolean 488 | #define sqlite3_uri_int64 sqlite3_api->uri_int64 489 | #define sqlite3_uri_parameter sqlite3_api->uri_parameter 490 | #define sqlite3_uri_vsnprintf sqlite3_api->vsnprintf 491 | #define sqlite3_wal_checkpoint_v2 sqlite3_api->wal_checkpoint_v2 492 | /* Version 3.8.7 and later */ 493 | #define sqlite3_auto_extension sqlite3_api->auto_extension 494 | #define sqlite3_bind_blob64 sqlite3_api->bind_blob64 495 | #define sqlite3_bind_text64 sqlite3_api->bind_text64 496 | #define sqlite3_cancel_auto_extension sqlite3_api->cancel_auto_extension 497 | #define sqlite3_load_extension sqlite3_api->load_extension 498 | #define sqlite3_malloc64 sqlite3_api->malloc64 499 | #define sqlite3_msize sqlite3_api->msize 500 | #define sqlite3_realloc64 sqlite3_api->realloc64 501 | #define sqlite3_reset_auto_extension sqlite3_api->reset_auto_extension 502 | #define sqlite3_result_blob64 sqlite3_api->result_blob64 503 | #define sqlite3_result_text64 sqlite3_api->result_text64 504 | #define sqlite3_strglob sqlite3_api->strglob 505 | /* Version 3.8.11 and later */ 506 | #define sqlite3_value_dup sqlite3_api->value_dup 507 | #define sqlite3_value_free sqlite3_api->value_free 508 | #define sqlite3_result_zeroblob64 sqlite3_api->result_zeroblob64 509 | #define sqlite3_bind_zeroblob64 sqlite3_api->bind_zeroblob64 510 | #endif /* SQLITE_CORE */ 511 | 512 | #ifndef SQLITE_CORE 513 | /* This case when the file really is being compiled as a loadable 514 | ** extension */ 515 | # define SQLITE_EXTENSION_INIT1 const sqlite3_api_routines *sqlite3_api=0; 516 | # define SQLITE_EXTENSION_INIT2(v) sqlite3_api=v; 517 | # define SQLITE_EXTENSION_INIT3 \ 518 | extern const sqlite3_api_routines *sqlite3_api; 519 | #else 520 | /* This case when the file is being statically linked into the 521 | ** application */ 522 | # define SQLITE_EXTENSION_INIT1 /*no-op*/ 523 | # define SQLITE_EXTENSION_INIT2(v) (void)v; /* unused parameter */ 524 | # define SQLITE_EXTENSION_INIT3 /*no-op*/ 525 | #endif 526 | 527 | #endif /* _SQLITE3EXT_H_ */ 528 | -------------------------------------------------------------------------------- /src/main/java/com/couchbase/lite/store/ForestDBViewStore.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Created by Hideki Itakura on 10/20/2015. 
3 | * Copyright (c) 2015 Couchbase, Inc All rights reserved. 4 | *
5 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file 6 | * except in compliance with the License. You may obtain a copy of the License at 7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | *
10 | * Unless required by applicable law or agreed to in writing, software distributed under the 11 | * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, 12 | * either express or implied. See the License for the specific language governing permissions 13 | * and limitations under the License. 14 | */ 15 | package com.couchbase.lite.store; 16 | 17 | import com.couchbase.cbforest.Constants; 18 | import com.couchbase.cbforest.Database; 19 | import com.couchbase.cbforest.Document; 20 | import com.couchbase.cbforest.DocumentIterator; 21 | import com.couchbase.cbforest.ForestException; 22 | import com.couchbase.cbforest.Indexer; 23 | import com.couchbase.cbforest.QueryIterator; 24 | import com.couchbase.cbforest.View; 25 | import com.couchbase.lite.CouchbaseLiteException; 26 | import com.couchbase.lite.Emitter; 27 | import com.couchbase.lite.Manager; 28 | import com.couchbase.lite.Mapper; 29 | import com.couchbase.lite.Misc; 30 | import com.couchbase.lite.Predicate; 31 | import com.couchbase.lite.QueryOptions; 32 | import com.couchbase.lite.QueryRow; 33 | import com.couchbase.lite.Reducer; 34 | import com.couchbase.lite.Status; 35 | import com.couchbase.lite.internal.RevisionInternal; 36 | import com.couchbase.lite.support.action.Action; 37 | import com.couchbase.lite.support.action.ActionBlock; 38 | import com.couchbase.lite.support.action.ActionException; 39 | import com.couchbase.lite.support.security.SymmetricKey; 40 | import com.couchbase.lite.util.Log; 41 | 42 | import java.io.File; 43 | import java.io.IOException; 44 | import java.io.UnsupportedEncodingException; 45 | import java.net.URLDecoder; 46 | import java.net.URLEncoder; 47 | import java.util.ArrayList; 48 | import java.util.HashMap; 49 | import java.util.List; 50 | import java.util.Locale; 51 | import java.util.Map; 52 | import java.util.regex.Matcher; 53 | import java.util.regex.Pattern; 54 | 55 | public class ForestDBViewStore implements ViewStore, QueryRowStore, Constants { 56 | public static String TAG = Log.TAG_VIEW; 57 | 58 | public static final String kViewIndexPathExtension = "viewindex"; 59 | private static final Pattern kViewNameRegex = Pattern.compile("^(.*)\\.viewindex(.\\d+)?$"); 60 | 61 | // Close the index db after it's inactive this many seconds 62 | private static final Float kCloseDelay = 60.0f; 63 | 64 | private static final int REDUCE_BATCH_SIZE = 100; 65 | 66 | // lock for updateIndexes method 67 | private final Object lockUpdateIndexes = new Object(); 68 | 69 | /////////////////////////////////////////////////////////////////////////// 70 | // ForestDBViewStore 71 | /////////////////////////////////////////////////////////////////////////// 72 | 73 | // public 74 | private String name; 75 | private ViewStoreDelegate delegate; 76 | 77 | // private 78 | private ForestDBStore _dbStore; 79 | private String _path; 80 | private View _view; 81 | 82 | /////////////////////////////////////////////////////////////////////////// 83 | // Constructor 84 | /////////////////////////////////////////////////////////////////////////// 85 | 86 | protected ForestDBViewStore(ForestDBStore dbStore, String name, boolean create) 87 | throws CouchbaseLiteException { 88 | this._dbStore = dbStore; 89 | this.name = name; 90 | this._path = new File(dbStore.directory, viewNameToFileName(name)).getPath(); 91 | 92 | // Somewhat of a hack: There probably won't be a file at the exact _path because ForestDB 93 | // likes to append ".0" etc., but there will be a file with a ".meta" extension: 94 | File 
metaFile = new File(this._path + ".meta"); 95 | if (!metaFile.exists()) { 96 | // migration: CBL Android/Java specific 97 | { 98 | // NOTE: .0, .1, etc is created by forestdb if auto compact is enabled. 99 | // renaming forestdb file name with .0 etc with different name could cause problem. 100 | // Following migration could work because forestdb filename is without .0 etc. 101 | // Once filename has .0 etc, do not rename file. 102 | 103 | // if old index file exists, rename it to new name 104 | File file = new File(this._path); 105 | File oldFile = new File(dbStore.directory, oldViewNameToFileName(name)); 106 | if (oldFile.exists() && !oldFile.equals(file)) { 107 | if (oldFile.renameTo(file)) 108 | return; 109 | // if fail to rename, delete it and create new one from scratch. 110 | oldFile.delete(); 111 | } 112 | } 113 | if (!create) 114 | throw new CouchbaseLiteException(Status.NOT_FOUND); 115 | try { 116 | openIndex(Database.Create, true); 117 | } catch (ForestException e) { 118 | throw new CouchbaseLiteException(e.code); 119 | } 120 | } 121 | } 122 | 123 | /////////////////////////////////////////////////////////////////////////// 124 | // Implementation of ViewStorage 125 | /////////////////////////////////////////////////////////////////////////// 126 | 127 | @Override 128 | public boolean rowValueIsEntireDoc(byte[] valueData) { 129 | return false; 130 | } 131 | 132 | @Override 133 | public Object parseRowValue(byte[] valueData) { 134 | return null; 135 | } 136 | 137 | @Override 138 | public Map getDocumentProperties(String docID, long sequence) { 139 | return null; 140 | } 141 | 142 | @Override 143 | public String getName() { 144 | return name; 145 | } 146 | 147 | @Override 148 | public ViewStoreDelegate getDelegate() { 149 | return delegate; 150 | } 151 | 152 | @Override 153 | public void setDelegate(ViewStoreDelegate delegate) { 154 | this.delegate = delegate; 155 | } 156 | 157 | @Override 158 | public void close() { 159 | closeIndex(); 160 | } 161 | 162 | @Override 163 | public void deleteIndex() { 164 | if (_view != null) { 165 | try { 166 | _view.eraseIndex(); 167 | } catch (ForestException e) { 168 | Log.e(TAG, "Failed to eraseIndex: " + _view); 169 | } 170 | } 171 | } 172 | 173 | @Override 174 | public void deleteView() { 175 | deleteViewFiles(); 176 | } 177 | 178 | @Override 179 | public boolean setVersion(String version) { 180 | closeIndex(); 181 | return true; 182 | } 183 | 184 | @Override 185 | public int getTotalRows() { 186 | try { 187 | openIndex(); 188 | } catch (ForestException e) { 189 | Log.e(TAG, "Exception opening index while getting total rows", e); 190 | return 0; 191 | } 192 | return (int) _view.getTotalRows(); 193 | } 194 | 195 | @Override 196 | public long getLastSequenceIndexed() { 197 | try { 198 | openIndex(); // in case the _mapVersion changed, invalidating the _view 199 | } catch (ForestException e) { 200 | Log.e(TAG, "Exception opening index while getting last sequence indexed", e); 201 | return -1; 202 | } 203 | return _view.getLastSequenceIndexed(); 204 | } 205 | 206 | @Override 207 | public long getLastSequenceChangedAt() { 208 | try { 209 | openIndex(); // in case the _mapVersion changed, invalidating the _view 210 | } catch (ForestException e) { 211 | Log.e(TAG, "Exception opening index while getting last sequence changed at", e); 212 | return -1; 213 | } 214 | return _view.getLastSequenceChangedAt(); 215 | } 216 | 217 | /** 218 | * NOTE: updateIndexes() is not thread-safe without synchronized. 
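 * All callers therefore serialize on a single monitor (lockUpdateIndexes); the body below is
 * effectively: synchronized (lockUpdateIndexes) { open each view's index, run one Indexer pass over
 * the changed documents, endIndex(success) }, so concurrent updaters block instead of interleaving.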
219 | * see https://github.com/couchbase/couchbase-lite-java-core/issues/1363 220 | */ 221 | @Override 222 | public Status updateIndexes(List inputViews) throws CouchbaseLiteException { 223 | synchronized (lockUpdateIndexes) { 224 | assert (inputViews != null); 225 | 226 | // workaround 227 | if (!inputViews.contains(this)) 228 | inputViews.add(this); 229 | 230 | final ArrayList views = new ArrayList(inputViews.size()); 231 | final ArrayList mapBlocks = new ArrayList(inputViews.size()); 232 | final ArrayList docTypes = new ArrayList(inputViews.size()); 233 | boolean useDocType = false; 234 | for (ViewStore v : inputViews) { 235 | ForestDBViewStore view = (ForestDBViewStore) v; 236 | ViewStoreDelegate delegate = view.getDelegate(); 237 | Mapper map = delegate != null ? delegate.getMap() : null; 238 | if (map == null) { 239 | Log.v(Log.TAG_VIEW, " %s has no map block; skipping it", view.getName()); 240 | continue; 241 | } 242 | try { 243 | view.openIndex(); 244 | } catch (ForestException e) { 245 | throw new CouchbaseLiteException(ForestBridge.err2status(e)); 246 | } 247 | views.add(view._view); 248 | mapBlocks.add(map); 249 | String docType = delegate.getDocumentType(); 250 | docTypes.add(docType); 251 | if (docType != null && !useDocType) 252 | useDocType = true; 253 | } 254 | 255 | if (views.size() == 0) { 256 | Log.v(TAG, " No input views to update the index"); 257 | return new Status(Status.NOT_MODIFIED); 258 | } 259 | 260 | boolean success = false; 261 | Indexer indexer = null; 262 | try { 263 | indexer = new Indexer(views.toArray(new View[views.size()])); 264 | indexer.triggerOnView(this._view); 265 | DocumentIterator itr; 266 | try { 267 | itr = indexer.iterateDocuments(); 268 | if (itr == null) 269 | return new Status(Status.NOT_MODIFIED); 270 | } catch (ForestException e) { 271 | if (e.code == FDBErrors.FDB_RESULT_SUCCESS) 272 | return new Status(Status.NOT_MODIFIED); 273 | else 274 | throw new CouchbaseLiteException(ForestBridge.err2status(e)); 275 | } 276 | // Now enumerate the docs: 277 | Document doc; 278 | while ((doc = itr.nextDocument()) != null) { 279 | // For each updated document: 280 | try { 281 | String docType = useDocType ? 
doc.getType() : null; 282 | // Skip design docs 283 | boolean validDocToIndex = 284 | !doc.deleted() && !doc.getDocID().startsWith("_design/"); 285 | // Read the document body: 286 | Map body = ForestBridge.bodyOfSelectedRevision(doc); 287 | body.put("_id", doc.getDocID()); 288 | body.put("_rev", doc.getRevID()); 289 | body.put("_local_seq", doc.getSequence()); 290 | if (doc.conflicted()) { 291 | List currentRevIDs = ForestBridge.getCurrentRevisionIDs(doc); 292 | if (currentRevIDs != null && currentRevIDs.size() > 1) 293 | body.put("_conflicts", 294 | currentRevIDs.subList(1, currentRevIDs.size())); 295 | } 296 | // Feed it to each view's map function: 297 | for (int viewNumber = 0; viewNumber < views.size(); viewNumber++) { 298 | if (!indexer.shouldIndex(doc, viewNumber)) 299 | continue; 300 | 301 | boolean indexIt = validDocToIndex; 302 | if (indexIt && useDocType) { 303 | String viewDocType = docTypes.get(viewNumber); 304 | if (viewDocType != null) 305 | indexIt = viewDocType.equals(docType); 306 | } 307 | 308 | if (indexIt) 309 | emit(indexer, viewNumber, doc, body, mapBlocks.get(viewNumber)); 310 | else 311 | emit(indexer, viewNumber, doc, body, null); 312 | } 313 | } finally { 314 | doc.free(); 315 | } 316 | } 317 | success = true; 318 | } catch (ForestException e) { 319 | throw new CouchbaseLiteException(ForestBridge.err2status(e)); 320 | } finally { 321 | if (indexer != null) { 322 | try { 323 | indexer.endIndex(success); 324 | } catch (ForestException ex) { 325 | Log.e(TAG, "Failed to call Indexer.endIndex(boolean)", ex); 326 | if (success) 327 | throw new CouchbaseLiteException(ForestBridge.err2status(ex)); 328 | } 329 | } 330 | } 331 | Log.v(TAG, "... Finished re-indexing (%s)", viewNames(inputViews)); 332 | return new Status(Status.OK); 333 | } 334 | } 335 | 336 | private void emit(Indexer indexer, int viewNumber, Document doc, 337 | Map properties, Mapper mapper) 338 | throws ForestException, CouchbaseLiteException { 339 | final List keys = new ArrayList(); 340 | final List values = new ArrayList(); 341 | if (mapper != null) { 342 | try { 343 | // Set up the emit block: 344 | mapper.map(properties, new Emitter() { 345 | @Override 346 | public void emit(Object key, Object value) { 347 | if (key == null) { 348 | Log.w(Log.TAG_VIEW, "emit() called with nil key; ignoring"); 349 | return; 350 | } 351 | try { 352 | byte[] json = Manager.getObjectMapper().writeValueAsBytes(value); 353 | keys.add(key); 354 | values.add(json); 355 | } catch (Exception e) { 356 | Log.e(TAG, "Error in obj -> json", e); 357 | throw new RuntimeException(e); 358 | } 359 | } 360 | }); 361 | } catch (Throwable e) { 362 | throw new CouchbaseLiteException(e, Status.CALLBACK_ERROR); 363 | } 364 | } 365 | final byte[][] jsons = new byte[values.size()][]; 366 | for (int i = 0; i < values.size(); i++) { 367 | jsons[i] = values.get(i); 368 | } 369 | indexer.emit(doc, viewNumber, keys.toArray(), jsons); 370 | } 371 | 372 | @Override 373 | public List regularQuery(QueryOptions options) throws CouchbaseLiteException { 374 | try { 375 | openIndex(); 376 | } catch (ForestException e) { 377 | Log.e(TAG, "Exception opening index while getting total rows", e); 378 | throw new CouchbaseLiteException(e.code); 379 | } 380 | 381 | final Predicate postFilter = options.getPostFilter(); 382 | int limit = options.getLimit(); 383 | int skip = options.getSkip(); 384 | if (postFilter != null) { 385 | // #574: Custom post-filter means skip/limit apply to the filtered rows, not to the 386 | // underlying query, so handle them specially: 
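// With a post-filter the underlying forestQuery() is therefore run unbounded (default limit, skip 0),
// and the caller's skip/limit are re-applied by hand below, after postFilter.apply(row) accepts a row.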
387 | options.setLimit(QueryOptions.QUERY_OPTIONS_DEFAULT_LIMIT); 388 | options.setSkip(0); 389 | } 390 | 391 | List rows = new ArrayList(); 392 | QueryIterator itr; 393 | try { 394 | itr = forestQuery(options); 395 | while (itr.next()) { 396 | RevisionInternal docRevision = null; 397 | byte[] bKey = itr.keyJSON(); 398 | byte[] bValue = itr.valueJSON(); 399 | Object key = fromJSON(bKey, Object.class); 400 | Object value = fromJSON(bValue, Object.class); 401 | String docID = itr.docID(); 402 | long sequence = itr.sequence(); 403 | if (options.isIncludeDocs()) { 404 | String linkedID = null; 405 | if (value instanceof Map) 406 | linkedID = (String) ((Map) value).get("_id"); 407 | Status status = new Status(); 408 | if (linkedID != null) { 409 | // http://wiki.apache.org/couchdb/Introduction_to_CouchDB_views 410 | // #Linked_documents 411 | String linkedRev = (String) ((Map) value).get("_rev"); 412 | docRevision = _dbStore.getDocument(linkedID, linkedRev, true, status); 413 | if (docRevision != null) 414 | sequence = docRevision.getSequence(); 415 | else 416 | Log.w(TAG, "Couldn't load linked doc %s rev %s: status %d", 417 | linkedID, linkedRev, status.getCode()); 418 | } else { 419 | docRevision = _dbStore.getDocument(docID, null, true, status); 420 | } 421 | } 422 | Log.v(TAG, "Query %s: Found row with key=%s, value=%s, id=%s", 423 | name, key == null ? "" : key, value == null ? "" : value, docID); 424 | // Create a CBLQueryRow: 425 | QueryRow row = new QueryRow(docID, sequence, 426 | key, value, 427 | docRevision); 428 | if (postFilter != null) { 429 | if (!postFilter.apply(row)) { 430 | continue; 431 | } 432 | if (skip > 0) { 433 | --skip; 434 | continue; 435 | } 436 | } 437 | rows.add(row); 438 | if (--limit == 0) 439 | break; 440 | } 441 | } catch (ForestException e) { 442 | Log.e(TAG, "Error in regularQuery()", e); 443 | throw new CouchbaseLiteException(e.code); 444 | } catch (IOException e) { 445 | Log.e(TAG, "Error in regularQuery()", e); 446 | throw new CouchbaseLiteException(Status.UNKNOWN); 447 | } 448 | return rows; 449 | } 450 | 451 | /** 452 | * Queries the view, with reducing or grouping as per the options. 
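 * Rows arrive from the index in key order, so grouping is streamed: keys and values are accumulated
 * until the group prefix changes (see groupTogether/groupKey), at which point the batch is reduced
 * (when a Reducer is defined) and emitted as a single QueryRow; the last batch is flushed after the loop.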
453 | * in CBL_ForestDBViewStorage.m 454 | * - (CBLQueryIteratorBlock) reducedQueryWithOptions: (CBLQueryOptions*)options 455 | * status: (CBLStatus*)outStatus 456 | */ 457 | @Override 458 | public List reducedQuery(QueryOptions options) throws CouchbaseLiteException { 459 | Predicate postFilter = options.getPostFilter(); 460 | 461 | int groupLevel = options.getGroupLevel(); 462 | boolean group = options.isGroup() || (groupLevel > 0); 463 | Reducer reduce = delegate.getReduce(); 464 | if (options.isReduceSpecified()) { 465 | if (options.isReduce() && reduce == null) { 466 | Log.w(TAG, "Cannot use reduce option in view %s which has no reduce block defined", 467 | name); 468 | throw new CouchbaseLiteException(new Status(Status.BAD_PARAM)); 469 | } 470 | } 471 | 472 | final List keysToReduce = new ArrayList(REDUCE_BATCH_SIZE); 473 | final List valuesToReduce = new ArrayList(REDUCE_BATCH_SIZE); 474 | final Object[] lastKeys = new Object[1]; 475 | lastKeys[0] = null; 476 | final ForestDBViewStore that = this; 477 | final List rows = new ArrayList(); 478 | 479 | try { 480 | openIndex(); 481 | } catch (ForestException e) { 482 | throw new CouchbaseLiteException(e.code); 483 | } 484 | 485 | QueryIterator itr; 486 | try { 487 | itr = forestQuery(options); 488 | 489 | while (itr.next()) { 490 | byte[] bKey = itr.keyJSON(); 491 | byte[] bValue = itr.valueJSON(); 492 | Object keyObject = fromJSON(bKey, Object.class); 493 | Object valueObject = fromJSON(bValue, Object.class); 494 | if (group && !groupTogether(keyObject, lastKeys[0], groupLevel)) { 495 | if (lastKeys[0] != null) { 496 | // This pair starts a new group, so reduce & record the last one: 497 | Object key = groupKey(lastKeys[0], groupLevel); 498 | Object reduced = (reduce != null) ? 499 | reduce.reduce(keysToReduce, valuesToReduce, false) : null; 500 | QueryRow row = new QueryRow(null, 0, key, reduced, null); 501 | if (postFilter == null || postFilter.apply(row)) 502 | rows.add(row); 503 | keysToReduce.clear(); 504 | valuesToReduce.clear(); 505 | } 506 | lastKeys[0] = keyObject; 507 | } 508 | 509 | keysToReduce.add(keyObject); 510 | valuesToReduce.add(valueObject); 511 | } 512 | 513 | } catch (ForestException e) { 514 | Log.e(TAG, "Error in reducedQuery()", e); 515 | } catch (IOException e) { 516 | Log.e(TAG, "Error in reducedQuery()", e); 517 | throw new CouchbaseLiteException(Status.UNKNOWN); 518 | } 519 | 520 | if (keysToReduce != null && keysToReduce.size() > 0) { 521 | // Finish the last group (or the entire list, if no grouping): 522 | Object key = group ? groupKey(lastKeys[0], groupLevel) : null; 523 | Object reduced = (reduce != null) ? 
524 | reduce.reduce(keysToReduce, valuesToReduce, false) : null; 525 | Log.v(TAG, String.format(Locale.ENGLISH, "Query %s: Reduced to key=%s, value=%s", name, key, reduced)); 526 | QueryRow row = new QueryRow(null, 0, key, reduced, null); 527 | if (postFilter == null || postFilter.apply(row)) 528 | rows.add(row); 529 | } 530 | return rows; 531 | } 532 | 533 | @Override 534 | public List> dump() { 535 | try { 536 | openIndex(); 537 | } catch (ForestException e) { 538 | Log.e(TAG, "ERROR in openIndex()", e); 539 | return null; 540 | } 541 | 542 | List> result = new ArrayList>(); 543 | try { 544 | QueryIterator itr = forestQuery(new QueryOptions()); 545 | while (itr.next()) { 546 | Map dict = new HashMap(); 547 | dict.put("key", new String(itr.keyJSON())); 548 | 549 | byte[] bytes = itr.valueJSON(); 550 | dict.put("value", fromJSON(bytes, Object.class)); 551 | dict.put("seq", itr.sequence()); 552 | result.add(dict); 553 | } 554 | } catch (Exception ex) { 555 | Log.e(TAG, "Error in dump()", ex); 556 | } 557 | return result; 558 | } 559 | 560 | @Override 561 | public void setCollation(com.couchbase.lite.View.TDViewCollation collation) { 562 | Log.w(TAG, "This method should be removed"); 563 | } 564 | 565 | /////////////////////////////////////////////////////////////////////////// 566 | // Internal (Protected/Private) Methods 567 | /////////////////////////////////////////////////////////////////////////// 568 | 569 | // Opens the index. You MUST call this (or a method that calls it) before dereferencing _view. 570 | private View openIndex() throws ForestException { 571 | return openIndex(0); 572 | } 573 | 574 | private View openIndex(int flags) throws ForestException { 575 | return openIndex(flags, false); 576 | } 577 | 578 | /** 579 | * Opens the index, specifying ForestDB database flags 580 | * in CBLView.m 581 | * - (MapReduceIndex*) openIndexWithOptions: (Database::openFlags)options 582 | */ 583 | private View openIndex(int flags, boolean dryRun) throws ForestException { 584 | if (_view == null) { 585 | // Flags: 586 | if (_dbStore.getAutoCompact()) 587 | flags |= Database.AutoCompact; 588 | 589 | // Encryption: 590 | SymmetricKey encryptionKey = _dbStore.getEncryptionKey(); 591 | int enAlgorithm = Database.NoEncryption; 592 | byte[] enKey = null; 593 | if (encryptionKey != null) { 594 | enAlgorithm = Database.AES256Encryption; 595 | enKey = encryptionKey.getKey(); 596 | } 597 | 598 | _view = new View(_dbStore.forest, _path, flags, enAlgorithm, enKey, name, 599 | dryRun ? "0" : delegate.getMapVersion()); 600 | if (dryRun) { 601 | closeIndex(); 602 | } 603 | } 604 | return _view; 605 | } 606 | 607 | /** 608 | * in CBL_ForestDBViewStorage.mm 609 | * - (void) closeIndex 610 | */ 611 | private void closeIndex() { 612 | // TODO 613 | //NSObject cancelPreviousPerformRequestsWithTarget: self selector: @selector(closeIndex) object: nil]; 614 | 615 | // NOTE: view could be busy for indexing. as result, view.close() could fail. 616 | // It requires to wait till view is not busy. CBL Java/Android waits maximum 10 seconds. 
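// Retry sketch: up to 100 attempts with a 100 ms sleep after each failed close(), roughly 10 seconds
// in total; a successful close() sets _view to null, which terminates the loop immediately.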
617 | for (int i = 0; i < 100 && _view != null; i++) { 618 | try { 619 | _view.close(); 620 | _view = null; 621 | } catch (ForestException e) { 622 | Log.w(TAG, "Failed to close Index: [%s] [%s]", _view, Thread.currentThread().getName()); 623 | try { 624 | Thread.sleep(100); // 100 ms (maximum wait time: 10sec) 625 | } catch (Exception ex) { 626 | } 627 | } 628 | } 629 | } 630 | 631 | private boolean deleteViewFiles() { 632 | closeIndex(); 633 | int flags = 0; 634 | if (_dbStore.getAutoCompact()) 635 | flags |= Database.AutoCompact; 636 | try { 637 | View.deleteAtPath(_path, flags); 638 | return true; 639 | } catch (ForestException e) { 640 | Log.e(TAG, "error in deleteAtPath() _path=[%s]", e, _path); 641 | return false; 642 | } 643 | } 644 | 645 | private static String viewNames(List views) { 646 | StringBuilder sb = new StringBuilder(); 647 | boolean first = true; 648 | for (ViewStore view : views) { 649 | if (first) 650 | first = false; 651 | else 652 | sb.append(", "); 653 | sb.append(view.getName()); 654 | } 655 | return sb.toString(); 656 | } 657 | 658 | /** 659 | * Starts a view query, returning a CBForest enumerator. 660 | * - (C4QueryEnumerator*) _forestQueryWithOptions: (CBLQueryOptions*)options 661 | * error: (C4Error*)outError 662 | */ 663 | private QueryIterator forestQuery(QueryOptions options) throws ForestException { 664 | // NOTE: Geo & FullText queries are not supported yet 665 | if (options == null) 666 | options = new QueryOptions(); 667 | long skip = options.getSkip(); 668 | long limit = options.getLimit(); 669 | boolean descending = options.isDescending(); 670 | boolean inclusiveStart = options.isInclusiveStart(); 671 | boolean inclusiveEnd = options.isInclusiveEnd(); 672 | if (options.getKeys() != null && options.getKeys().size() > 0) { 673 | Object[] keys = options.getKeys().toArray(); 674 | return _view.query( 675 | skip, 676 | limit, 677 | descending, 678 | inclusiveStart, 679 | inclusiveEnd, 680 | keys); 681 | } else { 682 | Object endKey = Misc.keyForPrefixMatch(options.getEndKey(), 683 | options.getPrefixMatchLevel()); 684 | Object startKey = options.getStartKey(); 685 | String startKeyDocID = options.getStartKeyDocId(); 686 | String endKeyDocID = options.getEndKeyDocId(); 687 | return _view.query( 688 | skip, 689 | limit, 690 | descending, 691 | inclusiveStart, 692 | inclusiveEnd, 693 | startKey, 694 | endKey, 695 | startKeyDocID, 696 | endKeyDocID); 697 | } 698 | } 699 | 700 | /////////////////////////////////////////////////////////////////////////// 701 | // Internal (Package) Methods 702 | /////////////////////////////////////////////////////////////////////////// 703 | 704 | Action getActionToChangeEncryptionKey() { 705 | Action action = new Action(); 706 | action.add( 707 | new ActionBlock() { 708 | @Override 709 | public void execute() throws ActionException { 710 | if (!deleteViewFiles()) { 711 | throw new ActionException("Cannot delete view files"); 712 | } 713 | } 714 | }, 715 | new ActionBlock() { 716 | @Override 717 | public void execute() throws ActionException { 718 | try { 719 | openIndex(Database.Create); 720 | } catch (ForestException e) { 721 | throw new ActionException("Cannot open index", e); 722 | } 723 | closeIndex(); 724 | } 725 | } 726 | ); 727 | return action; 728 | } 729 | 730 | /////////////////////////////////////////////////////////////////////////// 731 | // Internal (Protected/Private) Static Methods 732 | /////////////////////////////////////////////////////////////////////////// 733 | 734 | protected static String 
oldFileNameToViewName(String fileName) throws CouchbaseLiteException { 735 | if (!fileName.endsWith(kViewIndexPathExtension) || fileName.startsWith(".")) 736 | throw new CouchbaseLiteException(Status.BAD_PARAM); 737 | String viewName = fileName.substring(0, fileName.indexOf(".")); 738 | return viewName.replaceAll(":", "/"); 739 | } 740 | 741 | private static String oldViewNameToFileName(String viewName) throws CouchbaseLiteException { 742 | if (viewName.startsWith(".") || viewName.indexOf(":") > 0) 743 | throw new CouchbaseLiteException(Status.BAD_PARAM); 744 | return viewName.replaceAll("/", ":") + "." + kViewIndexPathExtension; 745 | } 746 | 747 | protected static String fileNameToViewName(String fileName) throws CouchbaseLiteException { 748 | Matcher m = kViewNameRegex.matcher(fileName); 749 | if (!m.matches()) 750 | throw new CouchbaseLiteException(Status.BAD_PARAM); 751 | String viewName = fileName.substring(0, fileName.indexOf(".")); 752 | return unescapeViewName(viewName); 753 | } 754 | 755 | private static String viewNameToFileName(String viewName) throws CouchbaseLiteException { 756 | if (viewName.startsWith(".") || viewName.indexOf(":") > 0) 757 | throw new CouchbaseLiteException(Status.BAD_PARAM); 758 | return escapeViewName(viewName) + "." + kViewIndexPathExtension; 759 | } 760 | 761 | private static String escapeViewName(String viewName) throws CouchbaseLiteException { 762 | try { 763 | viewName = URLEncoder.encode(viewName, "UTF-8"); 764 | } catch (UnsupportedEncodingException e) { 765 | Log.w(TAG, "Error to url decode: " + viewName, e); 766 | throw new CouchbaseLiteException(e, Status.BAD_ENCODING); 767 | } 768 | return viewName.replaceAll("\\*", "%2A"); 769 | } 770 | 771 | private static String unescapeViewName(String viewName) throws CouchbaseLiteException { 772 | viewName = viewName.replaceAll("%2A", "*"); 773 | try { 774 | return URLDecoder.decode(viewName, "UTF-8"); 775 | } catch (UnsupportedEncodingException e) { 776 | Log.w(TAG, "Error to url decode: " + viewName, e); 777 | throw new CouchbaseLiteException(e, Status.BAD_ENCODING); 778 | } 779 | } 780 | 781 | /** 782 | * Are key1 and key2 grouped together at this groupLevel? 783 | */ 784 | private static boolean groupTogether(Object key1, Object key2, int groupLevel) { 785 | if (groupLevel == 0 || !(key1 instanceof List) || !(key2 instanceof List)) { 786 | return key1.equals(key2); 787 | } 788 | @SuppressWarnings("unchecked") 789 | List key1List = (List) key1; 790 | @SuppressWarnings("unchecked") 791 | List key2List = (List) key2; 792 | 793 | // if either key list is smaller than groupLevel and the key lists are different 794 | // sizes, they cannot be equal. 
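// e.g. with groupLevel=2: ["a","b","c"] and ["a","b","d"] group together (their first two elements
// match), whereas ["a"] and ["a","b"] do not (one is shorter than groupLevel and the lengths differ).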
795 | if ((key1List.size() < groupLevel || key2List.size() < groupLevel) && 796 | key1List.size() != key2List.size()) { 797 | return false; 798 | } 799 | 800 | int end = Math.min(groupLevel, Math.min(key1List.size(), key2List.size())); 801 | for (int i = 0; i < end; ++i) { 802 | if (key1List.get(i) != null && !key1List.get(i).equals(key2List.get(i))) 803 | return false; 804 | else if (key1List.get(i) == null && key2List.get(i) != null) 805 | return false; 806 | } 807 | return true; 808 | } 809 | 810 | /** 811 | * Returns the prefix of the key to use in the result row, at this groupLevel 812 | */ 813 | public static Object groupKey(Object key, int groupLevel) { 814 | if (groupLevel > 0 && (key instanceof List) && (((List) key).size() > groupLevel)) { 815 | return ((List) key).subList(0, groupLevel); 816 | } else { 817 | return key; 818 | } 819 | } 820 | 821 | // helper method 822 | private static T fromJSON(byte[] src, Class valueType) throws IOException { 823 | if (src == null) 824 | return null; 825 | return Manager.getObjectMapper().readValue(src, valueType); 826 | } 827 | } 828 | -------------------------------------------------------------------------------- /src/main/java/com/couchbase/lite/store/ForestDBStore.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Created by Hideki Itakura on 10/20/2015. 3 | * Copyright (c) 2015 Couchbase, Inc All rights reserved. 4 | *
5 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file 6 | * except in compliance with the License. You may obtain a copy of the License at 7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | *
10 | * Unless required by applicable law or agreed to in writing, software distributed under the 11 | * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, 12 | * either express or implied. See the License for the specific language governing permissions 13 | * and limitations under the License. 14 | */ 15 | package com.couchbase.lite.store; 16 | 17 | import com.couchbase.cbforest.Constants; 18 | import com.couchbase.cbforest.Database; 19 | import com.couchbase.cbforest.Document; 20 | import com.couchbase.cbforest.DocumentIterator; 21 | import com.couchbase.cbforest.ForestException; 22 | import com.couchbase.lite.BlobKey; 23 | import com.couchbase.lite.ChangesOptions; 24 | import com.couchbase.lite.CouchbaseLiteException; 25 | import com.couchbase.lite.DocumentChange; 26 | import com.couchbase.lite.Manager; 27 | import com.couchbase.lite.Misc; 28 | import com.couchbase.lite.Predicate; 29 | import com.couchbase.lite.Query; 30 | import com.couchbase.lite.QueryOptions; 31 | import com.couchbase.lite.QueryRow; 32 | import com.couchbase.lite.ReplicationFilter; 33 | import com.couchbase.lite.Revision; 34 | import com.couchbase.lite.RevisionList; 35 | import com.couchbase.lite.Status; 36 | import com.couchbase.lite.TransactionalTask; 37 | import com.couchbase.lite.View; 38 | import com.couchbase.lite.internal.RevisionInternal; 39 | import com.couchbase.lite.support.RevisionUtils; 40 | import com.couchbase.lite.support.action.Action; 41 | import com.couchbase.lite.support.action.ActionBlock; 42 | import com.couchbase.lite.support.action.ActionException; 43 | import com.couchbase.lite.support.security.SymmetricKey; 44 | import com.couchbase.lite.util.Log; 45 | import com.couchbase.lite.util.NativeLibUtils; 46 | 47 | import java.io.File; 48 | import java.io.IOException; 49 | import java.net.URL; 50 | import java.util.ArrayList; 51 | import java.util.Collections; 52 | import java.util.Comparator; 53 | import java.util.HashMap; 54 | import java.util.HashSet; 55 | import java.util.Iterator; 56 | import java.util.List; 57 | import java.util.Locale; 58 | import java.util.Map; 59 | import java.util.Set; 60 | import java.util.concurrent.atomic.AtomicBoolean; 61 | import java.util.concurrent.atomic.AtomicInteger; 62 | 63 | import static com.couchbase.cbforest.Constants.C4ErrorDomain.ForestDBDomain; 64 | import static com.couchbase.cbforest.Constants.FDBErrors.FDB_RESULT_HANDLE_BUSY; 65 | 66 | public class ForestDBStore implements Store, EncryptableStore, Constants { 67 | 68 | public static String TAG = Log.TAG_DATABASE; 69 | 70 | private final static String NATIVE_LIB_NAME = "CouchbaseLiteJavaForestDB"; 71 | 72 | /** static constructor */ 73 | static { 74 | try { 75 | System.loadLibrary(NATIVE_LIB_NAME); 76 | } catch (UnsatisfiedLinkError e) { 77 | if (!NativeLibUtils.loadLibrary(NATIVE_LIB_NAME)) 78 | Log.e(TAG, "ERROR: Failed to load %s", NATIVE_LIB_NAME); 79 | } 80 | } 81 | 82 | public static String kDBFilename = "db.forest"; 83 | 84 | private static final int MAX_RECORDS_TO_READ_FROM_FORESTDB_AT_ONCE = 500; 85 | 86 | private static final int kDefaultMaxRevTreeDepth = 20; 87 | 88 | protected String directory; 89 | private String forestPath; 90 | private Manager manager; 91 | protected Database forest; 92 | private StoreDelegate delegate; 93 | private int maxRevTreeDepth; 94 | private boolean autoCompact; 95 | private boolean readOnly = false; 96 | private SymmetricKey encryptionKey; 97 | 98 | private ThreadLocal transactionLevel4Thread = new ThreadLocal() { 99 | 
@Override 100 | protected Integer initialValue() { 101 | return 0; 102 | } 103 | }; 104 | 105 | // Native method for deriving PBDDF2-SHA256 key: 106 | private static native byte[] nativeDerivePBKDF2SHA256Key( 107 | String password, byte[] salt, int rounds); 108 | 109 | /////////////////////////////////////////////////////////////////////////// 110 | // Constructor 111 | /////////////////////////////////////////////////////////////////////////// 112 | 113 | public ForestDBStore(String directory, Manager manager, StoreDelegate delegate) { 114 | assert (new File(directory).isAbsolute()); // path must be absolute 115 | this.directory = directory; 116 | File dir = new File(directory); 117 | if (!dir.exists() || !dir.isDirectory()) { 118 | throw new IllegalArgumentException( 119 | String.format(Locale.ENGLISH, "directory '%s' does not exist or not directory", directory)); 120 | } 121 | this.forestPath = new File(directory, kDBFilename).getPath(); 122 | this.manager = manager; 123 | this.delegate = delegate; 124 | 125 | this.forest = null; 126 | this.autoCompact = true; 127 | this.maxRevTreeDepth = kDefaultMaxRevTreeDepth; 128 | } 129 | 130 | /////////////////////////////////////////////////////////////////////////// 131 | // Implementation of Storage 132 | /////////////////////////////////////////////////////////////////////////// 133 | 134 | /////////////////////////////////////////////////////////////////////////// 135 | // INITIALIZATION AND CONFIGURATION: 136 | /////////////////////////////////////////////////////////////////////////// 137 | 138 | @Override 139 | public boolean databaseExists(String directory) { 140 | if (new File(directory, kDBFilename).exists()) 141 | return true; 142 | // If "db.forest" doesn't exist (auto-compaction will add numeric suffixes), check for meta: 143 | return new File(directory, kDBFilename + ".meta").exists(); 144 | } 145 | 146 | @Override 147 | public void open() throws CouchbaseLiteException { 148 | // Flag: 149 | int flags = readOnly ? 
Database.ReadOnly : Database.Create; 150 | if (autoCompact) 151 | flags |= Database.AutoCompact; 152 | 153 | // Encryption: 154 | int enAlgorithm = Database.NoEncryption; 155 | byte[] enKey = null; 156 | if (encryptionKey != null) { 157 | enAlgorithm = Database.AES256Encryption; 158 | enKey = encryptionKey.getKey(); 159 | } 160 | 161 | try { 162 | forest = new Database(forestPath, flags, enAlgorithm, enKey); 163 | } catch (ForestException e) { 164 | Log.e(TAG, "Failed to open the forestdb: domain=%d, error=%d", e.domain, e.code, e); 165 | if (e.domain == ForestDBDomain && 166 | (e.code == FDBErrors.FDB_RESULT_NO_DB_HEADERS || 167 | e.code == FDBErrors.FDB_RESULT_CRYPTO_ERROR)) { 168 | throw new CouchbaseLiteException("Cannot create database", e, Status.UNAUTHORIZED); 169 | } 170 | throw new CouchbaseLiteException("Cannot create database", e, Status.DB_ERROR); 171 | } 172 | } 173 | 174 | @Override 175 | public void close() { 176 | if (forest != null) { 177 | try { 178 | forest.close(); 179 | } catch (ForestException e) { 180 | Log.e(TAG, "Failed to close Database: " + forest); 181 | } 182 | forest = null; 183 | } 184 | } 185 | 186 | @Override 187 | public void setDelegate(StoreDelegate delegate) { 188 | this.delegate = delegate; 189 | } 190 | 191 | @Override 192 | public StoreDelegate getDelegate() { 193 | return delegate; 194 | } 195 | 196 | @Override 197 | public void setMaxRevTreeDepth(int maxRevTreeDepth) { 198 | this.maxRevTreeDepth = maxRevTreeDepth; 199 | } 200 | 201 | @Override 202 | public int getMaxRevTreeDepth() { 203 | return maxRevTreeDepth; 204 | } 205 | 206 | @Override 207 | public void setAutoCompact(boolean value) { 208 | autoCompact = value; 209 | } 210 | 211 | @Override 212 | public boolean getAutoCompact() { 213 | return autoCompact; 214 | } 215 | 216 | /////////////////////////////////////////////////////////////////////////// 217 | // DATABASE ATTRIBUTES & OPERATIONS: 218 | /////////////////////////////////////////////////////////////////////////// 219 | 220 | // #pragma mark - INFO FOR KEY: 221 | 222 | /** 223 | * TODO return value from long to Status 224 | */ 225 | @Override 226 | public long setInfo(String key, String info) { 227 | final String k = key; 228 | final String i = info; 229 | try { 230 | Status status = inTransaction(new Task() { 231 | @Override 232 | public Status run() { 233 | try { 234 | forest.rawPut("info", k, null, i == null ? 
null : i.getBytes()); 235 | return new Status(Status.OK); 236 | } catch (ForestException e) { 237 | Log.e(TAG, "Error in KeyStoreWriter.set()", e); 238 | return ForestBridge.err2status(e); 239 | } 240 | } 241 | }); 242 | return status.getCode(); 243 | } catch (Exception e) { 244 | Log.e(TAG, "Exception in setInfo()", e); 245 | return Status.UNKNOWN; 246 | } 247 | } 248 | 249 | @Override 250 | public String getInfo(String key) { 251 | try { 252 | byte[][] metaNbody = forest.rawGet("info", key); 253 | return new String(metaNbody[1]); 254 | } catch (ForestException e) { 255 | // KEY NOT FOUND 256 | if (e.domain == ForestDBDomain && 257 | e.code == FDBErrors.FDB_RESULT_KEY_NOT_FOUND) { 258 | Log.i(TAG, "[getInfo()] Key(\"%s\") is not found.", key); 259 | } 260 | // UNEXPECTED ERROR 261 | else { 262 | Log.e(TAG, "[getInfo()] Unexpected Error", e); 263 | } 264 | return null; 265 | } 266 | } 267 | 268 | @Override 269 | public int getDocumentCount() { 270 | return (int) forest.getDocumentCount(); 271 | } 272 | 273 | @Override 274 | public long getLastSequence() { 275 | return forest.getLastSequence(); 276 | } 277 | 278 | @Override 279 | public boolean inTransaction() { 280 | return transactionLevel4Thread.get() > 0; 281 | } 282 | 283 | @Override 284 | public void compact() throws CouchbaseLiteException { 285 | try { 286 | forest.compact(); 287 | } catch (ForestException e) { 288 | Log.e(TAG, "Failed to compact(): domain=%d code=%d", e, e.domain, e.code); 289 | throw new CouchbaseLiteException(Status.UNKNOWN); 290 | } 291 | } 292 | 293 | /** 294 | * @note Throw RuntimeException if TransactionalTask throw Exception. 295 | * Otherwise return true or false 296 | */ 297 | @Override 298 | public boolean runInTransaction(TransactionalTask task) { 299 | if (inTransaction()) 300 | return task.run(); 301 | else { 302 | if (!beginTransaction()) 303 | return false; 304 | boolean commit = true; 305 | try { 306 | commit = task.run(); 307 | } catch (Exception e) { 308 | commit = false; 309 | Log.e(TAG, "[ForestDBStore.runInTransaction()] Error in TransactionalTask", e); 310 | throw new RuntimeException(e); 311 | } finally { 312 | if (!endTransaction(commit)) 313 | return false; 314 | } 315 | return commit; 316 | } 317 | } 318 | 319 | @Override 320 | public RevisionInternal getDocument(String docID, String inRevID, boolean withBody, Status outStatus) { 321 | Document doc = getDocument(docID); 322 | if (doc == null) 323 | return null; 324 | try { 325 | Status res = selectRev(doc, inRevID, withBody); 326 | outStatus.setCode(res.getCode()); 327 | if (outStatus.isError() && outStatus.getCode() != Status.GONE) 328 | return null; 329 | if (inRevID == null && doc.selectedRevDeleted()) { 330 | outStatus.setCode(Status.DELETED); 331 | return null; 332 | } 333 | return ForestBridge.revisionObject(doc, docID, inRevID, withBody); 334 | } finally { 335 | doc.free(); 336 | } 337 | } 338 | 339 | private Document getDocument(String docID) { 340 | try { 341 | return _getDocument(docID); 342 | } catch (CouchbaseLiteException e) { 343 | return null; 344 | } 345 | } 346 | 347 | /** 348 | * @note return value should not be null. 
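 * A CouchbaseLiteException with Status.NOT_FOUND is thrown (after freeing the native handle) when the
 * document does not exist; callers are expected to release the returned handle themselves, e.g.
 * Document doc = _getDocument(docID); try { ... } finally { doc.free(); }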
349 | */ 350 | private Document _getDocument(String docID) throws CouchbaseLiteException { 351 | Document doc; 352 | try { 353 | doc = forest.getDocument(docID, true); 354 | } catch (ForestException e) { 355 | Log.d(TAG, "ForestDB Warning: getDocument(docID, true) docID=[%s] error=[%s]", 356 | docID, e.toString()); 357 | throw new CouchbaseLiteException(ForestBridge.err2status(e)); 358 | } 359 | if (!doc.exists()) { 360 | doc.free(); 361 | throw new CouchbaseLiteException(Status.NOT_FOUND); 362 | } 363 | return doc; 364 | } 365 | 366 | private Status selectRev(Document doc, String revID, boolean withBody) { 367 | Status status = new Status(Status.OK); 368 | if (revID != null) { 369 | try { 370 | doc.selectRevID(revID, withBody); 371 | } catch (ForestException e) { 372 | status = ForestBridge.err2status(e); 373 | } 374 | } else { 375 | if (!doc.selectCurrentRev()) 376 | status = new Status(Status.DELETED); 377 | } 378 | return status; 379 | } 380 | 381 | @Override 382 | public RevisionInternal loadRevisionBody(RevisionInternal rev) 383 | throws CouchbaseLiteException { 384 | Document doc = _getDocument(rev.getDocID()); 385 | Status status = selectRev(doc, rev.getRevID(), true); 386 | if (status.isError()) 387 | throw new CouchbaseLiteException(status); 388 | status = ForestBridge.loadBodyOfRevisionObject(rev, doc); 389 | if (status.isError()) 390 | throw new CouchbaseLiteException(status); 391 | return rev; 392 | } 393 | 394 | @Override 395 | public RevisionInternal getParentRevision(RevisionInternal rev) { 396 | if (rev.getDocID() == null || rev.getRevID() == null) 397 | return null; 398 | Document doc = getDocument(rev.getDocID()); 399 | if (doc == null) 400 | return null; 401 | try { 402 | Status status = selectRev(doc, rev.getRevID(), true); 403 | if (status.isError()) 404 | return null; 405 | if (!doc.selectParentRev()) 406 | return null; 407 | return ForestBridge.revisionObject(doc, rev.getDocID(), null, true); 408 | } finally { 409 | doc.free(); 410 | } 411 | } 412 | 413 | // TODO: Set ancestorRevIDs as additional parameter 414 | @Override 415 | public List getRevisionHistory(RevisionInternal rev) { 416 | Document doc = getDocument(rev.getDocID()); 417 | if (doc == null) 418 | return null; 419 | try { 420 | try { 421 | if (!doc.selectRevID(rev.getRevID(), false)) 422 | return null; 423 | } catch (ForestException e) { 424 | Log.e(TAG, "Error in getRevisionHistory() rev=" + rev, e); 425 | return null; 426 | } 427 | List history = new ArrayList(); 428 | do { 429 | RevisionInternal ancestor = ForestBridge.revisionObject( 430 | doc, rev.getDocID(), null, false); 431 | if (ancestor == null) 432 | break; 433 | ancestor.setMissing(!doc.hasRevisionBody()); 434 | history.add(ancestor); 435 | // TODO: implement once ancestorRevIDs param is given. 
436 | //if(ancestorRevIDs!=null&&ancestorRevIDs.contains(ancestor.getRevID())) 437 | // break; 438 | } while (doc.selectParentRev()); 439 | return history; 440 | } finally { 441 | doc.free(); 442 | } 443 | } 444 | 445 | @Override 446 | public RevisionList getAllRevisions(String docID, boolean onlyCurrent) { 447 | Document doc = getDocument(docID); 448 | if (doc == null) 449 | return null; 450 | try { 451 | RevisionList revs = new RevisionList(); 452 | do { 453 | if (onlyCurrent && !doc.selectedRevLeaf()) 454 | continue; 455 | RevisionInternal rev = ForestBridge.revisionObject(doc, docID, null, false); 456 | if (rev != null) 457 | revs.add(rev); 458 | } while (doc.selectNextRev()); 459 | return revs; 460 | } finally { 461 | doc.free(); 462 | } 463 | } 464 | 465 | @Override 466 | public List getPossibleAncestorRevisionIDs(RevisionInternal rev, 467 | int limit, 468 | AtomicBoolean outHaveBodies, 469 | boolean withBodiesOnly) { 470 | int generation = RevisionInternal.generationFromRevID(rev.getRevID()); 471 | if (generation <= 1) 472 | return null; 473 | 474 | Document doc = getDocument(rev.getDocID()); 475 | if (doc == null) 476 | return null; 477 | try { 478 | if (outHaveBodies != null) outHaveBodies.set(true); 479 | List revIDs = new ArrayList(); 480 | for (int leaf = 1; leaf >= 0; --leaf) { 481 | doc.selectCurrentRev(); 482 | do { 483 | String revID = doc.getSelectedRevID(); 484 | int revFlags = (int) doc.getSelectedRevFlags(); 485 | if (((revFlags & C4RevisionFlags.kRevLeaf) != 0) == (leaf == 1 ? true : false) && 486 | RevisionInternal.generationFromRevID(revID) < generation) { 487 | 488 | if (outHaveBodies != null && !doc.hasRevisionBody()) { 489 | outHaveBodies.set(false); 490 | if (withBodiesOnly) 491 | continue; 492 | } 493 | if (withBodiesOnly) { 494 | byte[] body = null; 495 | try { 496 | body = doc.getSelectedBody(); 497 | } catch (ForestException e) { 498 | Log.e(TAG, e.toString(), e); 499 | } 500 | if (body != null && body.length > 0) { 501 | Map props = getDocProperties(body); 502 | if (props != null && props.containsKey("_removed") && (Boolean) props.get("_removed") == true) 503 | continue; 504 | } 505 | } 506 | 507 | revIDs.add(revID); 508 | if (limit > 0 && revIDs.size() >= limit) 509 | break; 510 | } 511 | } while (doc.selectNextRev()); 512 | if (revIDs.size() > 0) 513 | return revIDs; 514 | } 515 | } finally { 516 | doc.free(); 517 | } 518 | return null; 519 | } 520 | 521 | @Override 522 | public int findMissingRevisions(RevisionList revs) { 523 | int numRevisionsRemoved = 0; 524 | if (revs.size() == 0) 525 | return numRevisionsRemoved; 526 | 527 | RevisionList sortedRevs = (RevisionList) revs.clone(); 528 | sortedRevs.sortByDocID(); 529 | 530 | Document doc = null; 531 | String lastDocID = null; 532 | for (int i = 0; i < sortedRevs.size(); i++) { 533 | RevisionInternal rev = sortedRevs.get(i); 534 | if (!rev.getDocID().equals(lastDocID)) { 535 | lastDocID = rev.getDocID(); 536 | if (doc != null) 537 | doc.free(); 538 | try { 539 | doc = forest.getDocument(rev.getDocID(), true); 540 | } catch (ForestException e) { 541 | Status status = ForestBridge.err2status(e); 542 | if (status.getCode() != Status.NOT_FOUND) 543 | Log.e(TAG, "Error in getDocument() docID=" + rev.getDocID(), e); 544 | doc = null; 545 | } 546 | } 547 | try { 548 | if (doc != null && doc.selectRevID(rev.getRevID(), false)) { 549 | revs.remove(rev); // not missing, so remove from list 550 | numRevisionsRemoved += 1; 551 | } 552 | } catch (ForestException e) { 553 | // ignore 554 | } 555 | } 556 | if (doc != 
null) 557 | doc.free(); 558 | return numRevisionsRemoved; 559 | } 560 | 561 | @Override 562 | public String findCommonAncestorOf(RevisionInternal rev, List revIDs) { 563 | long generation = Revision.generationFromRevID(rev.getRevID()); 564 | if (generation <= 1 || (revIDs == null || revIDs.size() == 0)) 565 | return null; 566 | Collections.sort(revIDs, new Comparator() { 567 | @Override 568 | public int compare(String id1, String id2) { 569 | // descending order of generation 570 | return RevisionInternal.CBLCompareRevIDs(id2, id1); 571 | } 572 | }); 573 | Document doc = getDocument(rev.getDocID()); 574 | if (doc == null) 575 | return null; 576 | String commonAncestor = null; 577 | try { 578 | for (String possibleRevID : revIDs) { 579 | if (Revision.generationFromRevID(possibleRevID) <= generation) { 580 | try { 581 | if (doc.selectRevID(possibleRevID, false)) 582 | commonAncestor = possibleRevID; 583 | } catch (ForestException e) { 584 | Log.i(TAG, "Error in Document.selectRevID() revID=%s", e, possibleRevID); 585 | } 586 | if (commonAncestor != null) 587 | break; 588 | } 589 | } 590 | } finally { 591 | doc.free(); 592 | } 593 | return commonAncestor; 594 | } 595 | 596 | @Override 597 | public Set findAllAttachmentKeys() throws CouchbaseLiteException { 598 | Set keys = new HashSet(); 599 | try { 600 | DocumentIterator itr = forest.iterator(null, null, 0, IteratorFlags.kDefault); 601 | Document doc; 602 | while ((doc = itr.nextDocument()) != null) { 603 | try { 604 | if (!doc.hasAttachments() || (doc.deleted() && !doc.conflicted())) 605 | continue; 606 | // Since db is assumed to have just been compacted, 607 | // we know that non-current revisions 608 | // won't have any bodies. So only scan the current revs. 609 | do { 610 | if (doc.selectedRevHasAttachments()) { 611 | byte[] body = doc.getSelectedBody(); 612 | if (body != null && body.length > 0) { 613 | Map props = getDocProperties(body); 614 | if (props != null && props.containsKey("_attachments")) { 615 | Map attachments = 616 | (Map) props.get("_attachments"); 617 | Iterator itr2 = attachments.keySet().iterator(); 618 | while (itr2.hasNext()) { 619 | String name = itr2.next(); 620 | Map attachment = 621 | (Map) attachments.get(name); 622 | if (attachment != null && 623 | attachment.containsKey("digest")) { 624 | String digest = (String) attachment.get("digest"); 625 | if (digest != null) { 626 | keys.add(new BlobKey(digest)); 627 | } 628 | } 629 | } 630 | } 631 | } 632 | } 633 | } while (doc.selectNextLeaf(false, false)); 634 | } finally { 635 | doc.free(); 636 | } 637 | } 638 | } catch (ForestException e) { 639 | throw new CouchbaseLiteException(ForestBridge.err2status(e)); 640 | } 641 | return keys; 642 | } 643 | 644 | @Override 645 | public Map getAllDocs(QueryOptions options) throws CouchbaseLiteException { 646 | Map result = new HashMap(); 647 | List rows; 648 | 649 | if (options == null) 650 | options = new QueryOptions(); 651 | 652 | boolean includeDocs = (options.isIncludeDocs() || 653 | options.getPostFilter() != null || 654 | options.getAllDocsMode() == Query.AllDocsMode.SHOW_CONFLICTS); 655 | boolean includeDeletedDocs = options.getAllDocsMode() == Query.AllDocsMode.INCLUDE_DELETED; 656 | int limit = options.getLimit(); 657 | int skip = options.getSkip(); 658 | Predicate filter = options.getPostFilter(); 659 | 660 | int iteratorFlags = IteratorFlags.kDefault; 661 | if (!includeDocs) 662 | iteratorFlags &= ~IteratorFlags.kIncludeBodies; 663 | if (options.isDescending()) 664 | iteratorFlags |= IteratorFlags.kDescending; 665 
| if (!options.isInclusiveStart()) 666 | iteratorFlags &= ~IteratorFlags.kInclusiveStart; 667 | if (!options.isInclusiveEnd()) 668 | iteratorFlags &= ~IteratorFlags.kInclusiveEnd; 669 | if (includeDeletedDocs) 670 | iteratorFlags |= IteratorFlags.kIncludeDeleted; 671 | // TODO: kCBLOnlyConflicts 672 | 673 | if (options.getKeys() != null) { 674 | rows = new ArrayList(); 675 | iteratorFlags |= IteratorFlags.kIncludeDeleted; 676 | int total = options.getKeys().size(); 677 | int read = 0; 678 | while (total > read) { // loop till consume all requested docIDs 679 | int plan = Math.min(total - read, MAX_RECORDS_TO_READ_FROM_FORESTDB_AT_ONCE); 680 | String[] docIDs = options.getKeys().subList(read, read + plan).toArray(new String[plan]); 681 | try { 682 | DocumentIterator itr = forest.iterator(docIDs, iteratorFlags); 683 | try { 684 | List retRows = readFromIterator(itr, options, includeDocs, filter, limit); 685 | rows.addAll(retRows); 686 | limit -= retRows.size(); 687 | read += plan; // number of attempted doc IDs as retRows.size() could be smaller than plan. 688 | } finally { 689 | if (itr != null) 690 | itr.close(); 691 | } 692 | } catch (ForestException e) { 693 | if (e.domain == ForestDBDomain && e.code == FDB_RESULT_HANDLE_BUSY) { 694 | Log.w(TAG, "ForestDB handle is busy, retry it after 300ms. error=%s", e.toString()); 695 | try { 696 | Thread.sleep(300); // 300 ms 697 | } catch (InterruptedException ie) { 698 | } 699 | continue; 700 | } else { 701 | Log.e(TAG, "Error in getAllDocs()", e); 702 | return null; 703 | } 704 | } 705 | } 706 | } else { 707 | String startKey; 708 | String endKey; 709 | if (options.isDescending()) { 710 | startKey = (String) View.keyForPrefixMatch( 711 | options.getStartKey(), options.getPrefixMatchLevel()); 712 | endKey = (String) options.getEndKey(); 713 | } else { 714 | startKey = (String) options.getStartKey(); 715 | endKey = (String) View.keyForPrefixMatch( 716 | options.getEndKey(), options.getPrefixMatchLevel()); 717 | } 718 | try { 719 | DocumentIterator itr = forest.iterator(startKey, endKey, skip, iteratorFlags); 720 | try { 721 | rows = readFromIterator(itr, options, includeDocs, filter, limit); 722 | } finally { 723 | if (itr != null) 724 | itr.close(); 725 | } 726 | } catch (ForestException e) { 727 | Log.e(TAG, "Error in getAllDocs()", e); 728 | return null; 729 | } 730 | } 731 | 732 | result.put("rows", rows); 733 | result.put("total_rows", rows.size()); 734 | result.put("offset", options.getSkip()); 735 | return result; 736 | } 737 | 738 | private static List readFromIterator(DocumentIterator itr, 739 | QueryOptions options, 740 | boolean includeDocs, 741 | Predicate filter, 742 | int limit) throws ForestException { 743 | List rows = new ArrayList(); 744 | Document doc; 745 | while ((doc = itr.nextDocument()) != null) { 746 | try { 747 | String docID = doc.getDocID(); 748 | if (!doc.exists()) { 749 | Log.v(TAG, "AllDocs: No such row with key=\"%s\"", docID); 750 | QueryRow row = new QueryRow(null, 0, docID, null, null); 751 | rows.add(row); 752 | continue; 753 | } 754 | 755 | boolean deleted = doc.deleted(); 756 | if (deleted && 757 | options.getAllDocsMode() != Query.AllDocsMode.INCLUDE_DELETED && 758 | options.getKeys() == null) 759 | continue; // skip deleted doc 760 | if (!doc.conflicted() && 761 | options.getAllDocsMode() == Query.AllDocsMode.ONLY_CONFLICTS) 762 | continue; // skip non-conflicted doc 763 | 764 | String revID = doc.getSelectedRevID(); 765 | long sequence = doc.getSelectedSequence(); 766 | 767 | RevisionInternal docRevision = 
null; 768 | if (includeDocs) { 769 | // Fill in the document contents: 770 | docRevision = ForestBridge.revisionObject(doc, docID, revID, true); 771 | if (docRevision == null) 772 | Log.w(TAG, "AllDocs: Unable to read body of doc %s", docID); 773 | } 774 | 775 | List conflicts = new ArrayList(); 776 | if ((options.getAllDocsMode() == Query.AllDocsMode.SHOW_CONFLICTS 777 | || options.getAllDocsMode() == Query.AllDocsMode.ONLY_CONFLICTS) 778 | && doc.conflicted()) { 779 | conflicts = ForestBridge.getCurrentRevisionIDs(doc); 780 | if (conflicts != null && conflicts.size() == 1) 781 | conflicts = null; 782 | } 783 | 784 | Map value = new HashMap(); 785 | value.put("rev", revID); 786 | if (deleted) // Note: In case of false, should not add for java 787 | value.put("deleted", (deleted ? true : null)); 788 | value.put("_conflicts", conflicts);// (not found in CouchDB) 789 | 790 | QueryRow row = new QueryRow(docID, 791 | sequence, 792 | docID, 793 | value, 794 | docRevision); 795 | if (filter != null && !filter.apply(row)) { 796 | Log.v(TAG, " ... on 2nd thought, filter predicate skipped that row"); 797 | continue; 798 | } 799 | rows.add(row); 800 | 801 | if (limit > 0 && --limit == 0) 802 | break; 803 | } finally { 804 | if (doc != null) 805 | doc.free(); 806 | } 807 | } 808 | return rows; 809 | } 810 | 811 | @Override 812 | public RevisionList changesSince(long lastSequence, 813 | ChangesOptions options, 814 | ReplicationFilter filter, 815 | Map filterParams) { 816 | // http://wiki.apache.org/couchdb/HTTP_database_API#Changes 817 | if (options == null) 818 | options = new ChangesOptions(); 819 | 820 | boolean withBody = (options.isIncludeDocs() || filter != null); 821 | int limit = options.getLimit(); 822 | 823 | RevisionList changes = new RevisionList(); 824 | try { 825 | int iteratorFlags = IteratorFlags.kDefault; 826 | iteratorFlags |= IteratorFlags.kIncludeDeleted; 827 | DocumentIterator itr = forest.iterateChanges(lastSequence, iteratorFlags); 828 | try { 829 | Document doc; 830 | while (limit-- > 0 && (doc = itr.nextDocument()) != null) { 831 | try { 832 | Log.v(TAG, "[changesSince()] docID=%s seq=%d conflicted=%s", 833 | doc.getDocID(), doc.getSelectedSequence(), doc.conflicted()); 834 | String docID = doc.getDocID(); 835 | do { 836 | RevisionInternal rev = ForestBridge.revisionObject( 837 | doc, docID, null, withBody); 838 | if (rev == null) 839 | return null; 840 | if (filter == null || delegate.runFilter(filter, filterParams, rev)) { 841 | if (!options.isIncludeDocs()) 842 | rev.setBody(null); 843 | changes.add(rev); 844 | } 845 | } 846 | while (options.isIncludeConflicts() && doc.selectNextLeaf(true, withBody)); 847 | } finally { 848 | doc.free(); 849 | } 850 | } 851 | } finally { 852 | } 853 | } catch (ForestException e) { 854 | Log.e(TAG, "Error in changesSince()", e); 855 | return null; 856 | } 857 | return changes; 858 | } 859 | 860 | /////////////////////////////////////////////////////////////////////////// 861 | // INSERTION / DELETION: 862 | /////////////////////////////////////////////////////////////////////////// 863 | 864 | @Override 865 | public RevisionInternal add(String inDocID, 866 | String inPrevRevID, 867 | Map properties, 868 | boolean deleting, 869 | boolean allowConflict, 870 | StorageValidation validationBlock, 871 | Status outStatus) 872 | throws CouchbaseLiteException { 873 | if (outStatus != null) 874 | outStatus.setCode(Status.OK); 875 | 876 | if (readOnly) 877 | throw new CouchbaseLiteException(Status.FORBIDDEN); 878 | 879 | RevisionInternal putRev = null; 
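// Rough flow of the insertion below: begin a transaction, select and validate the parent revision,
// generate the new revID through the delegate, run any validation block, insert the revision into the
// ForestDB document, save it, then end the transaction and notify the delegate of the change.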
880 | DocumentChange change = null; 881 | 882 | // TODO: need to implement JNI for c4doc_put() 883 | // TODO: use inTransaction(Task) 884 | if (!beginTransaction()) 885 | throw new CouchbaseLiteException(Status.DB_ERROR); 886 | try { 887 | String docID = inDocID; 888 | String prevRevID = inPrevRevID; 889 | 890 | Document doc; 891 | if (docID == null || docID.isEmpty()) 892 | docID = Misc.CreateUUID(); 893 | 894 | try { 895 | doc = forest.getDocument(docID, false); 896 | } catch (ForestException e) { 897 | Log.e(TAG, "ForestDB Error: getDocument(docID, false) docID=[%s]", e, docID); 898 | throw new CouchbaseLiteException(Status.DB_ERROR); 899 | } 900 | 901 | if (properties != null && properties.containsKey("_attachments")) { 902 | Map attachments = (Map) properties.get("_attachments"); 903 | if (attachments != null) { 904 | // https://github.com/couchbase/couchbase-lite-net/issues/749 905 | // Need to ensure revpos is correct for a revision inserted on top of a deletion 906 | if (doc.deleted()) { 907 | Iterator itr = attachments.keySet().iterator(); 908 | while (itr.hasNext()) { 909 | String name = itr.next(); 910 | Map metadata = (Map) attachments.get(name); 911 | if (metadata != null) { 912 | metadata.put("revpos", Revision.generationFromRevID(doc.getRevID()) + 1); 913 | } 914 | } 915 | } 916 | } 917 | } 918 | 919 | byte[] json; 920 | if (properties != null && properties.size() > 0) { 921 | json = RevisionUtils.asCanonicalJSON(properties); 922 | if (json == null) 923 | throw new CouchbaseLiteException( 924 | Status.BAD_JSON); 925 | } else { 926 | json = "{}".getBytes(); 927 | } 928 | 929 | try { 930 | if (prevRevID != null) { 931 | // Updating an existing revision; make sure it exists and is a leaf: 932 | try { 933 | if (!doc.selectRevID(prevRevID, false)) 934 | throw new CouchbaseLiteException(Status.NOT_FOUND); 935 | if (!allowConflict && !doc.selectedRevLeaf()) 936 | throw new CouchbaseLiteException(Status.CONFLICT); 937 | } catch (ForestException e) { 938 | Log.e(TAG, "ForestDB Error: selectRevID(prevRevID, false) prevRevID=[%s]", 939 | e, prevRevID); 940 | throw new CouchbaseLiteException(Status.DB_ERROR); 941 | } 942 | } else { 943 | // No parent revision given: 944 | if (deleting) { 945 | // Didn't specify a revision to delete: NotFound or a Conflict, depending 946 | throw new CouchbaseLiteException(doc.exists() ? 947 | Status.CONFLICT : Status.NOT_FOUND); 948 | } 949 | // If doc exists, current rev must be in a deleted state or 950 | // there will be a conflict: 951 | if (doc.selectCurrentRev()) { 952 | if (doc.selectedRevDeleted()) { 953 | // New rev will be child of the tombstone: 954 | // (T0D0: Write a horror novel called "Child Of The Tombstone"!) 955 | prevRevID = doc.getSelectedRevID(); 956 | } else { 957 | throw new CouchbaseLiteException(Status.CONFLICT); 958 | } 959 | } 960 | } 961 | 962 | // Compute the new revID. 963 | // (Can't be done earlier because prevRevID may have changed.) 
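// (In Couchbase Lite 1.x a revision ID has the form "<generation>-<suffix>": generateRevID bumps the parent's generation and derives the suffix from a digest of the canonical JSON, the deletion flag, and the parent revision ID passed here, so identical edits produce identical revision IDs on every peer.)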
964 | String newRevID = delegate.generateRevID(json, deleting, prevRevID); 965 | if (newRevID == null) 966 | // invalid previous revID (no numeric prefix) 967 | throw new CouchbaseLiteException(Status.BAD_ID); 968 | 969 | // Create the new CBL_Revision: 970 | putRev = new RevisionInternal(docID, newRevID, deleting); 971 | if (properties != null) { 972 | properties.put("_id", docID); 973 | properties.put("_rev", newRevID); 974 | putRev.setProperties(properties); 975 | } 976 | 977 | // Run any validation blocks: 978 | if (validationBlock != null) { 979 | // Fetch the previous revision and validate the new one against it: 980 | RevisionInternal prevRev = null; 981 | if (prevRevID != null) 982 | prevRev = new RevisionInternal(docID, prevRevID, doc.selectedRevDeleted()); 983 | Status status = validationBlock.validate(putRev, prevRev, prevRevID); 984 | if (status.isError()) { 985 | outStatus.setCode(status.getCode()); 986 | throw new CouchbaseLiteException(status); 987 | } 988 | } 989 | 990 | try { 991 | if (doc.insertRevision(newRevID, json, deleting, 992 | putRev.getAttachments() != null, allowConflict)) { 993 | if (deleting) 994 | outStatus.setCode(Status.OK); // 200 995 | else 996 | outStatus.setCode(Status.CREATED); // 201 (created) 997 | } else 998 | outStatus.setCode(Status.OK); // 200 (already exists) 999 | } catch (ForestException e) { 1000 | Log.e(TAG, "Error in insertRevision()", e); 1001 | throw new CouchbaseLiteException(e.code); 1002 | } 1003 | 1004 | // Save the updated doc: 1005 | boolean isWinner; 1006 | try { 1007 | isWinner = saveForest(doc, newRevID, properties); 1008 | } catch (ForestException e) { 1009 | Log.e(TAG, "Error in saveForest()", e); 1010 | throw new CouchbaseLiteException(Status.DB_ERROR); 1011 | } 1012 | putRev.setSequence(doc.getSequence()); 1013 | change = changeWithNewRevision(putRev, isWinner, doc, null); 1014 | } finally { 1015 | doc.free(); 1016 | } 1017 | } finally { 1018 | if (!endTransaction(outStatus.isSuccessful())) 1019 | throw new CouchbaseLiteException(Status.DB_ERROR); 1020 | } 1021 | 1022 | if (change != null) 1023 | delegate.databaseStorageChanged(change); 1024 | return putRev; 1025 | } 1026 | 1027 | private Document getDocumentWithRetry(String docID, boolean mustExist, int retry) 1028 | throws ForestException { 1029 | 1030 | ForestException ex = null; 1031 | for (int i = 0; i < retry; i++) { 1032 | try { 1033 | Document doc = forest.getDocument(docID, mustExist); 1034 | return doc; 1035 | } catch (ForestException fe) { 1036 | ex = fe; 1037 | if (fe.domain == ForestDBDomain && fe.code == FDB_RESULT_HANDLE_BUSY) { 1038 | try { 1039 | Thread.sleep(300); // 300ms 1040 | } catch (InterruptedException e) { 1041 | } 1042 | continue; 1043 | } else { 1044 | throw fe; 1045 | } 1046 | } 1047 | } 1048 | Log.e(TAG, "Retried %s times. But keep failing ForestDB.getDocument() docID=%s", ex, retry, docID); 1049 | throw ex; 1050 | } 1051 | 1052 | /** 1053 | * Add an existing revision of a document (probably being pulled) plus its ancestors. 
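 * The history list is ordered newest-first: history.get(0) is the revision being inserted and each later entry is a progressively older ancestor (generations in descending order).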
1054 | */ 1055 | @Override 1056 | public void forceInsert(RevisionInternal inRev, 1057 | List<String> inHistory, 1058 | final StorageValidation validationBlock, 1059 | URL inSource) 1060 | throws CouchbaseLiteException { 1061 | if (readOnly) 1062 | throw new CouchbaseLiteException(Status.FORBIDDEN); 1063 | 1064 | final byte[] json = inRev.getJson(); 1065 | if (json == null) 1066 | throw new CouchbaseLiteException(Status.BAD_JSON); 1067 | 1068 | final RevisionInternal rev = inRev.copy(); 1069 | final List<String> history = inHistory; 1070 | final URL source = inSource; 1071 | 1072 | final DocumentChange[] change = new DocumentChange[1]; 1073 | 1074 | // TODO: need to implement JNI for c4doc_put() 1075 | Status status = inTransaction(new Task() { 1076 | @Override 1077 | public Status run() { 1078 | try { 1079 | // First get the CBForest doc: 1080 | Document doc = getDocumentWithRetry(rev.getDocID(), false, 5); 1081 | try { 1082 | int common = doc.insertRevisionWithHistory( 1083 | json, 1084 | rev.isDeleted(), 1085 | rev.getAttachments() != null, 1086 | history.toArray(new String[history.size()])); 1087 | if (common < 0) 1088 | // generation numbers not in descending order 1089 | return new Status(Status.BAD_REQUEST); 1090 | else if (common == 0) 1091 | // No-op: No new revisions were inserted. 1092 | return new Status(Status.OK); 1093 | // Validate against the common ancestor: 1094 | if (validationBlock != null) { 1095 | RevisionInternal prev = null; 1096 | if (common < history.size()) { 1097 | String revID = history.get(common); 1098 | if (!doc.selectRevID(revID, false)) { 1099 | Log.w(TAG, "Unable to select RevID: " + revID); 1100 | return new Status(Status.BAD_REQUEST); 1101 | } 1102 | prev = new RevisionInternal(rev.getDocID(), revID, doc.deleted()); 1103 | } 1104 | String parentRevID = (history.size() > 1) ? history.get(1) : null; 1105 | Status status = validationBlock.validate(rev, prev, parentRevID); 1106 | if (status.isError()) 1107 | return status; 1108 | } 1109 | // Save updated doc back to the database: 1110 | boolean isWinner = saveForest(doc, history.get(0), rev.getProperties()); 1111 | rev.setSequence(doc.getSelectedSequence()); 1112 | change[0] = changeWithNewRevision(rev, isWinner, doc, source); 1113 | return new Status(Status.CREATED); 1114 | } finally { 1115 | doc.free(); 1116 | } 1117 | } catch (ForestException e) { 1118 | Log.e(TAG, "ForestDB Error: forceInsert()", e); 1119 | return new Status(Status.UNKNOWN); 1120 | } 1121 | } 1122 | }); 1123 | 1124 | if (change[0] != null) 1125 | delegate.databaseStorageChanged(change[0]); 1126 | 1127 | if (status.isError()) 1128 | throw new CouchbaseLiteException(status.getCode()); 1129 | } 1130 | 1131 | @Override 1132 | public Map purgeRevisions(Map<String, List<String>> inDocsToRevs) { 1133 | final Map result = new HashMap(); 1134 | final Map<String, List<String>> docsToRevs = inDocsToRevs; 1135 | Status status = inTransaction(new Task() { 1136 | @Override 1137 | public Status run() { 1138 | for (String docID : docsToRevs.keySet()) { 1139 | List revsPurged = new ArrayList(); 1140 | List revIDs = docsToRevs.get(docID); 1141 | if (revIDs == null) { 1142 | return new Status(Status.BAD_PARAM); 1143 | } else if (revIDs.size() == 0) { 1144 | // nothing to do. 
1145 | } else if (revIDs.contains("*")) { 1146 | // Delete all revisions if magic "*" revision ID is given: 1147 | try { 1148 | forest.purgeDoc(docID); 1149 | } catch (ForestException e) { 1150 | return ForestBridge.err2status(e); 1151 | } 1152 | notifyPurgedDocument(docID); 1153 | revsPurged.add("*"); 1154 | Log.v(TAG, "Purged doc '%s'", docID); 1155 | } else { 1156 | Document doc; 1157 | try { 1158 | doc = forest.getDocument(docID, true); 1159 | } catch (ForestException e) { 1160 | return ForestBridge.err2status(e); 1161 | } 1162 | try { 1163 | List purged = new ArrayList(); 1164 | for (String revID : revIDs) { 1165 | try { 1166 | if (doc.purgeRevision(revID) > 0) 1167 | purged.add(revID); 1168 | } catch (ForestException e) { 1169 | Log.e(TAG, "error in purgeRevision()", e); 1170 | } 1171 | } 1172 | if (purged.size() > 0) { 1173 | try { 1174 | doc.save(maxRevTreeDepth); 1175 | } catch (ForestException e) { 1176 | return ForestBridge.err2status(e); 1177 | } 1178 | Log.v(TAG, "Purged doc '%s' revs '%s'", docID, revIDs); 1179 | } 1180 | revsPurged = purged; 1181 | } finally { 1182 | doc.free(); 1183 | } 1184 | } 1185 | result.put(docID, revsPurged); 1186 | } 1187 | return new Status(Status.OK); 1188 | } 1189 | }); 1190 | return result; 1191 | } 1192 | 1193 | /////////////////////////////////////////////////////////////////////////// 1194 | // EXPIRATION: 1195 | /////////////////////////////////////////////////////////////////////////// 1196 | 1197 | /** 1198 | * @return Java Time 1199 | */ 1200 | @Override 1201 | public long expirationOfDocument(String docID) { 1202 | try { 1203 | return forest.expirationOfDoc(docID) * 1000L; 1204 | } catch (ForestException e) { 1205 | Log.e(TAG, "Error: expirationOfDoc() docID=%s", e, docID); 1206 | return 0; 1207 | } 1208 | } 1209 | 1210 | @Override 1211 | public boolean setExpirationOfDocument(long timestamp, String docID) { 1212 | try { 1213 | forest.setExpiration(docID, timestamp); 1214 | return true; 1215 | } catch (ForestException e) { 1216 | Log.e(TAG, "Error: setExpiration() docID=%s", e, docID); 1217 | return false; 1218 | } 1219 | } 1220 | 1221 | /** 1222 | * @return Java Time 1223 | */ 1224 | @Override 1225 | public long nextDocumentExpiry() { 1226 | long expiry = 0; 1227 | try { 1228 | expiry = forest.nextDocExpiration() * 1000L; 1229 | } catch (ForestException e) { 1230 | Log.e(TAG, "Error: nextDocExpiration()", e); 1231 | } 1232 | return expiry; 1233 | } 1234 | 1235 | @Override 1236 | public int purgeExpiredDocuments() { 1237 | final AtomicInteger purged = new AtomicInteger(); 1238 | runInTransaction(new TransactionalTask() { 1239 | @Override 1240 | public boolean run() { 1241 | try { 1242 | String[] docIDs = forest.purgeExpiredDocuments(); 1243 | for (String docID : docIDs) 1244 | notifyPurgedDocument(docID); 1245 | purged.set(docIDs.length); 1246 | return true; 1247 | } catch (ForestException e) { 1248 | Log.e(TAG, "Error: purgeExpiredDocuments()", e); 1249 | return false; 1250 | } 1251 | } 1252 | }); 1253 | return purged.get(); 1254 | } 1255 | 1256 | private void notifyPurgedDocument(String docID) { 1257 | delegate.databaseStorageChanged(new DocumentChange(docID)); 1258 | } 1259 | 1260 | 1261 | @Override 1262 | public ViewStore getViewStorage(String name, boolean create) throws CouchbaseLiteException { 1263 | return new ForestDBViewStore(this, name, create); 1264 | } 1265 | 1266 | @Override 1267 | public List getAllViewNames() { 1268 | List result = new ArrayList(); 1269 | String[] fileNames = new File(directory).list(); 1270 | for (String 
filename : fileNames) { 1271 | try { 1272 | result.add(ForestDBViewStore.fileNameToViewName(filename)); 1273 | } catch (CouchbaseLiteException e) { 1274 | // ignore 1275 | } 1276 | } 1277 | return result; 1278 | } 1279 | 1280 | @Override 1281 | public RevisionInternal getLocalDocument(String docID, String revID) { 1282 | if (docID == null || !docID.startsWith("_local/")) 1283 | return null; 1284 | byte[][] metaNbody; 1285 | try { 1286 | metaNbody = forest.rawGet("_local", docID); 1287 | } catch (ForestException e) { 1288 | return null; 1289 | } 1290 | 1291 | // meta -> revID 1292 | String gotRevID = new String(metaNbody[0]); 1293 | if (gotRevID == null || (revID != null && !revID.equals(gotRevID))) 1294 | return null; 1295 | 1296 | // body -> properties 1297 | Map properties = getDocProperties(metaNbody[1]); 1298 | if (properties == null) 1299 | return null; 1300 | 1301 | properties.put("_id", docID); 1302 | properties.put("_rev", gotRevID); 1303 | RevisionInternal result = new RevisionInternal(docID, gotRevID, false); 1304 | result.setProperties(properties); 1305 | return result; 1306 | } 1307 | 1308 | @Override 1309 | public RevisionInternal putLocalRevision(final RevisionInternal revision, 1310 | final String prevRevID, 1311 | final boolean obeyMVCC) 1312 | throws CouchbaseLiteException { 1313 | final String docID = revision.getDocID(); 1314 | if (!docID.startsWith("_local/")) 1315 | throw new CouchbaseLiteException(Status.BAD_ID); 1316 | 1317 | if (revision.isDeleted()) { 1318 | // DELETE: 1319 | Status status = deleteLocalDocument(docID, prevRevID, obeyMVCC); 1320 | if (status.isSuccessful()) 1321 | return revision; 1322 | else 1323 | throw new CouchbaseLiteException(status.getCode()); 1324 | } else { 1325 | // PUT: 1326 | final RevisionInternal[] result = new RevisionInternal[1]; 1327 | Status status = inTransaction(new Task() { 1328 | @Override 1329 | public Status run() { 1330 | byte[] json = revision.getJson(); 1331 | if (json == null) 1332 | return new Status(Status.BAD_JSON); 1333 | 1334 | byte[][] metaNbody = null; 1335 | try { 1336 | metaNbody = forest.rawGet("_local", docID); 1337 | } catch (ForestException e) { 1338 | } 1339 | 1340 | int generation = RevisionInternal.generationFromRevID(prevRevID); 1341 | if (obeyMVCC) { 1342 | if (prevRevID != null) { 1343 | if (metaNbody != null && !prevRevID.equals(new String(metaNbody[0]))) 1344 | return new Status(Status.CONFLICT); 1345 | if (generation == 0) 1346 | return new Status(Status.BAD_ID); 1347 | } else { 1348 | if (metaNbody != null) 1349 | return new Status(Status.CONFLICT); 1350 | } 1351 | } 1352 | String newRevID = String.format(Locale.ENGLISH, "%d-local", generation + 1); 1353 | try { 1354 | forest.rawPut("_local", docID, newRevID.getBytes(), json); 1355 | } catch (ForestException e) { 1356 | return ForestBridge.err2status(e); 1357 | } 1358 | result[0] = revision.copyWithDocID(docID, newRevID); 1359 | return new Status(Status.CREATED); 1360 | } 1361 | }); 1362 | 1363 | if (status.isSuccessful()) 1364 | return result[0]; 1365 | else 1366 | throw new CouchbaseLiteException(status.getCode()); 1367 | } 1368 | } 1369 | 1370 | /////////////////////////////////////////////////////////////////////////// 1371 | // Internal (PROTECTED & PRIVATE) METHODS 1372 | /////////////////////////////////////////////////////////////////////////// 1373 | 1374 | private boolean saveForest(Document doc, 1375 | String revID, 1376 | Map properties) 1377 | throws ForestException { 1378 | // after insertRevision, the selected revision is inserted 
revision. 1379 | // need to select current revision. 1380 | doc.selectCurrentRev(); 1381 | // Is the new revision the winner? 1382 | boolean isWinner = doc.getSelectedRevID().equalsIgnoreCase(revID); 1383 | // Update the documentType: 1384 | if (!isWinner) 1385 | properties = ForestBridge.bodyOfSelectedRevision(doc); 1386 | if (properties != null && properties.containsKey("type") && 1387 | properties.get("type") instanceof String) 1388 | doc.setType((String) properties.get("type")); 1389 | // save 1390 | doc.save(maxRevTreeDepth); 1391 | return isWinner; 1392 | } 1393 | 1394 | private DocumentChange changeWithNewRevision(RevisionInternal inRev, 1395 | boolean isWinningRev, 1396 | Document doc, 1397 | URL source) { 1398 | String winningRevID = isWinningRev ? inRev.getRevID() : doc.getSelectedRevID(); 1399 | return new DocumentChange(inRev, winningRevID, doc.conflicted(), source); 1400 | } 1401 | 1402 | private boolean beginTransaction() { 1403 | try { 1404 | forest.beginTransaction(); 1405 | transactionLevel4Thread.set(transactionLevel4Thread.get() + 1); 1406 | } catch (ForestException e) { 1407 | Log.e(TAG, "Failed to begin transaction", e); 1408 | return false; 1409 | } 1410 | return true; 1411 | } 1412 | 1413 | private boolean endTransaction(boolean commit) { 1414 | try { 1415 | transactionLevel4Thread.set(transactionLevel4Thread.get() - 1); 1416 | forest.endTransaction(commit); 1417 | } catch (ForestException e) { 1418 | Log.e(TAG, "Failed to end transaction", e); 1419 | return false; 1420 | } 1421 | delegate.storageExitedTransaction(commit); 1422 | return true; 1423 | } 1424 | 1425 | private static Map getDocProperties(byte[] body) { 1426 | try { 1427 | return Manager.getObjectMapper().readValue(body, Map.class); 1428 | } catch (IOException e) { 1429 | return null; 1430 | } 1431 | } 1432 | 1433 | /** 1434 | * CBLDatabase+LocalDocs.m 1435 | * - (CBLStatus) deleteLocalDocumentWithID: (NSString*)docID 1436 | * revisionID: (NSString*)revID 1437 | * obeyMVCC: (BOOL)obeyMVCC; 1438 | */ 1439 | private Status deleteLocalDocument( 1440 | final String inDocID, String inRevID, final boolean obeyMVCC) { 1441 | final String docID = inDocID; 1442 | final String revID = inRevID; 1443 | 1444 | if (docID == null || !docID.startsWith("_local/")) 1445 | return new Status(Status.BAD_ID); 1446 | if (obeyMVCC && revID == null) 1447 | // Didn't specify a revision to delete: 404 or a 409, depending 1448 | return new Status(getLocalDocument(docID, null) != null ? 1449 | Status.CONFLICT : Status.NOT_FOUND); 1450 | 1451 | return inTransaction(new Task() { 1452 | @Override 1453 | public Status run() { 1454 | try { 1455 | byte[][] metaNbody = forest.rawGet("_local", docID); 1456 | if (metaNbody == null) { 1457 | return new Status(Status.NOT_FOUND); 1458 | } else if (obeyMVCC && revID != null && 1459 | !revID.equals(new String(metaNbody[0]))) { 1460 | return new Status(Status.CONFLICT); 1461 | } else { 1462 | forest.rawPut("_local", docID, null, null); 1463 | return new Status(Status.OK); 1464 | } 1465 | } catch (ForestException e) { 1466 | return ForestBridge.err2status(e); 1467 | } 1468 | } 1469 | }); 1470 | } 1471 | 1472 | @Override 1473 | public void setEncryptionKey(SymmetricKey key) { 1474 | encryptionKey = key; 1475 | } 1476 | 1477 | @Override 1478 | public SymmetricKey getEncryptionKey() { 1479 | return encryptionKey; 1480 | } 1481 | 1482 | @Override 1483 | public Action actionToChangeEncryptionKey(final SymmetricKey newKey) { 1484 | Action action = new Action(); 1485 | 1486 | // Re-key the views! 
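// (The Action built below pairs each re-key step with a back-out step: every view index is scheduled for re-keying first, then the database itself, whose back-out block re-keys back to the old key so a failure partway through can be undone.)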
1487 | List viewNames = getAllViewNames(); 1488 | for (String viewName : viewNames) { 1489 | try { 1490 | ForestDBViewStore viewStorage = (ForestDBViewStore) getViewStorage(viewName, true); 1491 | action.add(viewStorage.getActionToChangeEncryptionKey()); 1492 | } catch (CouchbaseLiteException ex) { 1493 | Log.w(TAG, "Error in getViewStorage() viewName=" + viewName, ex); 1494 | } 1495 | } 1496 | 1497 | // Re-key the database: 1498 | final SymmetricKey oldKey = encryptionKey; 1499 | action.add( 1500 | new ActionBlock() { 1501 | @Override 1502 | public void execute() throws ActionException { 1503 | int algorithm = Database.NoEncryption; 1504 | byte[] key = null; 1505 | if (newKey != null) { 1506 | algorithm = Database.AES256Encryption; 1507 | key = newKey.getKey(); 1508 | } 1509 | try { 1510 | forest.rekey(algorithm, key); 1511 | setEncryptionKey(newKey); 1512 | } catch (ForestException e) { 1513 | throw new ActionException("Cannot rekey to the new key", e); 1514 | } 1515 | } 1516 | }, 1517 | new ActionBlock() { 1518 | @Override 1519 | public void execute() throws ActionException { 1520 | int algorithm = Database.NoEncryption; 1521 | byte[] key = null; 1522 | if (oldKey != null) { 1523 | algorithm = Database.AES256Encryption; 1524 | key = oldKey.getKey(); 1525 | } 1526 | try { 1527 | // FIX: This can potentially fail. If it did, the database would be lost 1528 | // It would be safer to save & restore the old db file, the one that 1529 | // got replaced 1530 | // during rekeying, but the ForestDB API doesn't allow preserving it... 1531 | forest.rekey(algorithm, key); 1532 | setEncryptionKey(oldKey); 1533 | } catch (ForestException e) { 1534 | throw new ActionException("Cannot rekey to the old key", e); 1535 | } 1536 | } 1537 | }, null 1538 | ); 1539 | 1540 | return action; 1541 | } 1542 | 1543 | @Override 1544 | public byte[] derivePBKDF2SHA256Key(String password, byte[] salt, int rounds) 1545 | throws CouchbaseLiteException { 1546 | byte[] key = nativeDerivePBKDF2SHA256Key(password, salt, rounds); 1547 | if (key == null) 1548 | throw new CouchbaseLiteException("Cannot derive key for the password", 1549 | Status.BAD_REQUEST); 1550 | return key; 1551 | } 1552 | 1553 | private interface Task { 1554 | Status run(); 1555 | } 1556 | 1557 | private Status inTransaction(Task task) { 1558 | if (inTransaction()) 1559 | return task.run(); 1560 | else { 1561 | if (!beginTransaction()) 1562 | return new Status(Status.DB_ERROR); 1563 | boolean commit = false; 1564 | try { 1565 | Status status = task.run(); 1566 | commit = !status.isError(); 1567 | return status; 1568 | } finally { 1569 | if (!endTransaction(commit)) 1570 | return new Status(Status.DB_ERROR); 1571 | } 1572 | } 1573 | } 1574 | } 1575 | --------------------------------------------------------------------------------
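All of the write paths above funnel through inTransaction(Task): it reuses a transaction that is already open on the current thread (tracked by the per-thread transactionLevel4Thread counter) and otherwise opens its own, committing only if the task's Status is not an error. Below is a minimal, self-contained sketch of that pattern; SimpleStore, its Task, and the begin()/end() stubs are hypothetical stand-ins for the JNI-backed forest.beginTransaction()/endTransaction(commit) calls and are not part of this repository.

// Illustrative only -- not part of this repository.
public class SimpleStore {

    /** Work to run inside a transaction; return false to request a rollback. */
    public interface Task {
        boolean run();
    }

    // Per-thread nesting level, analogous to transactionLevel4Thread above.
    private final ThreadLocal<Integer> txLevel = new ThreadLocal<Integer>() {
        @Override
        protected Integer initialValue() {
            return 0;
        }
    };

    private boolean inTransaction() {
        return txLevel.get() > 0;
    }

    private void begin() {
        // A real store would open a storage-engine transaction here.
        txLevel.set(txLevel.get() + 1);
    }

    private void end(boolean commit) {
        txLevel.set(txLevel.get() - 1);
        // A real store would commit or abort the storage-engine transaction here.
        System.out.println(commit ? "commit" : "abort");
    }

    /** Reuse the caller's open transaction if there is one, otherwise open our own. */
    public boolean runInTransaction(Task task) {
        if (inTransaction())
            return task.run();
        begin();
        boolean commit = false;
        try {
            commit = task.run();
            return commit;
        } finally {
            end(commit);
        }
    }

    public static void main(String[] args) {
        final SimpleStore store = new SimpleStore();
        boolean ok = store.runInTransaction(new Task() {
            @Override
            public boolean run() {
                // The nested call sees the open transaction and does not commit on its own.
                return store.runInTransaction(new Task() {
                    @Override
                    public boolean run() {
                        return true;
                    }
                });
            }
        });
        System.out.println("committed: " + ok); // prints "commit" once, then "committed: true"
    }
}

Keeping the nesting count in a ThreadLocal is what lets helpers such as purgeRevisions() and putLocalRevision() call inTransaction() freely without committing or aborting a transaction that an outer caller still owns.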