├── .gitignore ├── LICENSE ├── README.md ├── SConscript ├── build.py ├── run-jstests.py ├── src ├── pmse_change.cpp ├── pmse_change.h ├── pmse_engine.cpp ├── pmse_engine.h ├── pmse_engine_test.cpp ├── pmse_index_cursor.cpp ├── pmse_index_cursor.h ├── pmse_init.cpp ├── pmse_init_test.cpp ├── pmse_list.cpp ├── pmse_list.h ├── pmse_list_int_ptr.cpp ├── pmse_list_int_ptr.h ├── pmse_map.h ├── pmse_record_store.cpp ├── pmse_record_store.h ├── pmse_record_store_test.cpp ├── pmse_recovery_unit.cpp ├── pmse_recovery_unit.h ├── pmse_sorted_data_interface.cpp ├── pmse_sorted_data_interface.h ├── pmse_sorted_data_interface_test.cpp ├── pmse_standard_record_store_test.cpp ├── pmse_tree.cpp └── pmse_tree.h ├── tests ├── README.md ├── crashtest_delete_01.sh ├── crashtest_delete_02.sh ├── crashtest_delete_03.sh ├── crashtest_delete_04.sh ├── crashtest_insert_01.sh ├── crashtest_insert_02.sh ├── crashtest_insert_03.sh ├── crashtest_insert_04.sh ├── crashtest_insert_05.sh ├── crashtest_insert_06.sh ├── crashtest_insert_07.sh ├── crashtest_insert_08.sh ├── crashtest_insert_09.sh ├── crashtest_insert_10.sh ├── crashtest_insert_11.sh ├── crashtest_insert_12.sh ├── crashtest_insert_13.sh ├── crashtest_update_01.sh ├── crashtest_update_02.sh ├── crashtest_update_03.sh ├── crashtest_update_04.sh ├── crashtest_update_05.sh ├── crashtest_update_06.sh ├── gdb │ ├── crashtest_delete_01.gdb │ ├── crashtest_delete_02.gdb │ ├── crashtest_delete_03.gdb │ ├── crashtest_delete_04.gdb │ ├── crashtest_insert_01.gdb │ ├── crashtest_insert_02.gdb │ ├── crashtest_insert_03.gdb │ ├── crashtest_insert_04.gdb │ ├── crashtest_insert_05.gdb │ ├── crashtest_insert_06.gdb │ ├── crashtest_insert_07.gdb │ ├── crashtest_insert_08.gdb │ ├── crashtest_insert_09.gdb │ ├── crashtest_insert_10.gdb │ ├── crashtest_insert_11.gdb │ ├── crashtest_insert_12.gdb │ ├── crashtest_insert_13.gdb │ ├── crashtest_update_01.gdb │ ├── crashtest_update_02.gdb │ ├── crashtest_update_03.gdb │ ├── crashtest_update_04.gdb │ ├── crashtest_update_05.gdb │ └── crashtest_update_06.gdb ├── js │ ├── crashtest_delete_01.js │ ├── crashtest_delete_02.js │ ├── crashtest_delete_03.js │ ├── crashtest_delete_04.js │ ├── crashtest_insert_01.js │ ├── crashtest_insert_02.js │ ├── crashtest_insert_03.js │ ├── crashtest_insert_04.js │ ├── crashtest_insert_05.js │ ├── crashtest_insert_06.js │ ├── crashtest_insert_07.js │ ├── crashtest_insert_08.js │ ├── crashtest_insert_09.js │ ├── crashtest_insert_10.js │ ├── crashtest_insert_11.js │ ├── crashtest_insert_12.js │ ├── crashtest_insert_13.js │ ├── crashtest_update_01.js │ ├── crashtest_update_02.js │ ├── crashtest_update_03.js │ ├── crashtest_update_04.js │ ├── crashtest_update_05.js │ └── crashtest_update_06.js └── run_test.sh └── utils ├── README.md ├── create_table.js ├── drop_table.js ├── parser.py ├── path_configuration.txt ├── run_suite.py └── run_workload.sh /.gitignore: -------------------------------------------------------------------------------- 1 | # Object files 2 | *.o 3 | *.ko 4 | *.obj 5 | *.elf 6 | 7 | # Precompiled Headers 8 | *.gch 9 | *.pch 10 | 11 | # Libraries 12 | *.lib 13 | *.a 14 | *.la 15 | *.lo 16 | 17 | # Shared objects (inc. 
Windows DLLs) 18 | *.dll 19 | *.so 20 | *.so.* 21 | *.dylib 22 | 23 | # Executables 24 | *.exe 25 | *.out 26 | *.app 27 | *.i*86 28 | *.x86_64 29 | *.hex 30 | 31 | # Debug files 32 | *.dSYM/ 33 | *.su 34 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2014-2020, Intel Corporation 2 | 3 | Redistribution and use in source and binary forms, with or without 4 | modification, are permitted provided that the following conditions 5 | are met: 6 | 7 | * Redistributions of source code must retain the above copyright 8 | notice, this list of conditions and the following disclaimer. 9 | 10 | * Redistributions in binary form must reproduce the above copyright 11 | notice, this list of conditions and the following disclaimer in 12 | the documentation and/or other materials provided with the 13 | distribution. 14 | 15 | * Neither the name of the copyright holder nor the names of its 16 | contributors may be used to endorse or promote products derived 17 | from this software without specific prior written permission. 18 | 19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 23 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # PROJECT NOT UNDER ACTIVE MANAGEMENT # 2 | This project will no longer be maintained by Intel. 3 | Intel has ceased development and contributions including, but not limited to, maintenance, bug fixes, new releases, or updates, to this project. 4 | Intel no longer accepts patches to this project. 5 | If you have an ongoing need to use this project, are interested in independently developing it, or would like to maintain patches for the open source software community, please create your own fork of this project. 6 | 7 | # PMSE - Persistent Memory Storage Engine for MongoDB 8 | 9 | ## What is PMSE? 10 | The Persistent Memory Storage Engine (PMSE) is an alternative storage engine for MongoDB, fully compatible with NVDIMMs, that makes optimal use of persistent memory. The engine does no snapshots or journaling; every write is immediately persistent. 11 | 12 | ## Dependencies 13 | - [Persistent Memory Development Kit (PMDK)](https://github.com/pmem/pmdk) 14 | - [C++ Bindings for PMDK library](https://github.com/pmem/libpmemobj-cpp) 15 | - [MongoDB](https://github.com/mongodb/mongo) 16 | 17 | ## Building 18 | Our engine is compatible with MongoDB 3.5.13, so please check out the proper tag: **git checkout r3.5.13** 19 | Be sure you have satisfied all dependencies for MongoDB and PMSE, especially the pip requirements and PMDK. 
20 | First, make sure you can compile MongoDB itself by following this guide: https://github.com/mongodb/mongo/wiki/Build-Mongodb-From-Source 21 | To use the PMSE module with MongoDB, run the following in the mongo repository directory: 22 | ``` 23 | cd ~/mongo 24 | git checkout r3.5.13 -b r3.5.13 25 | pip2 install -r buildscripts/requirements.txt 26 | mkdir -p src/mongo/db/modules/ 27 | ln -sf ~/pmse src/mongo/db/modules/pmse 28 | ``` 29 | Then you can compile: 30 | ``` 31 | scons LIBPATH=path_to_pmdk_libraries --dbg=off --opt=on core 32 | ``` 33 | or, if you run into compilation problems: 34 | ``` 35 | python2 buildscripts/scons.py LIBPATH=path_to_pmdk_libraries -j $(nproc --all) core --disable-warnings-as-errors --dbg=off --opt=on 36 | ``` 37 | Some operating systems ship a newer GCC, so you should use GCC 5; to do that, pass the CC and CXX flags to scons: 38 | ``` 39 | scons CC=gcc-5 CXX=g++-5 LIBPATH=/usr/local/lib --dbg=off --opt=on core 40 | ``` 41 | 42 | The typical library path for PMDK is /usr/local/lib/ or /usr/local/lib64/, depending on the system you use. 43 | On some systems (e.g. Fedora) you need to point PKG_CONFIG_PATH at the PMDK libraries before compiling libpmemobj-cpp. 44 | 45 | To clean up after building: 46 | ``` 47 | scons -c 48 | ``` 49 | 50 | ## Tips for building 51 | To speed up the build, use the **-j** option with the number of threads; the behavior is the same as for make. 52 | You can also specify what to build; a typical configuration needs only the core features to run the server and client. Possible targets are: **core**, **all**, **unittests**. 53 | 54 | ## Server running 55 | The last thing you need is to pass the path to your persistent memory mount point or DAX device: 56 | ``` 57 | ./mongod --storageEngine=pmse --dbpath=/path/to/pm_device 58 | ``` 59 | 60 | ## Benchmarking 61 | If you want to run some benchmarks, go to the utils folder and read its README.md file. 
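For functional testing (rather than benchmarking), the repository root also provides run-jstests.py, which drives MongoDB's buildscripts/resmoke.py with --storageEngine=pmse and prints a per-test PASSED/FAILED/TIMEOUT summary. A minimal invocation sketch, with the flags taken from the script's own argument parser and the paths used purely as placeholders for your mongo checkout and persistent-memory mount:
```
python3 run-jstests.py --mongo-root ~/mongo --dbpath /path/to/pm_device/jstests --suite core
```
Optionally, --timeout sets the per-test timeout in seconds (default 300) and -t/--tests restricts the run to selected tests from the suite.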
62 | 63 | ## Authors 64 | - Adrian Nidzgorski 65 | - Jakub Schmiegel 66 | - Krzysztof Filipek 67 | - Maciej Maciejewski 68 | -------------------------------------------------------------------------------- /SConscript: -------------------------------------------------------------------------------- 1 | # -*- mode: python -*- 2 | Import("env") 3 | 4 | env = env.Clone() 5 | 6 | env.InjectMongoIncludePaths() 7 | 8 | env.Library( 9 | target= 'storage_pmse_base', 10 | source= [ 11 | 'src/pmse_engine.cpp', 12 | 'src/pmse_record_store.cpp', 13 | 'src/pmse_list_int_ptr.cpp', 14 | 'src/pmse_list.cpp', 15 | 'src/pmse_sorted_data_interface.cpp', 16 | 'src/pmse_tree.cpp', 17 | 'src/pmse_index_cursor.cpp', 18 | 'src/pmse_recovery_unit.cpp', 19 | 'src/pmse_change.cpp' 20 | ], 21 | LIBDEPS= [ 22 | '$BUILD_DIR/mongo/base', 23 | '$BUILD_DIR/mongo/db/namespace_string', 24 | '$BUILD_DIR/mongo/db/catalog/collection_options', 25 | '$BUILD_DIR/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store', 26 | '$BUILD_DIR/mongo/db/storage/kv/kv_storage_engine', 27 | 28 | ], 29 | SYSLIBDEPS=[ 30 | "pmem", 31 | "pmemobj", 32 | ] 33 | ) 34 | 35 | env.Library( 36 | target='storage_pmse', 37 | source=[ 38 | 'src/pmse_init.cpp', 39 | ], 40 | LIBDEPS=[ 41 | 'storage_pmse_base', 42 | ], 43 | LIBDEPS_DEPENDENTS=['$BUILD_DIR/mongo/db/serveronly'] 44 | ) 45 | 46 | env.CppUnitTest( 47 | target='pmse_init_test', 48 | source=['src/pmse_init_test.cpp'], 49 | LIBDEPS=['$BUILD_DIR/mongo/db/serveronly'] 50 | ) 51 | 52 | env.CppUnitTest( 53 | target= 'pmse_engine_test', 54 | source= [ 55 | 'src/pmse_engine_test.cpp' 56 | ], 57 | LIBDEPS= [ 58 | '$BUILD_DIR/mongo/db/storage/kv/kv_engine_test_harness', 59 | '$BUILD_DIR/mongo/s/client/sharding_client', 60 | '$BUILD_DIR/mongo/base', 61 | '$BUILD_DIR/mongo/db/bson/dotted_path_support', 62 | '$BUILD_DIR/mongo/db/catalog/collection', 63 | '$BUILD_DIR/mongo/db/catalog/collection_options', 64 | '$BUILD_DIR/mongo/db/concurrency/lock_manager', 65 | '$BUILD_DIR/mongo/db/concurrency/write_conflict_exception', 66 | '$BUILD_DIR/mongo/db/index/index_descriptor', 67 | '$BUILD_DIR/mongo/db/mongod_options', 68 | '$BUILD_DIR/mongo/db/namespace_string', 69 | '$BUILD_DIR/mongo/db/repl/repl_settings', 70 | '$BUILD_DIR/mongo/db/server_options_core', 71 | '$BUILD_DIR/mongo/db/service_context', 72 | '$BUILD_DIR/mongo/db/storage/index_entry_comparison', 73 | '$BUILD_DIR/mongo/db/storage/journal_listener', 74 | '$BUILD_DIR/mongo/db/storage/key_string', 75 | '$BUILD_DIR/mongo/db/storage/kv/kv_prefix', 76 | '$BUILD_DIR/mongo/db/storage/oplog_hack', 77 | '$BUILD_DIR/mongo/db/storage/storage_options', 78 | '$BUILD_DIR/mongo/util/concurrency/ticketholder', 79 | '$BUILD_DIR/mongo/util/elapsed_tracker', 80 | '$BUILD_DIR/mongo/util/processinfo', 81 | 'storage_pmse_base' 82 | ] 83 | ) 84 | 85 | env.Library( 86 | target= 'additional_pmse_record_store_tests', 87 | source= [ 88 | 'src/pmse_record_store_test.cpp', 89 | ], 90 | LIBDEPS= [ 91 | '$BUILD_DIR/mongo/base', 92 | '$BUILD_DIR/mongo/db/bson/dotted_path_support', 93 | '$BUILD_DIR/mongo/db/catalog/collection', 94 | '$BUILD_DIR/mongo/db/catalog/collection_options', 95 | '$BUILD_DIR/mongo/db/concurrency/lock_manager', 96 | '$BUILD_DIR/mongo/db/concurrency/write_conflict_exception', 97 | '$BUILD_DIR/mongo/db/index/index_descriptor', 98 | '$BUILD_DIR/mongo/db/mongod_options', 99 | '$BUILD_DIR/mongo/db/namespace_string', 100 | '$BUILD_DIR/mongo/db/repl/repl_settings', 101 | '$BUILD_DIR/mongo/db/server_options_core', 102 | '$BUILD_DIR/mongo/db/service_context', 
103 | '$BUILD_DIR/mongo/db/storage/index_entry_comparison', 104 | '$BUILD_DIR/mongo/db/storage/journal_listener', 105 | '$BUILD_DIR/mongo/db/storage/key_string', 106 | '$BUILD_DIR/mongo/db/storage/kv/kv_prefix', 107 | '$BUILD_DIR/mongo/db/storage/oplog_hack', 108 | '$BUILD_DIR/mongo/db/storage/storage_options', 109 | '$BUILD_DIR/mongo/util/concurrency/ticketholder', 110 | '$BUILD_DIR/mongo/util/elapsed_tracker', 111 | '$BUILD_DIR/mongo/util/processinfo', 112 | 'storage_pmse_base', 113 | '$BUILD_DIR/mongo/db/storage/kv/kv_engine_core', 114 | '$BUILD_DIR/mongo/db/storage/record_store_test_harness', 115 | '$BUILD_DIR/mongo/util/clock_source_mock' 116 | ] 117 | ) 118 | 119 | env.CppUnitTest( 120 | target= 'storage_pmse_record_store_test', 121 | source= [ 122 | 'src/pmse_standard_record_store_test.cpp' 123 | ], 124 | LIBDEPS= [ 125 | 'additional_pmse_record_store_tests' 126 | ] 127 | ) 128 | 129 | env.CppUnitTest( 130 | target= 'pmse_sorted_data_interface_test', 131 | source= [ 132 | 'src/pmse_sorted_data_interface_test.cpp', 133 | ], 134 | LIBDEPS= [ 135 | '$BUILD_DIR/mongo/db/storage/kv/kv_engine_core', 136 | '$BUILD_DIR/mongo/db/storage/sorted_data_interface_test_harness', 137 | '$BUILD_DIR/mongo/s/client/sharding_client', 138 | '$BUILD_DIR/mongo/base', 139 | '$BUILD_DIR/mongo/db/bson/dotted_path_support', 140 | '$BUILD_DIR/mongo/db/catalog/collection', 141 | '$BUILD_DIR/mongo/db/catalog/collection_options', 142 | '$BUILD_DIR/mongo/db/concurrency/lock_manager', 143 | '$BUILD_DIR/mongo/db/concurrency/write_conflict_exception', 144 | '$BUILD_DIR/mongo/db/index/index_descriptor', 145 | '$BUILD_DIR/mongo/db/mongod_options', 146 | '$BUILD_DIR/mongo/db/namespace_string', 147 | '$BUILD_DIR/mongo/db/repl/repl_settings', 148 | '$BUILD_DIR/mongo/db/server_options_core', 149 | '$BUILD_DIR/mongo/db/service_context', 150 | '$BUILD_DIR/mongo/db/storage/index_entry_comparison', 151 | '$BUILD_DIR/mongo/db/storage/journal_listener', 152 | '$BUILD_DIR/mongo/db/storage/key_string', 153 | '$BUILD_DIR/mongo/db/storage/kv/kv_prefix', 154 | '$BUILD_DIR/mongo/db/storage/oplog_hack', 155 | '$BUILD_DIR/mongo/db/storage/storage_options', 156 | '$BUILD_DIR/mongo/util/concurrency/ticketholder', 157 | '$BUILD_DIR/mongo/util/elapsed_tracker', 158 | '$BUILD_DIR/mongo/util/processinfo', 159 | 'storage_pmse_base' 160 | ] 161 | ) 162 | -------------------------------------------------------------------------------- /build.py: -------------------------------------------------------------------------------- 1 | 2 | def configure(conf, env): 3 | print("Configuring pmse storage engine module") 4 | 5 | -------------------------------------------------------------------------------- /run-jstests.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # Copyright 2017-2018, Intel Corporation 4 | # 5 | # Redistribution and use in source and binary forms, with or without 6 | # modification, are permitted provided that the following conditions 7 | # are met: 8 | # 9 | # * Redistributions of source code must retain the above copyright 10 | # notice, this list of conditions and the following disclaimer. 11 | # 12 | # * Redistributions in binary form must reproduce the above copyright 13 | # notice, this list of conditions and the following disclaimer in 14 | # the documentation and/or other materials provided with the 15 | # distribution. 
16 | # 17 | # * Neither the name of the copyright holder nor the names of its 18 | # contributors may be used to endorse or promote products derived 19 | # from this software without specific prior written permission. 20 | # 21 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 | # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 | # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 | # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 | # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 | # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 | # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 | # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 | # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 | 33 | 34 | from argparse import ArgumentParser 35 | from os import linesep, path 36 | from subprocess import run, TimeoutExpired, STDOUT, PIPE, DEVNULL 37 | from time import perf_counter 38 | from collections import OrderedDict 39 | 40 | 41 | def get_tests_for_suite(suite, mongo_root, test_binary): 42 | cmd = test_binary + ['--suites={}'.format(suite), '-n'] 43 | proc = run(cmd, stdout=PIPE, cwd=mongo_root) 44 | out = proc.stdout.decode('utf-8').splitlines() 45 | 46 | tests = [path.join(mongo_root, line) for line in out if line.startswith( 47 | 'jstests') and line.endswith('.js')] 48 | 49 | return tests 50 | 51 | 52 | def get_cmd_args(): 53 | parser = ArgumentParser( 54 | description='Run jstests/core tests with resmoke.py') 55 | parser.add_argument('-m', '--mongo-root', required=True, 56 | help='Path to mongo source root directory.abs') 57 | parser.add_argument('-d', '--dbpath', required=True, 58 | help='Directory where database is created.') 59 | parser.add_argument('-s', '--suite', required=True, help='Suite to run.') 60 | parser.add_argument('--timeout', type=int, default=5 * 61 | 60, help='Test case timeout in seconds.') 62 | parser.add_argument( 63 | '-t', 64 | '--tests', 65 | nargs='+', 66 | help='Tests from selected suite to run, default: run all.') 67 | return parser.parse_args() 68 | 69 | 70 | def execute_tests(args): 71 | test_dir = path.join(args.mongo_root, 'jstests', args.suite) 72 | test_binary = [path.join(args.mongo_root, 'buildscripts', 'resmoke.py')] 73 | test_args = ['--continueOnFailure', 74 | '--storageEngine=pmse', 75 | '--suites={}'.format(args.suite), 76 | '--dbpath={}'.format(args.dbpath)] 77 | 78 | if args.tests: 79 | tests = args.tests 80 | else: 81 | tests = get_tests_for_suite(args.suite, args.mongo_root, test_binary) 82 | 83 | failed = [] 84 | passed_warnings = OrderedDict() 85 | timeout = [] 86 | out = '' 87 | 88 | margin = len(max(tests, key=len)) + 8 89 | for test in sorted(tests): 90 | cmd = test_binary + test_args 91 | cmd.append(path.join(test_dir, test)) 92 | print_output = False 93 | skipped = False 94 | 95 | print('{} ...'.format(test).ljust(margin), end='', flush=True) 96 | 97 | start = perf_counter() 98 | try: 99 | proc = run(cmd, stderr=STDOUT, stdout=PIPE, 100 | cwd=args.mongo_root, timeout=args.timeout) 101 | except TimeoutExpired: 102 | run('pgrep mongod | xargs kill -9', 103 | shell=True, stdout=DEVNULL, stderr=STDOUT) 104 | timeout.append(test) 105 | print('TIMEOUT', end='') 106 | else: 107 | out = 
proc.stdout.decode('utf-8') 108 | if proc.returncode == 0: 109 | if "No tests ran" in out: 110 | print('SKIPPED', end='') 111 | skipped = True 112 | else: 113 | print('PASSED', end='') 114 | elif 'were skipped, 0 failed, 0 errored' in out: 115 | print('PASSED WITH WARNINGS. Test exited with code {}'.format( 116 | proc.returncode), end='') 117 | passed_warnings[test] = proc.returncode 118 | print_output = True 119 | else: 120 | print('FAILED', end='') 121 | failed.append(test) 122 | print_output = True 123 | finally: 124 | elapsed_ms = (perf_counter() - start) * 1000 125 | print('\t{0:.3f} [ms]'.format(elapsed_ms)) 126 | if print_output: 127 | print(out) 128 | if not skipped: 129 | run('rm -r {}/job0'.format(args.dbpath), shell=True) 130 | 131 | return tests, failed, timeout, passed_warnings 132 | 133 | 134 | def print_summary(tests, failed, timeout, passed_warnings): 135 | if not failed and not timeout: 136 | print('All tests passed') 137 | else: 138 | print('{0}Out of {1} tests {2} failed:'.format( 139 | linesep, len(tests), len(failed) + len(timeout))) 140 | for test in failed: 141 | print(test) 142 | for test in timeout: 143 | print('{} (TIMEOUT)'.format(test)) 144 | 145 | if passed_warnings: 146 | print('{} tests passed but exited with non-zero code:'.format(len(passed_warnings))) 147 | for test, returncode in passed_warnings.items(): 148 | print('{0} ({1})'.format(test, returncode)) 149 | 150 | 151 | def main(): 152 | args = get_cmd_args() 153 | tests, failed, timeout, passed_warnings = execute_tests(args) 154 | print_summary(tests, failed, timeout, passed_warnings) 155 | 156 | 157 | if __name__ == '__main__': 158 | main() 159 | -------------------------------------------------------------------------------- /src/pmse_change.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014-2020, Intel Corporation 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * 11 | * * Redistributions in binary form must reproduce the above copyright 12 | * notice, this list of conditions and the following disclaimer in 13 | * the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * * Neither the name of the copyright holder nor the names of its 17 | * contributors may be used to endorse or promote products derived 18 | * from this software without specific prior written permission. 19 | * 20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 24 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
31 | */ 32 | 33 | #define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kStorage 34 | 35 | #include 36 | 37 | #include "mongo/db/operation_context.h" 38 | #include "mongo/db/storage/recovery_unit.h" 39 | #include "mongo/util/log.h" 40 | 41 | #include "pmse_change.h" 42 | #include "pmse_map.h" 43 | 44 | namespace mongo { 45 | 46 | TruncateChange::TruncateChange(pool_base pop, PmseMap *mapper, 47 | RecordId Id, InitData *data, uint64_t dataSize) 48 | : _mapper(mapper), _Id(Id), _pop(pop), _dataSize(dataSize) { 49 | _cachedData = static_cast(malloc(sizeof(InitData) + data->size)); 50 | memcpy(_cachedData->data, data->data, data->size); 51 | _cachedData->size = data->size; 52 | } 53 | 54 | void TruncateChange::commit() {} 55 | 56 | void TruncateChange::rollback() { 57 | persistent_ptr obj; 58 | persistent_ptr temp = nullptr; 59 | try { 60 | transaction::exec_tx(_pop, [this, &obj, &temp] { 61 | obj = pmemobj_tx_alloc(sizeof(InitData::size) + _cachedData->size, 1); 62 | obj->size = _cachedData->size; 63 | memcpy(obj->data, _cachedData->data, _cachedData->size); 64 | temp = make_persistent(); 65 | temp->idValue = static_cast(_Id.repr()); 66 | }); 67 | } catch (std::exception &e) { 68 | log() << e.what(); 69 | } 70 | _mapper->insertToFrontKV(temp, obj); 71 | _mapper->changeSize(_dataSize); 72 | } 73 | 74 | DropListChange::DropListChange(pool_base pop, persistent_ptr list, int size) 75 | : _pop(pop), _list(list), _size(size) {} 76 | 77 | void DropListChange::commit() {} 78 | 79 | void DropListChange::rollback() { 80 | try { 81 | make_persistent_atomic(_pop, _list, _size); 82 | } catch(std::exception &e) { 83 | log() << e.what(); 84 | } 85 | } 86 | 87 | InsertChange::InsertChange(persistent_ptr> mapper, 88 | RecordId loc, uint64_t dataSize) 89 | : _mapper(mapper), _loc(loc), _dataSize(dataSize) {} 90 | 91 | void InsertChange::commit() {} 92 | 93 | void InsertChange::rollback() { 94 | _mapper->remove((uint64_t) _loc.repr()); 95 | _mapper->changeSize(-_dataSize); 96 | } 97 | 98 | RemoveChange::RemoveChange(pool_base pop, InitData* data, uint64_t dataSize) 99 | : _pop(pop), _dataSize(dataSize) { 100 | _cachedData = static_cast(malloc(sizeof(InitData) + data->size)); 101 | memcpy(_cachedData->data, data->data, data->size); 102 | _cachedData->size = data->size; 103 | } 104 | RemoveChange::~RemoveChange() { 105 | free(_cachedData); 106 | } 107 | void RemoveChange::commit() {} 108 | void RemoveChange::rollback() { 109 | persistent_ptr obj; 110 | _mapper = pool(_pop).get_root()->kvmap_root_ptr; 111 | try { 112 | transaction::exec_tx(_pop, [this, &obj] { 113 | obj = pmemobj_tx_alloc(sizeof(InitData::size) + _cachedData->size, 1); 114 | obj->size = _cachedData->size; 115 | memcpy(obj->data, _cachedData->data, _cachedData->size); 116 | }); 117 | } catch (std::exception &e) { 118 | log() << e.what(); 119 | } 120 | _mapper->insert(obj); 121 | _mapper->changeSize(_dataSize); 122 | } 123 | 124 | UpdateChange::UpdateChange(pool_base pop, uint64_t key, InitData* data, uint64_t dataSize) 125 | : _pop(pop), _key(key), _dataSize(dataSize) { 126 | _cachedData = static_cast(malloc(sizeof(InitData) + data->size)); 127 | memcpy(_cachedData->data, data->data, data->size); 128 | _cachedData->size = data->size; 129 | } 130 | UpdateChange::~UpdateChange() { 131 | free(_cachedData); 132 | } 133 | void UpdateChange::commit() {} 134 | void UpdateChange::rollback() { 135 | persistent_ptr obj; 136 | try { 137 | _mapper = pool(_pop).get_root()->kvmap_root_ptr; 138 | transaction::exec_tx(_pop, [this, &obj] { 139 | obj = 
pmemobj_tx_alloc(sizeof(InitData::size) + _cachedData->size, 1); 140 | obj->size = _cachedData->size; 141 | memcpy(obj->data, _cachedData->data, _cachedData->size); 142 | }); 143 | _mapper->updateKV(_key, obj); 144 | auto rd = RecordData(obj->data, obj->size); 145 | _mapper->changeSize(rd.size() - _dataSize); 146 | } catch (std::exception &e) { 147 | log() << e.what(); 148 | } 149 | } 150 | InsertIndexChange::InsertIndexChange(persistent_ptr tree, 151 | pool_base pop, BSONObj key, 152 | RecordId loc, bool dupsAllowed, 153 | const IndexDescriptor* desc) 154 | : _tree(tree), _pop(pop), _key(key), _loc(loc), 155 | _dupsAllowed(dupsAllowed), _desc(desc) {} 156 | 157 | void InsertIndexChange::commit() {} 158 | 159 | void InsertIndexChange::rollback() { 160 | try { 161 | transaction::exec_tx(_pop, [this] { 162 | IndexKeyEntry entry(_key.getOwned(), _loc); 163 | _tree->remove(_pop, entry, _dupsAllowed, _desc->keyPattern()); 164 | }); 165 | } catch (std::exception &e) { 166 | log() << e.what(); 167 | } 168 | } 169 | 170 | RemoveIndexChange::RemoveIndexChange(persistent_ptr tree, pool_base pop, BSONObj key, RecordId loc, 171 | bool dupsAllowed, BSONObj ordering) 172 | : _tree(tree), _pop(pop), _key(key), _loc(loc), 173 | _dupsAllowed(dupsAllowed), _ordering(ordering) {} 174 | void RemoveIndexChange::commit() {} 175 | void RemoveIndexChange::rollback() { 176 | try { 177 | transaction::exec_tx(_pop, [this] { 178 | IndexKeyEntry entry(_key.getOwned(), _loc); 179 | _tree->insert(_pop, entry, _ordering, _dupsAllowed); 180 | }); 181 | } catch (std::exception &e) { 182 | log() << e.what(); 183 | } 184 | } 185 | 186 | } // namespace mongo 187 | -------------------------------------------------------------------------------- /src/pmse_change.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014-2020, Intel Corporation 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * 11 | * * Redistributions in binary form must reproduce the above copyright 12 | * notice, this list of conditions and the following disclaimer in 13 | * the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * * Neither the name of the copyright holder nor the names of its 17 | * contributors may be used to endorse or promote products derived 18 | * from this software without specific prior written permission. 19 | * 20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 24 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
31 | */ 32 | 33 | #ifndef SRC_PMSE_CHANGE_H_ 34 | #define SRC_PMSE_CHANGE_H_ 35 | 36 | #include 37 | #include 38 | 39 | #include "pmse_list_int_ptr.h" 40 | #include "pmse_tree.h" 41 | 42 | #include "mongo/db/index/index_descriptor.h" 43 | #include "mongo/db/storage/record_data.h" 44 | #include "mongo/db/record_id.h" 45 | 46 | namespace mongo { 47 | template 48 | class PmseMap; 49 | 50 | class TruncateChange: public RecoveryUnit::Change { 51 | public: 52 | TruncateChange(pool_base pop, PmseMap *mapper, RecordId Id, InitData *data, uint64_t dataSize); 53 | virtual void rollback(); 54 | virtual void commit(); 55 | private: 56 | PmseMap *_mapper; 57 | RecordId _Id; 58 | InitData *_cachedData; 59 | pool_base _pop; 60 | persistent_ptr _key; 61 | uint64_t _dataSize; 62 | }; 63 | 64 | class DropListChange: public RecoveryUnit::Change { 65 | public: 66 | DropListChange(pool_base pop, persistent_ptr list, int size); 67 | virtual void rollback(); 68 | virtual void commit(); 69 | private: 70 | pool_base _pop; 71 | persistent_ptr _list; 72 | int _size; 73 | }; 74 | 75 | class InsertChange : public RecoveryUnit::Change { 76 | public: 77 | InsertChange(persistent_ptr> mapper, RecordId loc, uint64_t dataSize); 78 | virtual void rollback(); 79 | virtual void commit(); 80 | private: 81 | persistent_ptr> _mapper; 82 | const RecordId _loc; 83 | uint64_t _dataSize; 84 | }; 85 | 86 | class RemoveChange : public RecoveryUnit::Change { 87 | public: 88 | RemoveChange(pool_base pop, InitData* data, uint64_t dataSize); 89 | ~RemoveChange(); 90 | virtual void rollback(); 91 | virtual void commit(); 92 | private: 93 | pool_base _pop; 94 | InitData *_cachedData; 95 | uint64_t _dataSize; 96 | persistent_ptr> _mapper; 97 | }; 98 | 99 | class UpdateChange : public RecoveryUnit::Change { 100 | public: 101 | UpdateChange(pool_base pop, uint64_t key, InitData* data, uint64_t dataSize); 102 | ~UpdateChange(); 103 | virtual void rollback(); 104 | virtual void commit(); 105 | private: 106 | pool_base _pop; 107 | uint64_t _key; 108 | InitData *_cachedData; 109 | uint64_t _dataSize; 110 | persistent_ptr> _mapper; 111 | }; 112 | 113 | class InsertIndexChange : public RecoveryUnit::Change { 114 | public: 115 | InsertIndexChange(persistent_ptr tree, pool_base pop, 116 | BSONObj key, RecordId loc, bool dupsAllowed, 117 | const IndexDescriptor* desc); 118 | virtual void rollback(); 119 | virtual void commit(); 120 | private: 121 | persistent_ptr _tree; 122 | pool_base _pop; 123 | BSONObj _key; 124 | RecordId _loc; 125 | bool _dupsAllowed; 126 | const IndexDescriptor*_desc; 127 | }; 128 | 129 | class RemoveIndexChange : public RecoveryUnit::Change { 130 | public: 131 | RemoveIndexChange(persistent_ptr tree, pool_base pop, BSONObj key, RecordId loc, 132 | bool dupsAllowed, BSONObj ordering); 133 | virtual void rollback(); 134 | virtual void commit(); 135 | private: 136 | persistent_ptr _tree; 137 | pool_base _pop; 138 | BSONObj _key; 139 | RecordId _loc; 140 | bool _dupsAllowed; 141 | BSONObj _ordering; 142 | }; 143 | 144 | } // namespace mongo 145 | #endif // SRC_PMSE_CHANGE_H_ 146 | -------------------------------------------------------------------------------- /src/pmse_engine.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014-2020, Intel Corporation 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 8 | * * Redistributions of source code must retain 
the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * 11 | * * Redistributions in binary form must reproduce the above copyright 12 | * notice, this list of conditions and the following disclaimer in 13 | * the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * * Neither the name of the copyright holder nor the names of its 17 | * contributors may be used to endorse or promote products derived 18 | * from this software without specific prior written permission. 19 | * 20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 24 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 | */ 32 | 33 | #define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kStorage 34 | 35 | #include "pmse_engine.h" 36 | #include "pmse_record_store.h" 37 | #include "pmse_sorted_data_interface.h" 38 | 39 | #include 40 | #include 41 | 42 | #include "mongo/platform/basic.h" 43 | #include "mongo/base/disallow_copying.h" 44 | #include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h" 45 | #include "mongo/db/storage/record_store.h" 46 | #include "mongo/stdx/memory.h" 47 | #include "mongo/db/catalog/collection_options.h" 48 | #include "mongo/util/log.h" 49 | 50 | #include 51 | 52 | namespace mongo { 53 | 54 | PmseEngine::PmseEngine(std::string dbpath) : _dbPath(dbpath) { 55 | if(!boost::algorithm::ends_with(dbpath, "/")) { 56 | _dbPath = _dbPath +"/"; 57 | } 58 | std::string path = _dbPath + _kIdentFilename.toString(); 59 | if (!boost::filesystem::exists(path)) { 60 | pop = pool::create(path, "pmse_identlist", 4 * PMEMOBJ_MIN_POOL, 61 | 0664); 62 | log() << "Engine pool created"; 63 | } else { 64 | pop = pool::open(path, "pmse_identlist"); 65 | log() << "Engine pool opened"; 66 | } 67 | 68 | try { 69 | auto root = pop.get_root(); 70 | if (!root->list_root_ptr) { 71 | transaction::exec_tx(pop, [this, &root] { 72 | root->list_root_ptr = make_persistent(pop); 73 | }); 74 | } 75 | _identList = root->list_root_ptr; 76 | } catch (std::exception& e) { 77 | log() << "Error while creating PMSE engine:" << e.what(); 78 | } 79 | _identList->setPool(pop); 80 | if(!_identList->isAfterSafeShutdown()) { 81 | _needCheck = true; 82 | } else { 83 | _needCheck = false; 84 | } 85 | _identList->resetState(); 86 | } 87 | 88 | PmseEngine::~PmseEngine() { 89 | for (auto p : _poolHandler) { 90 | p.second.close(); 91 | } 92 | pop.close(); 93 | } 94 | 95 | Status PmseEngine::createRecordStore(OperationContext* opCtx, StringData ns, StringData ident, 96 | const CollectionOptions& options) { 97 | stdx::lock_guard lock(_pmutex); 98 | auto status = Status::OK(); 99 | try { 100 | _identList->insertKV(ident.toString().c_str(), ns.toString().c_str()); 101 | auto record_store = stdx::make_unique(ns, ident, options, _dbPath, 
&_poolHandler); 102 | } catch(std::exception &e) { 103 | status = Status(ErrorCodes::OutOfDiskSpace, e.what()); 104 | } 105 | return status; 106 | } 107 | 108 | std::unique_ptr PmseEngine::getRecordStore(OperationContext* opCtx, 109 | StringData ns, 110 | StringData ident, 111 | const CollectionOptions& options) { 112 | persistent_ptr> _mapper; 113 | pool mapPoolOld; 114 | try { 115 | mapPoolOld = pool((_poolHandler).at(ident.toString())); 116 | } catch (std::exception &e) {} 117 | 118 | if (mapPoolOld.get_handle()) { 119 | _mapper = mapPoolOld.get_root()->kvmap_root_ptr; 120 | _mapper->storeCounters(); 121 | } 122 | _identList->update(ident.toString().c_str(), ns.toString().c_str()); 123 | return stdx::make_unique(ns, ident, options, _dbPath, 124 | &_poolHandler, (_needCheck ? true : false)); 125 | } 126 | 127 | Status PmseEngine::createSortedDataInterface(OperationContext* opCtx, 128 | StringData ident, 129 | const IndexDescriptor* desc) { 130 | stdx::lock_guard lock(_pmutex); 131 | try { 132 | _identList->insertKV(ident.toString().c_str(), ""); 133 | auto sorted_data_interface = PmseSortedDataInterface(ident, desc, _dbPath, &_poolHandler); 134 | } catch (std::exception &e) { 135 | return Status(ErrorCodes::OutOfDiskSpace, e.what()); 136 | } 137 | return Status::OK(); 138 | } 139 | 140 | SortedDataInterface* PmseEngine::getSortedDataInterface(OperationContext* opCtx, 141 | StringData ident, 142 | const IndexDescriptor* desc) { 143 | return new PmseSortedDataInterface(ident, desc, _dbPath, &_poolHandler); 144 | } 145 | 146 | Status PmseEngine::dropIdent(OperationContext* opCtx, StringData ident) { 147 | stdx::lock_guard lock(_pmutex); 148 | boost::filesystem::path path(_dbPath); 149 | _identList->deleteKV(ident.toString().c_str()); 150 | if (_poolHandler.count(ident.toString()) > 0) { 151 | _poolHandler[ident.toString()].close(); 152 | _poolHandler.erase(ident.toString()); 153 | } 154 | boost::filesystem::remove_all(path.string() + ident.toString()); 155 | return Status::OK(); 156 | } 157 | 158 | } // namespace mongo 159 | -------------------------------------------------------------------------------- /src/pmse_engine.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014-2020, Intel Corporation 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * 11 | * * Redistributions in binary form must reproduce the above copyright 12 | * notice, this list of conditions and the following disclaimer in 13 | * the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * * Neither the name of the copyright holder nor the names of its 17 | * contributors may be used to endorse or promote products derived 18 | * from this software without specific prior written permission. 19 | * 20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 24 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 | */ 32 | 33 | #ifndef SRC_PMSE_ENGINE_H_ 34 | #define SRC_PMSE_ENGINE_H_ 35 | 36 | #include 37 | #include 38 | #include 39 | #include 40 | #include 41 | 42 | #include 43 | #include 44 | 45 | #include 46 | #include 47 | #include 48 | #include 49 | #include 50 | #include 51 | #include 52 | 53 | #include "pmse_list.h" 54 | #include "pmse_recovery_unit.h" 55 | 56 | #include "mongo/db/storage/kv/kv_engine.h" 57 | 58 | namespace mongo { 59 | 60 | class JournalListener; 61 | 62 | using namespace pmem::obj; 63 | 64 | struct ident_entry { 65 | persistent_ptr next; 66 | p value; 67 | }; 68 | 69 | class PmseEngine : public KVEngine { 70 | public: 71 | explicit PmseEngine(std::string dbpath); 72 | 73 | virtual ~PmseEngine(); 74 | 75 | virtual RecoveryUnit* newRecoveryUnit() { 76 | return new PmseRecoveryUnit(); 77 | } 78 | 79 | virtual Status createRecordStore(OperationContext* opCtx, 80 | StringData ns, 81 | StringData ident, 82 | const CollectionOptions& options); 83 | 84 | virtual std::unique_ptr getRecordStore(OperationContext* opCtx, 85 | StringData ns, 86 | StringData ident, 87 | const CollectionOptions& options); 88 | 89 | virtual Status createSortedDataInterface(OperationContext* opCtx, 90 | StringData ident, 91 | const IndexDescriptor* desc); 92 | 93 | virtual SortedDataInterface* getSortedDataInterface(OperationContext* opCtx, 94 | StringData ident, 95 | const IndexDescriptor* desc); 96 | 97 | virtual Status dropIdent(OperationContext* opCtx, StringData ident); 98 | 99 | virtual bool supportsDocLocking() const { 100 | return true; 101 | } 102 | 103 | virtual Status beginBackup(OperationContext* txn) { 104 | return Status::OK(); 105 | } 106 | 107 | virtual void endBackup(OperationContext* txn) { 108 | return; 109 | } 110 | 111 | virtual bool supportsDirectoryPerDB() const { 112 | return false; 113 | } 114 | 115 | virtual bool isDurable() const { 116 | return true; 117 | } 118 | 119 | virtual bool isEphemeral() const { 120 | return false; 121 | } 122 | 123 | virtual int64_t getIdentSize(OperationContext* opCtx, StringData ident) { 124 | // TODO(kfilipek): Implement getIdentSize 125 | return 1; 126 | } 127 | 128 | virtual Status repairIdent(OperationContext* opCtx, StringData ident) { 129 | return Status::OK(); 130 | } 131 | 132 | virtual bool hasIdent(OperationContext* opCtx, StringData ident) const { 133 | return _identList->hasKey(ident.toString().c_str()); 134 | } 135 | 136 | std::vector getAllIdents(OperationContext* opCtx) const { 137 | return _identList->getKeys(); 138 | } 139 | 140 | virtual void cleanShutdown() { 141 | // If not clean shutdown start scanning all collections 142 | _identList->safeShutdown(); 143 | } 144 | 145 | void setJournalListener(JournalListener* jl) final {} 146 | 147 | private: 148 | stdx::mutex _pmutex; 149 | bool _needCheck; 150 | std::map _poolHandler; 151 | std::shared_ptr _catalogInfo; 152 | std::string _dbPath; 153 | const StringData _kIdentFilename = "pmkv.pm"; 154 | pool pop; 155 | persistent_ptr 
_identList; 156 | }; 157 | } // namespace mongo 158 | 159 | #endif // SRC_PMSE_ENGINE_H_ 160 | -------------------------------------------------------------------------------- /src/pmse_engine_test.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014-2020, Intel Corporation 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * 11 | * * Redistributions in binary form must reproduce the above copyright 12 | * notice, this list of conditions and the following disclaimer in 13 | * the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * * Neither the name of the copyright holder nor the names of its 17 | * contributors may be used to endorse or promote products derived 18 | * from this software without specific prior written permission. 19 | * 20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 24 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
31 | */ 32 | 33 | #include "mongo/platform/basic.h" 34 | #include "mongo/base/init.h" 35 | #include "mongo/db/storage/kv/kv_engine_test_harness.h" 36 | #include "mongo/stdx/memory.h" 37 | #include "mongo/unittest/temp_dir.h" 38 | #include "mongo/util/clock_source_mock.h" 39 | 40 | #include "mongo/db/modules/pmse/src/pmse_engine.h" 41 | #include "mongo/db/modules/pmse/src/pmse_record_store.h" 42 | 43 | #include 44 | #include 45 | #include 46 | #include 47 | #include 48 | 49 | namespace mongo { 50 | 51 | class PmseKVHarnessHelper : public KVHarnessHelper { 52 | public: 53 | PmseKVHarnessHelper() : _dbpath("psmem_0") { 54 | _engine.reset(new PmseEngine(_dbpath.path())); 55 | } 56 | 57 | virtual ~PmseKVHarnessHelper() { 58 | _engine.reset(NULL); 59 | } 60 | 61 | virtual KVEngine* restartEngine() { 62 | _engine.reset(NULL); 63 | _engine.reset(new PmseEngine(_dbpath.path() + "/")); 64 | return _engine.get(); 65 | } 66 | 67 | virtual KVEngine* getEngine() { 68 | return _engine.get(); 69 | } 70 | 71 | private: 72 | const std::unique_ptr _cs = stdx::make_unique(); 73 | unittest::TempDir _dbpath; 74 | std::unique_ptr _engine; 75 | }; 76 | 77 | std::unique_ptr makeHelper() { 78 | return stdx::make_unique(); 79 | } 80 | 81 | MONGO_INITIALIZER(RegisterKVHarnessFactory)(InitializerContext*) { 82 | KVHarnessHelper::registerFactory(makeHelper); 83 | return Status::OK(); 84 | } 85 | 86 | } // namespace mongo 87 | -------------------------------------------------------------------------------- /src/pmse_index_cursor.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014-2020, Intel Corporation 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * 11 | * * Redistributions in binary form must reproduce the above copyright 12 | * notice, this list of conditions and the following disclaimer in 13 | * the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * * Neither the name of the copyright holder nor the names of its 17 | * contributors may be used to endorse or promote products derived 18 | * from this software without specific prior written permission. 19 | * 20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 24 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
31 | */ 32 | 33 | #ifndef SRC_PMSE_INDEX_CURSOR_H_ 34 | #define SRC_PMSE_INDEX_CURSOR_H_ 35 | 36 | #include "mongo/db/storage/sorted_data_interface.h" 37 | #include "mongo/db/storage/key_string.h" 38 | 39 | #include "pmse_tree.h" 40 | 41 | #define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kStorage 42 | 43 | using namespace pmem::obj; 44 | 45 | namespace mongo { 46 | 47 | class PmseCursor final : public SortedDataInterface::Cursor { 48 | public: 49 | PmseCursor(OperationContext* txn, bool isForward, 50 | persistent_ptr tree, const BSONObj& ordering, 51 | const bool unique); 52 | 53 | void setEndPosition(const BSONObj& key, bool inclusive); 54 | 55 | virtual boost::optional next(RequestedInfo parts); 56 | 57 | boost::optional seek(const BSONObj& key, bool inclusive, 58 | RequestedInfo parts); 59 | 60 | boost::optional seek(const IndexSeekPoint& seekPoint, 61 | RequestedInfo parts); 62 | 63 | boost::optional seekExact(const BSONObj& key, 64 | RequestedInfo parts); 65 | 66 | void save(); 67 | 68 | void saveUnpositioned(); 69 | 70 | void restore(); 71 | 72 | void detachFromOperationContext(); 73 | 74 | void reattachToOperationContext(OperationContext* opCtx); 75 | 76 | private: 77 | boost::optional seekInTree(IndexKeyEntry& key, 78 | KeyString::Discriminator discriminator, 79 | RequestedInfo parts); 80 | bool hasFieldNames(const BSONObj& obj) { 81 | BSONForEach(e, obj) { 82 | if (e.fieldName()[0]) 83 | return true; 84 | } 85 | return false; 86 | } 87 | 88 | BSONObj stripFieldNames(const BSONObj& query) { 89 | if (!hasFieldNames(query)) 90 | return query; 91 | 92 | BSONObjBuilder bb; 93 | BSONForEach(e, query) { 94 | bb.appendAs(e, StringData()); 95 | } 96 | return bb.obj(); 97 | } 98 | void locate(const BSONObj& key, const RecordId& loc, std::list& locks); 99 | void unlockTree(std::list& locks); 100 | void seekEndCursor(); 101 | bool lower_bound(IndexKeyEntry entry, CursorObject& cursor, std::list& locks); 102 | void moveToNext(std::list& locks); 103 | bool atOrPastEndPointAfterSeeking(); 104 | bool atEndPoint(); 105 | const bool _forward; 106 | const BSONObj& _ordering; 107 | persistent_ptr _first; 108 | persistent_ptr _last; 109 | persistent_ptr _tree; 110 | bool _isEOF = true; 111 | /* 112 | * Cursor used for iterating with next until "_endPosition" 113 | */ 114 | boost::optional _endPosition; 115 | CursorObject _cursor; 116 | 117 | struct EndState { 118 | EndState(BSONObj key, RecordId loc) : query(std::move(key), loc) {} 119 | IndexKeyEntry query; 120 | }; 121 | boost::optional _endState; 122 | BSONObj _cursorKey; 123 | int64_t _cursorId; 124 | bool _locateFoundDataEnd; 125 | bool _eofRestore; 126 | }; 127 | } // namespace mongo 128 | 129 | #endif // SRC_PMSE_INDEX_CURSOR_H_ 130 | -------------------------------------------------------------------------------- /src/pmse_init.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014-2020, Intel Corporation 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * 11 | * * Redistributions in binary form must reproduce the above copyright 12 | * notice, this list of conditions and the following disclaimer in 13 | * the documentation and/or other materials provided with the 14 | * distribution. 
15 | * 16 | * * Neither the name of the copyright holder nor the names of its 17 | * contributors may be used to endorse or promote products derived 18 | * from this software without specific prior written permission. 19 | * 20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 24 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 | */ 32 | 33 | #define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kStorage 34 | 35 | #include "pmse_engine.h" 36 | 37 | #include 38 | 39 | #include "mongo/base/init.h" 40 | #include "mongo/db/service_context_d.h" 41 | #include "mongo/db/service_context.h" 42 | #include "mongo/db/storage/devnull/devnull_kv_engine.h" 43 | #include "mongo/db/storage/kv/kv_storage_engine.h" 44 | #include "mongo/db/storage/storage_options.h" 45 | #include "mongo/util/log.h" 46 | 47 | namespace mongo { 48 | 49 | namespace { 50 | const std::string storeName = "pmse"; 51 | } 52 | 53 | namespace { 54 | 55 | class PmseEngineFactory : public StorageEngine::Factory { 56 | public: 57 | virtual StorageEngine* create(const StorageGlobalParams& params, 58 | const StorageEngineLockFile* lockFile) const { 59 | KVStorageEngineOptions options; 60 | options.directoryPerDB = params.directoryperdb; 61 | options.forRepair = params.repair; 62 | log() << params.dbpath; 63 | return new KVStorageEngine(new PmseEngine(params.dbpath), options); 64 | } 65 | 66 | virtual StringData getCanonicalName() const { 67 | return storeName; 68 | } 69 | 70 | virtual Status validateMetadata(const StorageEngineMetadata& metadata, 71 | const StorageGlobalParams& params) const { 72 | // TODO( ): Implement validateMetadata 73 | return Status::OK(); 74 | } 75 | 76 | virtual BSONObj createMetadataOptions(const StorageGlobalParams& params) const { 77 | // TODO( ): Implement createMetadataOptions 78 | return BSONObj(); 79 | } 80 | }; 81 | } // namespace 82 | MONGO_INITIALIZER_WITH_PREREQUISITES(PMStoreEngineInit, ("SetGlobalEnvironment")) 83 | (InitializerContext* context) { 84 | getGlobalServiceContext()->registerStorageEngine(storeName.c_str(), 85 | new PmseEngineFactory()); 86 | return Status::OK(); 87 | } 88 | } // namespace mongo 89 | -------------------------------------------------------------------------------- /src/pmse_init_test.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014-2020, Intel Corporation 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 
10 | * 11 | * * Redistributions in binary form must reproduce the above copyright 12 | * notice, this list of conditions and the following disclaimer in 13 | * the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * * Neither the name of the copyright holder nor the names of its 17 | * contributors may be used to endorse or promote products derived 18 | * from this software without specific prior written permission. 19 | * 20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 24 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 | */ 32 | 33 | #include "mongo/platform/basic.h" 34 | #include "mongo/db/json.h" 35 | #include "mongo/db/modules/pmse/src/pmse_record_store.h" 36 | #include "mongo/db/service_context.h" 37 | #include "mongo/db/storage/storage_engine_metadata.h" 38 | #include "mongo/db/storage/storage_options.h" 39 | #include "mongo/unittest/unittest.h" 40 | #include "mongo/util/mongoutils/str.h" 41 | 42 | namespace mongo { 43 | 44 | class PmseEngineFactoryTest : public mongo::unittest::Test { 45 | private: 46 | virtual void setUp() { 47 | ServiceContext* globalEnv = getGlobalServiceContext(); 48 | ASSERT_TRUE(globalEnv); 49 | ASSERT_TRUE(getGlobalServiceContext()->isRegisteredStorageEngine("pmse")); 50 | std::unique_ptr sfi( 51 | getGlobalServiceContext()->makeStorageFactoriesIterator()); 52 | ASSERT_TRUE(sfi); 53 | bool found = false; 54 | while (sfi->more()) { 55 | const StorageEngine::Factory* currentFactory = sfi->next(); 56 | if (currentFactory->getCanonicalName() == "pmse") { 57 | found = true; 58 | factory = currentFactory; 59 | break; 60 | } 61 | found = true; 62 | } 63 | ASSERT_TRUE(found); 64 | } 65 | 66 | virtual void tearDown() { 67 | factory = NULL; 68 | } 69 | 70 | protected: 71 | const StorageEngine::Factory* factory; 72 | }; 73 | 74 | void _testValidateMetadata(const StorageEngine::Factory* factory, 75 | const BSONObj& metadataOptions, bool directoryPerDB, 76 | bool directoryForIndexes, 77 | ErrorCodes::Error expectedCode) { 78 | // It is fine to specify an invalid data directory for the metadata 79 | // as long as we do not invoke read() or write(). 80 | StorageEngineMetadata metadata("no_such_directory"); 81 | metadata.setStorageEngineOptions(metadataOptions); 82 | 83 | StorageGlobalParams storageOptions; 84 | storageOptions.directoryperdb = directoryPerDB; 85 | 86 | Status status = factory->validateMetadata(metadata, storageOptions); 87 | if (expectedCode != status.code()) { 88 | FAIL(str::stream() << "Unexpected StorageEngine::Factory::validateMetadata " 89 | "result. Expected: " 90 | << ErrorCodes::errorString(expectedCode) << " but got " 91 | << status.toString() 92 | << " instead. 
metadataOptions: " << metadataOptions 93 | << "; directoryPerDB: " << directoryPerDB); 94 | } 95 | } 96 | 97 | // Do not validate fields that are not present in metadata. 98 | TEST_F(PmseEngineFactoryTest, ValidateMetadataEmptyOptions) { 99 | _testValidateMetadata(factory, BSONObj(), false, false, ErrorCodes::OK); 100 | _testValidateMetadata(factory, BSONObj(), false, true, ErrorCodes::OK); 101 | _testValidateMetadata(factory, BSONObj(), true, false, ErrorCodes::OK); 102 | _testValidateMetadata(factory, BSONObj(), false, false, ErrorCodes::OK); 103 | } 104 | 105 | TEST_F(PmseEngineFactoryTest, ValidateMetadataDirectoryPerDB) { 106 | _testValidateMetadata(factory, fromjson("{directoryPerDB: 123}"), false, 107 | false, ErrorCodes::FailedToParse); 108 | _testValidateMetadata(factory, fromjson("{directoryPerDB: false}"), false, 109 | false, ErrorCodes::OK); 110 | _testValidateMetadata(factory, fromjson("{directoryPerDB: false}"), true, 111 | false, ErrorCodes::InvalidOptions); 112 | _testValidateMetadata(factory, fromjson("{directoryPerDB: true}"), false, 113 | false, ErrorCodes::InvalidOptions); 114 | _testValidateMetadata(factory, fromjson("{directoryPerDB: true}"), true, 115 | false, ErrorCodes::OK); 116 | } 117 | 118 | void _testCreateMetadataOptions(const StorageEngine::Factory* factory, 119 | bool directoryPerDB, bool directoryForIndexes) { 120 | StorageGlobalParams storageOptions; 121 | storageOptions.directoryperdb = directoryPerDB; 122 | 123 | BSONObj metadataOptions = factory->createMetadataOptions(storageOptions); 124 | 125 | BSONElement directoryPerDBElement = 126 | metadataOptions.getField("directoryPerDB"); 127 | ASSERT_TRUE(directoryPerDBElement.isBoolean()); 128 | ASSERT_EQUALS(directoryPerDB, directoryPerDBElement.boolean()); 129 | } 130 | 131 | TEST_F(PmseEngineFactoryTest, CreateMetadataOptions) { 132 | _testCreateMetadataOptions(factory, false, false); 133 | _testCreateMetadataOptions(factory, false, true); 134 | _testCreateMetadataOptions(factory, true, false); 135 | _testCreateMetadataOptions(factory, true, true); 136 | } 137 | 138 | } // namespace mongo 139 | -------------------------------------------------------------------------------- /src/pmse_list.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014-2020, Intel Corporation 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * 11 | * * Redistributions in binary form must reproduce the above copyright 12 | * notice, this list of conditions and the following disclaimer in 13 | * the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * * Neither the name of the copyright holder nor the names of its 17 | * contributors may be used to endorse or promote products derived 18 | * from this software without specific prior written permission. 19 | * 20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 24 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 | */ 32 | 33 | #define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kStorage 34 | 35 | #include "pmse_list.h" 36 | 37 | #include 38 | #include 39 | #include 40 | 41 | #include "mongo/stdx/mutex.h" 42 | 43 | namespace mongo { 44 | 45 | void PmseList::insertKV(const char key[], const char value[]) { 46 | stdx::lock_guard lock(_pmutex); 47 | transaction::exec_tx(pool_obj, [this, key, value] { 48 | persistent_ptr pair; 49 | try { 50 | pair = make_persistent(); 51 | } catch (std::exception &e) { 52 | std::cout << "Can't allocate memory in pmstore_list" << std::endl; 53 | } 54 | struct _values temp; 55 | strcpy(temp.id, key); 56 | strcpy(temp.value, value); 57 | pair->kv = temp; 58 | pair->next = nullptr; 59 | if (head != nullptr) { 60 | tail->next = pair; 61 | tail = pair; 62 | } else { 63 | head = pair; 64 | tail = head; 65 | } 66 | }); 67 | } 68 | 69 | void PmseList::deleteKV(const char key[]) { 70 | stdx::lock_guard lock(_pmutex); 71 | auto before = head; 72 | for (auto rec = head; rec != nullptr; rec = rec->next) { 73 | if (strcmp(rec->kv.get_ro().id, key) == 0) { 74 | if (before != head) { 75 | before->next = rec->next; 76 | if (before->next == nullptr) { 77 | tail = before; 78 | } 79 | before.flush(); 80 | } else { 81 | if (head == rec) { 82 | head = rec->next; 83 | } else { 84 | before->next = rec->next; 85 | } 86 | if (head == nullptr) { 87 | tail = head; 88 | } 89 | } 90 | pmemobj_free(rec.raw_ptr()); 91 | break; 92 | } else { 93 | before = rec; 94 | } 95 | } 96 | } 97 | 98 | bool PmseList::hasKey(const char key[]) { 99 | for (auto rec = head; rec != nullptr; rec = rec->next) { 100 | if (strcmp(rec->kv.get_ro().id, key) == 0) { 101 | return true; 102 | } 103 | } 104 | return false; 105 | } 106 | 107 | std::vector PmseList::getKeys() { 108 | stdx::lock_guard lock(_pmutex); 109 | std::vector names; 110 | for (auto rec = head; rec != nullptr; rec = rec->next) { 111 | names.push_back(rec->kv.get_ro().id); 112 | } 113 | return names; 114 | } 115 | 116 | const char* PmseList::find(const char key[], bool &status) { 117 | stdx::lock_guard lock(_pmutex); 118 | for (auto rec = head; rec != nullptr; rec = rec->next) { 119 | if (strcmp(rec->kv.get_ro().id, key) == 0) { 120 | status = true; 121 | return rec->kv.get_ro().value; 122 | } 123 | } 124 | status = false; 125 | return ""; 126 | } 127 | 128 | void PmseList::update(const char key[], const char value[]) { 129 | stdx::lock_guard lock(_pmutex); 130 | for (auto rec = head; rec != nullptr; rec = rec->next) { 131 | if (strcmp(rec->kv.get_ro().id, key) == 0) { 132 | struct _values temp; 133 | strcpy(temp.id, key); 134 | strcpy(temp.value, value); 135 | rec->kv = temp; 136 | return; 137 | } 138 | } 139 | } 140 | 141 | void PmseList::clear() { 142 | stdx::lock_guard lock(_pmutex); 143 | if (!head) 144 | return; 145 | transaction::exec_tx(pool_obj, [this] { 146 | for (auto rec = head; rec != nullptr; rec = rec->next) { 147 | auto temp = rec->next; 148 | pmemobj_tx_free(rec.raw()); 
149 | rec = temp; 150 | } 151 | head = nullptr; 152 | }); 153 | } 154 | 155 | void PmseList::setPool(pool pool_obj) { 156 | this->pool_obj = pool_obj; 157 | } 158 | 159 | bool PmseList::isAfterSafeShutdown() { 160 | return _afterSafeShutdown; 161 | } 162 | 163 | void PmseList::resetState() { 164 | transaction::exec_tx(pool_obj, [this] { 165 | _afterSafeShutdown = false; 166 | }); 167 | } 168 | 169 | void PmseList::safeShutdown() { 170 | transaction::exec_tx(pool_obj, [this] { 171 | _afterSafeShutdown = true; 172 | }); 173 | } 174 | 175 | } // namespace mongo 176 | -------------------------------------------------------------------------------- /src/pmse_list.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014-2020, Intel Corporation 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * 11 | * * Redistributions in binary form must reproduce the above copyright 12 | * notice, this list of conditions and the following disclaimer in 13 | * the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * * Neither the name of the copyright holder nor the names of its 17 | * contributors may be used to endorse or promote products derived 18 | * from this software without specific prior written permission. 19 | * 20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 24 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
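Note: pmse_list.cpp above implements the persistent key/value list the engine keeps for collection and index metadata: a singly linked list of fixed-size C-string pairs guarded by a pmem::obj::mutex, with safeShutdown()/resetState() recording whether the last shutdown was clean. A rough usage sketch, assuming a pool<ListRoot> (template arguments are stripped in this listing) and a placeholder pool path and layout name:

    pool<ListRoot> pop = pool<ListRoot>::open("/mnt/psmem_0/pmse_list", "pmse");  // path and layout are placeholders
    persistent_ptr<ListRoot> root = pop.get_root();
    transaction::exec_tx(pop, [&] {
        if (root->list_root_ptr == nullptr)
            root->list_root_ptr = make_persistent<PmseList>(pop);  // allocate once, transactionally
    });
    PmseList* list = root->list_root_ptr.get();
    list->setPool(pop);                          // re-bind the runtime pool handle after every open
    list->insertKV("collection-ident", "collection-metadata");
    bool found = false;
    const char* value = list->find("collection-ident", found);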
31 | */ 32 | 33 | #ifndef SRC_PMSE_LIST_H_ 34 | #define SRC_PMSE_LIST_H_ 35 | 36 | #include 37 | #include 38 | #include 39 | #include 40 | #include 41 | #include 42 | #include 43 | 44 | #include 45 | #include 46 | 47 | using namespace pmem::obj; 48 | 49 | namespace mongo { 50 | 51 | struct _values { 52 | char id[256]; 53 | char value[256]; 54 | }; 55 | struct ListRoot; 56 | 57 | class PmseList { 58 | public: 59 | struct _pair { 60 | p kv; 61 | persistent_ptr<_pair> next; 62 | }; 63 | typedef struct _pair KVPair; 64 | explicit PmseList(pool obj) : _afterSafeShutdown(true), pool_obj(obj) {} 65 | PmseList() = delete; 66 | ~PmseList() = default; 67 | void insertKV(const char key[], const char value[]); 68 | void deleteKV(const char key[]); 69 | void update(const char key[], const char value[]); 70 | bool hasKey(const char key[]); 71 | std::vector getKeys(); 72 | const char* find(const char key[], bool &status); 73 | void clear(); 74 | void setPool(pool pool_obj); 75 | bool isAfterSafeShutdown(); 76 | void safeShutdown(); 77 | void resetState(); 78 | private: 79 | p _afterSafeShutdown = true; 80 | p counter; 81 | persistent_ptr head; 82 | persistent_ptr tail; 83 | pool pool_obj; 84 | pmem::obj::mutex _pmutex; 85 | }; 86 | 87 | struct ListRoot { 88 | persistent_ptr list_root_ptr; 89 | }; 90 | 91 | } // namespace mongo 92 | #endif // SRC_PMSE_LIST_H_ 93 | -------------------------------------------------------------------------------- /src/pmse_list_int_ptr.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014-2020, Intel Corporation 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * 11 | * * Redistributions in binary form must reproduce the above copyright 12 | * notice, this list of conditions and the following disclaimer in 13 | * the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * * Neither the name of the copyright holder nor the names of its 17 | * contributors may be used to endorse or promote products derived 18 | * from this software without specific prior written permission. 19 | * 20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 24 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
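Note: insertKV() and update() in pmse_list.cpp copy the caller's strings with strcpy() into the fixed 256-byte id/value buffers of the _values struct declared above, so both strings must stay under 256 bytes including the terminator. A defensive caller might truncate up front; a hypothetical guard, not part of PmseList itself:

    char key[256], val[256];
    snprintf(key, sizeof(key), "%s", ident.c_str());      // snprintf always NUL-terminates and truncates safely
    snprintf(val, sizeof(val), "%s", metadata.c_str());
    list->insertKV(key, val);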
31 | */ 32 | 33 | #define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kStorage 34 | 35 | #include "pmse_change.h" 36 | #include "pmse_list_int_ptr.h" 37 | 38 | #include "mongo/db/storage/recovery_unit.h" 39 | 40 | #include "mongo/util/log.h" 41 | 42 | namespace mongo { 43 | 44 | PmseListIntPtr::PmseListIntPtr() : _counter(1) { 45 | _pop = pool_by_vptr(this); 46 | } 47 | 48 | PmseListIntPtr::~PmseListIntPtr() { 49 | } 50 | 51 | void PmseListIntPtr::setPool() { 52 | _pop = pool_by_vptr(this); 53 | } 54 | 55 | uint64_t PmseListIntPtr::size() { 56 | return _size; 57 | } 58 | 59 | void PmseListIntPtr::insertKV(const persistent_ptr &key, 60 | const persistent_ptr &value, bool insertToFront) { 61 | if (insertToFront) { 62 | key->ptr = value; 63 | key->next = nullptr; 64 | if (_head != nullptr) { 65 | key->next = _head; 66 | _head = key; 67 | } else { 68 | _head = key; 69 | _tail = _head; 70 | } 71 | _size++; 72 | } else { 73 | key->ptr = value; 74 | key->next = nullptr; 75 | if (_head != nullptr) { 76 | _tail->next = key; 77 | _tail = key; 78 | } else { 79 | _head = key; 80 | _tail = _head; 81 | } 82 | _size++; 83 | } 84 | _dataSize += value->size; 85 | } 86 | 87 | int64_t PmseListIntPtr::deleteKV(uint64_t key, 88 | persistent_ptr &deleted, 89 | OperationContext* txn) { 90 | auto before = _head; 91 | int64_t sizeFreed = 0; 92 | for (auto rec = _head; rec != nullptr; rec = rec->next) { 93 | if (rec->idValue == key) { 94 | transaction::exec_tx(_pop, [this, &deleted, &before, 95 | &sizeFreed, &rec, &txn] { 96 | if (before != _head) { 97 | before->next = rec->next; 98 | if (before->next == nullptr) 99 | _tail = before; 100 | before.flush(); 101 | } else { 102 | if (_head == rec) { 103 | _head = rec->next; 104 | } else { 105 | before->next = rec->next; 106 | if (rec->next != nullptr) { 107 | _tail = rec->next; 108 | } else { 109 | _tail = before; 110 | } 111 | } 112 | if (_head == nullptr) { 113 | _tail = _head; 114 | } 115 | } 116 | _size--; 117 | deleted = rec; 118 | if (txn) { 119 | auto rd = RecordData(deleted->ptr->data, deleted->ptr->size); 120 | txn->recoveryUnit()->registerChange(new RemoveChange(_pop, (deleted->ptr).get(), rd.size())); 121 | } 122 | _dataSize -= deleted->ptr->size; 123 | sizeFreed = pmemobj_alloc_usable_size(deleted->ptr.raw()); 124 | delete_persistent(deleted->ptr); 125 | }); 126 | break; 127 | } else { 128 | before = rec; 129 | } 130 | } 131 | return sizeFreed; 132 | } 133 | 134 | bool PmseListIntPtr::hasKey(uint64_t key) { 135 | for (auto rec = _head; rec != nullptr; rec = rec->next) { 136 | if (rec->idValue == key) { 137 | return true; 138 | } 139 | } 140 | return false; 141 | } 142 | 143 | bool PmseListIntPtr::find(uint64_t key, persistent_ptr *item_ptr) { 144 | for (auto rec = _head; rec != nullptr; rec = rec->next) { 145 | if (rec->idValue == key) { 146 | *item_ptr = rec->ptr; 147 | return true; 148 | } 149 | } 150 | *item_ptr = nullptr; 151 | return false; 152 | } 153 | 154 | bool PmseListIntPtr::getPair(uint64_t key, persistent_ptr *item_ptr) { 155 | for (auto rec = _head; rec != nullptr; rec = rec->next) { 156 | if (rec->idValue == key) { 157 | *item_ptr = rec; 158 | return true; 159 | } 160 | } 161 | *item_ptr = nullptr; 162 | return false; 163 | } 164 | 165 | void PmseListIntPtr::update(uint64_t key, 166 | const persistent_ptr &value, OperationContext* txn) { 167 | for (auto rec = _head; rec != nullptr; rec = rec->next) { 168 | if (rec->idValue == key) { 169 | if (rec->ptr != nullptr) { 170 | if (txn) { 171 | txn->recoveryUnit()->registerChange(new 
UpdateChange(_pop, key, (rec->ptr).get(), rec->ptr->size)); 172 | } 173 | try { 174 | transaction::exec_tx(_pop, [&rec] { 175 | delete_persistent(rec->ptr); 176 | }); 177 | } catch(std::exception &e) { 178 | log() << e.what(); 179 | } 180 | } 181 | rec->ptr = value; 182 | return; 183 | } 184 | } 185 | } 186 | 187 | void PmseListIntPtr::clear(OperationContext* txn, PmseMap *_mapper) { 188 | if (!_head) 189 | return; 190 | transaction::exec_tx(_pop, [this, txn, _mapper] { 191 | for (auto rec = _head; rec != nullptr;) { 192 | if (txn) 193 | txn->recoveryUnit()->registerChange(new TruncateChange(_pop, _mapper, RecordId(rec->idValue), 194 | rec->ptr.get(), rec->ptr->size)); 195 | auto temp = rec->next; 196 | delete_persistent(rec); 197 | rec = temp; 198 | } 199 | _head = nullptr; 200 | _size = 0; 201 | }); 202 | } 203 | 204 | uint64_t PmseListIntPtr::getNextId() { 205 | return _counter++; 206 | } 207 | 208 | uint64_t PmseListIntPtr::getDataSize() { 209 | return _dataSize; 210 | } 211 | 212 | } // namespace mongo 213 | -------------------------------------------------------------------------------- /src/pmse_list_int_ptr.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014-2020, Intel Corporation 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * 11 | * * Redistributions in binary form must reproduce the above copyright 12 | * notice, this list of conditions and the following disclaimer in 13 | * the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * * Neither the name of the copyright holder nor the names of its 17 | * contributors may be used to endorse or promote products derived 18 | * from this software without specific prior written permission. 19 | * 20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 24 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
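Note: PmseListIntPtr above is the per-collection record list: each node pairs a numeric record id with a persistent InitData blob, and the mutating operations (deleteKV, update, clear) run inside a libpmemobj transaction and, when an OperationContext is supplied, register a change from pmse_change.h (RemoveChange, UpdateChange, TruncateChange) so the recovery unit can compensate if the unit of work aborts. A hedged sketch of the calling pattern for a delete (hypothetical caller, not code from this module):

    persistent_ptr<KVPair> removed;
    {
        WriteUnitOfWork wuow(txn);            // the abort path drives Change::rollback()
        int64_t freedBytes = list->deleteKV(recordId.repr(), removed, txn);
        log() << "freed " << freedBytes << " bytes";
        wuow.commit();                        // without commit(), RemoveChange undoes the delete
    }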
31 | */ 32 | 33 | #ifndef SRC_PMSE_LIST_INT_PTR_H_ 34 | #define SRC_PMSE_LIST_INT_PTR_H_ 35 | 36 | #include 37 | #include 38 | #include 39 | #include 40 | #include 41 | #include 42 | #include 43 | 44 | #include 45 | 46 | #include "mongo/db/operation_context.h" 47 | 48 | using namespace pmem::obj; 49 | 50 | namespace mongo { 51 | struct InitData { 52 | uint64_t size; 53 | char data[]; 54 | }; 55 | 56 | struct _pair { 57 | p idValue; 58 | persistent_ptr ptr; 59 | persistent_ptr<_pair> next; 60 | p position; 61 | p isDeleted; 62 | }; 63 | 64 | template 65 | class PmseMap; 66 | 67 | typedef struct _pair KVPair; 68 | 69 | class PmseListIntPtr { 70 | template 71 | friend class PmseMap; 72 | public: 73 | PmseListIntPtr(); 74 | ~PmseListIntPtr(); 75 | void insertKV(const persistent_ptr &key, 76 | const persistent_ptr &value, bool insertToFront = false); 77 | bool find(uint64_t key, persistent_ptr *item_ptr); 78 | bool getPair(uint64_t key, persistent_ptr *item_ptr); 79 | void update(uint64_t key, const persistent_ptr &value, OperationContext* txn); 80 | int64_t deleteKV(uint64_t key, persistent_ptr &deleted, OperationContext* txn); 81 | bool hasKey(uint64_t key); 82 | void clear(OperationContext* txn, PmseMap *_mapper); 83 | void setPool(); 84 | uint64_t size(); 85 | uint64_t getNextId(); 86 | uint64_t getDataSize(); 87 | 88 | private: 89 | persistent_ptr getHead() { 90 | return _head; 91 | } 92 | persistent_ptr _head; 93 | persistent_ptr _tail; 94 | persistent_ptr _deleted; 95 | p _counter; 96 | p _dataSize; 97 | p _size; 98 | pool_base _pop; 99 | }; 100 | } // namespace mongo 101 | #endif // SRC_PMSE_LIST_INT_PTR_H_ 102 | -------------------------------------------------------------------------------- /src/pmse_record_store.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014-2020, Intel Corporation 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * 11 | * * Redistributions in binary form must reproduce the above copyright 12 | * notice, this list of conditions and the following disclaimer in 13 | * the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * * Neither the name of the copyright holder nor the names of its 17 | * contributors may be used to endorse or promote products derived 18 | * from this software without specific prior written permission. 19 | * 20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 24 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
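Note: each record is stored as the length-prefixed InitData blob declared above (a uint64_t size followed by a flexible char array), and the record-store code rebuilds a RecordData view over that memory, as deleteKV did earlier with RecordData(deleted->ptr->data, deleted->ptr->size). A minimal read-side sketch under the same assumptions:

    persistent_ptr<InitData> item;
    if (list->find(static_cast<uint64_t>(recordId.repr()), &item)) {
        RecordData rd(item->data, item->size);   // zero-copy view over persistent memory
        BSONObj doc = rd.toBson();               // records normally hold BSON documents
    }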
31 | */ 32 | 33 | #ifndef SRC_PMSE_RECORD_STORE_H_ 34 | #define SRC_PMSE_RECORD_STORE_H_ 35 | 36 | #include "pmse_map.h" 37 | 38 | #include 39 | #include 40 | #include 41 | 42 | #include 43 | #include 44 | #include 45 | 46 | #include "mongo/platform/basic.h" 47 | #include "mongo/db/catalog/collection_options.h" 48 | #include "mongo/db/storage/capped_callback.h" 49 | #include "mongo/db/storage/record_store.h" 50 | #include "mongo/stdx/memory.h" 51 | 52 | namespace mongo { 53 | 54 | namespace { 55 | const std::string storeName = "pmse"; 56 | const uint64_t baseSize = 20480; 57 | } 58 | 59 | class PmseRecordCursor final : public SeekableRecordCursor { 60 | public: 61 | PmseRecordCursor(persistent_ptr> mapper, bool forward); 62 | 63 | boost::optional next(); 64 | 65 | boost::optional seekExact(const RecordId& id) final; 66 | 67 | void save() final; 68 | 69 | bool restore() final; 70 | 71 | void detachFromOperationContext() final {} 72 | 73 | void reattachToOperationContext(OperationContext* txn) final {} 74 | 75 | void saveUnpositioned(); 76 | 77 | private: 78 | void moveToNext(bool inNext = true); 79 | void moveToLast(); 80 | void moveBackward(); 81 | bool checkPosition(); 82 | 83 | persistent_ptr> _mapper; 84 | persistent_ptr _before; 85 | persistent_ptr _cur; 86 | p _eof = false; 87 | p _isCapped; 88 | p _forward; 89 | p _lastMoveWasRestore; 90 | p _positionCheck; 91 | p _actualListNumber = -1; 92 | p _position; 93 | }; 94 | 95 | class PmseRecordStore : public RecordStore { 96 | public: 97 | PmseRecordStore(StringData ns, StringData ident, 98 | const CollectionOptions& options, 99 | StringData dbpath, 100 | std::map *pool_handler, 101 | bool recoveryNeeded = false); 102 | 103 | ~PmseRecordStore() { 104 | _mapper->storeCounters(); 105 | } 106 | 107 | virtual const char* name() const { 108 | return storeName.c_str(); 109 | } 110 | 111 | virtual void setCappedCallback(CappedCallback* cb); 112 | 113 | virtual long long dataSize(OperationContext* txn) const { 114 | return _mapper->dataSize(); 115 | } 116 | 117 | virtual long long numRecords(OperationContext* txn) const { 118 | return (int64_t)_mapper->fillment(); 119 | } 120 | 121 | virtual bool isCapped() const { 122 | return _options.capped; 123 | } 124 | 125 | virtual int64_t storageSize(OperationContext* txn, 126 | BSONObjBuilder* extraInfo = NULL, 127 | int infoLevel = 0) const { 128 | return _storageSize; 129 | } 130 | 131 | virtual bool findRecord(OperationContext* txn, const RecordId& loc, 132 | RecordData* rd) const; 133 | 134 | virtual void deleteRecord(OperationContext* txn, const RecordId& dl); 135 | 136 | virtual StatusWith insertRecord(OperationContext* txn, 137 | const char* data, 138 | int len, 139 | Timestamp timestamp, 140 | bool enforceQuota); 141 | 142 | virtual Status insertRecordsWithDocWriter(OperationContext* txn, 143 | const DocWriter* const* docs, 144 | const Timestamp* timestamps, 145 | size_t nDocs, 146 | RecordId* idsOut = nullptr); 147 | 148 | virtual void waitForAllEarlierOplogWritesToBeVisible(OperationContext* txn) const; 149 | 150 | virtual Status updateRecord(OperationContext* txn, 151 | const RecordId& oldLocation, 152 | const char* data, int len, 153 | bool enforceQuota, 154 | UpdateNotifier* notifier); 155 | 156 | virtual bool updateWithDamagesSupported() const { 157 | return false; 158 | } 159 | 160 | virtual StatusWith updateWithDamages( 161 | OperationContext* txn, const RecordId& loc, 162 | const RecordData& oldRec, const char* damageSource, 163 | const mutablebson::DamageVector& damages) { 164 | 
invariant(false); 165 | } 166 | 167 | std::unique_ptr getCursor(OperationContext* txn, 168 | bool forward) const final { 169 | return stdx::make_unique(_mapper, forward); 170 | } 171 | 172 | virtual Status truncate(OperationContext* txn) { 173 | if (!_mapper->truncate(txn)) { 174 | return Status(ErrorCodes::OperationFailed, "Truncate error"); 175 | } 176 | return Status::OK(); 177 | } 178 | 179 | virtual void cappedTruncateAfter(OperationContext* txn, RecordId end, 180 | bool inclusive); 181 | 182 | virtual Status validate(OperationContext* txn, bool full, bool scanData, 183 | ValidateAdaptor* adaptor, ValidateResults* results, 184 | BSONObjBuilder* output) { 185 | return Status::OK(); 186 | } 187 | 188 | virtual void appendCustomStats(OperationContext* txn, 189 | BSONObjBuilder* result, double scale) const { 190 | if (_mapper->isCapped()) { 191 | result->appendNumber("capped", true); 192 | result->appendNumber("maxSize", floor(_mapper->getMax() / scale)); 193 | result->appendNumber("max", _mapper->getMaxSize()); 194 | } else { 195 | result->appendNumber("capped", false); 196 | } 197 | result->appendNumber("numInserts", _mapper->fillment()); 198 | } 199 | 200 | virtual Status touch(OperationContext* txn, BSONObjBuilder* output) const { 201 | return Status::OK(); 202 | } 203 | 204 | virtual void updateStatsAfterRepair(OperationContext* txn, 205 | long long numRecords, 206 | long long dataSize) { 207 | } 208 | 209 | /** 210 | * @return OK if the validate run successfully 211 | * OK will be returned even if corruption is found 212 | * deatils will be in result 213 | */ 214 | virtual Status validate(OperationContext* txn, 215 | ValidateCmdLevel level, 216 | ValidateAdaptor* adaptor, 217 | ValidateResults* results, 218 | BSONObjBuilder* output); 219 | 220 | private: 221 | void deleteCappedAsNeeded(OperationContext* txn); 222 | static bool isSystemCollection(const StringData& ns); 223 | CappedCallback* _cappedCallback; 224 | int64_t _storageSize = baseSize; 225 | CollectionOptions _options; 226 | const StringData _dbPath; 227 | pool _mapPool; 228 | persistent_ptr> _mapper; 229 | }; 230 | } // namespace mongo 231 | #endif // SRC_PMSE_RECORD_STORE_H_ 232 | -------------------------------------------------------------------------------- /src/pmse_recovery_unit.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014-2020, Intel Corporation 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * 11 | * * Redistributions in binary form must reproduce the above copyright 12 | * notice, this list of conditions and the following disclaimer in 13 | * the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * * Neither the name of the copyright holder nor the names of its 17 | * contributors may be used to endorse or promote products derived 18 | * from this software without specific prior written permission. 19 | * 20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 24 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 | */ 32 | 33 | #define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kStorage 34 | 35 | #include "mongo/util/log.h" 36 | 37 | #include "pmse_recovery_unit.h" 38 | 39 | namespace mongo { 40 | 41 | void PmseRecoveryUnit::commitUnitOfWork() { 42 | try { 43 | auto end = _changes.end(); 44 | for (auto it = _changes.begin(); it != end; ++it) { 45 | (*it)->commit(); 46 | } 47 | _changes.clear(); 48 | } catch (...) { 49 | throw; 50 | } 51 | } 52 | 53 | void PmseRecoveryUnit::abortUnitOfWork() { 54 | try { 55 | auto end = _changes.rend(); 56 | for (auto it = _changes.rbegin(); it != end; ++it) { 57 | (*it)->rollback(); 58 | } 59 | _changes.clear(); 60 | } catch (...) { 61 | throw; 62 | } 63 | } 64 | void PmseRecoveryUnit::beginUnitOfWork(OperationContext* opCtx) {} 65 | 66 | bool PmseRecoveryUnit::waitUntilDurable() { 67 | return true; 68 | } 69 | 70 | void PmseRecoveryUnit::abandonSnapshot() {} 71 | 72 | SnapshotId PmseRecoveryUnit::getSnapshotId() const { 73 | return SnapshotId(); 74 | } 75 | 76 | void PmseRecoveryUnit::registerChange(Change* change) { 77 | _changes.push_back(ChangePtr(change)); 78 | } 79 | 80 | void* PmseRecoveryUnit::writingPtr(void* data, size_t len) { 81 | return nullptr; 82 | } 83 | 84 | void PmseRecoveryUnit::setRollbackWritesDisabled() {} 85 | 86 | } // namespace mongo 87 | -------------------------------------------------------------------------------- /src/pmse_recovery_unit.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014-2020, Intel Corporation 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * 11 | * * Redistributions in binary form must reproduce the above copyright 12 | * notice, this list of conditions and the following disclaimer in 13 | * the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * * Neither the name of the copyright holder nor the names of its 17 | * contributors may be used to endorse or promote products derived 18 | * from this software without specific prior written permission. 19 | * 20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 24 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 | */ 32 | 33 | #ifndef SRC_PMSE_RECOVERY_UNIT_H_ 34 | #define SRC_PMSE_RECOVERY_UNIT_H_ 35 | 36 | #include 37 | 38 | #include "mongo/db/storage/recovery_unit.h" 39 | 40 | namespace mongo { 41 | 42 | class PmseRecoveryUnit : public RecoveryUnit { 43 | public: 44 | PmseRecoveryUnit() = default; 45 | 46 | virtual void beginUnitOfWork(OperationContext* opCtx); 47 | 48 | virtual void commitUnitOfWork(); 49 | 50 | virtual void abortUnitOfWork(); 51 | 52 | virtual bool waitUntilDurable(); 53 | 54 | virtual void abandonSnapshot(); 55 | 56 | virtual SnapshotId getSnapshotId() const; 57 | 58 | virtual void registerChange(Change* change); 59 | 60 | virtual void* writingPtr(void* data, size_t len); 61 | 62 | virtual void setRollbackWritesDisabled(); 63 | 64 | private: 65 | typedef std::shared_ptr ChangePtr; 66 | typedef std::vector Changes; 67 | Changes _changes; 68 | }; 69 | 70 | } // namespace mongo 71 | #endif // SRC_PMSE_RECOVERY_UNIT_H_ 72 | -------------------------------------------------------------------------------- /src/pmse_sorted_data_interface.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014-2020, Intel Corporation 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * 11 | * * Redistributions in binary form must reproduce the above copyright 12 | * notice, this list of conditions and the following disclaimer in 13 | * the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * * Neither the name of the copyright holder nor the names of its 17 | * contributors may be used to endorse or promote products derived 18 | * from this software without specific prior written permission. 19 | * 20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 24 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
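Note: the recovery unit shown above applies registered changes in insertion order on commitUnitOfWork() and walks them in reverse calling rollback() on abortUnitOfWork(), taking ownership of each Change through a shared_ptr. A toy change that illustrates the contract (not one of the classes in pmse_change.h):

    class CounterChange : public RecoveryUnit::Change {
    public:
        explicit CounterChange(int* counter) : _counter(counter) { ++*_counter; }  // apply the work eagerly
        void commit() override {}                                                   // already applied, nothing to do
        void rollback() override { --*_counter; }                                   // compensate on abort
    private:
        int* _counter;
    };

    // txn->recoveryUnit()->registerChange(new CounterChange(&counter));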
31 | */ 32 | 33 | #define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kStorage 34 | 35 | #include "pmse_change.h" 36 | #include "pmse_index_cursor.h" 37 | #include "pmse_sorted_data_interface.h" 38 | 39 | #include 40 | #include 41 | #include 42 | #include 43 | 44 | #include 45 | #include 46 | #include 47 | 48 | #include "mongo/util/log.h" 49 | 50 | namespace mongo { 51 | 52 | const int TempKeyMaxSize = 1024; 53 | 54 | PmseSortedDataInterface::PmseSortedDataInterface(StringData ident, 55 | const IndexDescriptor* desc, 56 | StringData dbpath, 57 | std::map *pool_handler) 58 | : _dbpath(dbpath), _desc(*desc) { 59 | try { 60 | if (pool_handler->count(ident.toString()) > 0) { 61 | _pm_pool = pool((*pool_handler)[ident.toString()]); 62 | } else { 63 | std::string filepath = _dbpath.toString() + ident.toString(); 64 | if (desc->parentNS() == "local.startup_log" && 65 | boost::filesystem::exists(filepath)) { 66 | log() << "Delete old startup log"; 67 | boost::filesystem::remove_all(filepath); 68 | } 69 | if (!boost::filesystem::exists(filepath)) { 70 | _pm_pool = pool::create(filepath.c_str(), "pmse_index", 71 | (isSystemCollection(desc->parentNS()) ? 10 : 30) 72 | * PMEMOBJ_MIN_POOL, 0664); 73 | } else { 74 | _pm_pool = pool::open(filepath.c_str(), "pmse_index"); 75 | } 76 | pool_handler->insert(std::pair(ident.toString(), 77 | _pm_pool)); 78 | } 79 | _tree = _pm_pool.get_root(); 80 | } catch (std::exception &e) { 81 | log() << "Error handled: " << e.what(); 82 | throw Status(ErrorCodes::CannotCreateIndex, "Cannot create/open pool while creating index"); 83 | } 84 | } 85 | 86 | /* 87 | * Insert new (Key,RecordID) into Sorted Index into correct place. 88 | * Placement must be chosen basing on key value. * 89 | */ 90 | Status PmseSortedDataInterface::insert(OperationContext* txn, 91 | const BSONObj& key, const RecordId& loc, 92 | bool dupsAllowed) { 93 | BSONObj owned = key.getOwned(); 94 | Status status = Status::OK(); 95 | persistent_ptr obj; 96 | 97 | if (key.objsize() >= TempKeyMaxSize) { 98 | std::string msg = mongoutils::str::stream() 99 | << "PMSE::insert: key too large to index, failing " << ' ' 100 | << key.objsize() << ' ' << key; 101 | return Status(ErrorCodes::KeyTooLong, msg); 102 | } 103 | try { 104 | IndexKeyEntry entry(key.getOwned(), loc); 105 | status = _tree->insert(_pm_pool, entry, _desc.keyPattern(), dupsAllowed); 106 | if (status == Status::OK()) { 107 | txn->recoveryUnit()->registerChange(new InsertIndexChange(_tree, _pm_pool, key, loc, dupsAllowed, &_desc)); 108 | } 109 | } catch (std::exception &e) { 110 | log() << e.what(); 111 | } 112 | return status; 113 | } 114 | 115 | /* 116 | * Remove given record from Sorted Index * 117 | */ 118 | void PmseSortedDataInterface::unindex(OperationContext* txn, const BSONObj& key, 119 | const RecordId& loc, bool dupsAllowed) { 120 | bool status = true; 121 | IndexKeyEntry entry(key.getOwned(), loc); 122 | try { 123 | transaction::exec_tx(_pm_pool, [this, &entry, dupsAllowed, txn, &status] { 124 | status = _tree->remove(_pm_pool, entry, dupsAllowed, _desc.keyPattern()); 125 | }); 126 | if (status == true) { 127 | txn->recoveryUnit()->registerChange(new RemoveIndexChange(_tree, _pm_pool, key, loc, dupsAllowed, _desc.keyPattern())); 128 | } 129 | } catch (std::exception &e) { 130 | log() << e.what(); 131 | } 132 | } 133 | 134 | Status PmseSortedDataInterface::dupKeyCheck(OperationContext* txn, 135 | const BSONObj& key, 136 | const RecordId& loc) { 137 | return Status::OK(); 138 | } 139 | 140 | std::unique_ptr 
PmseSortedDataInterface::newCursor( 141 | OperationContext* txn, bool isForward) const { 142 | return stdx::make_unique (txn, isForward, _tree, 143 | _desc.keyPattern(), 144 | _desc.unique()); 145 | } 146 | 147 | class PmseSortedDataBuilderInterface : public SortedDataBuilderInterface { 148 | MONGO_DISALLOW_COPYING(PmseSortedDataBuilderInterface); 149 | public: 150 | PmseSortedDataBuilderInterface(OperationContext* txn, 151 | PmseSortedDataInterface* index, 152 | bool dupsAllowed) 153 | : _index(index), 154 | _txn(txn), 155 | _dupsAllowed(dupsAllowed) {} 156 | 157 | virtual Status addKey(const BSONObj& key, const RecordId& loc) { 158 | return _index->insert(_txn, key, loc, _dupsAllowed); 159 | } 160 | 161 | void commit(bool mayInterrupt) {} 162 | private: 163 | PmseSortedDataInterface* _index; 164 | OperationContext* _txn; 165 | bool _dupsAllowed; 166 | }; 167 | 168 | SortedDataBuilderInterface* PmseSortedDataInterface::getBulkBuilder( 169 | OperationContext* txn, bool dupsAllowed) { 170 | return new PmseSortedDataBuilderInterface(txn, this, dupsAllowed); 171 | } 172 | 173 | bool PmseSortedDataInterface::isSystemCollection(const StringData& ns) { 174 | return ns.toString() == "local.startup_log" || 175 | ns.toString() == "admin.system.version" || 176 | ns.toString() == "_mdb_catalog"; 177 | } 178 | 179 | } // namespace mongo 180 | -------------------------------------------------------------------------------- /src/pmse_sorted_data_interface.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014-2020, Intel Corporation 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * 11 | * * Redistributions in binary form must reproduce the above copyright 12 | * notice, this list of conditions and the following disclaimer in 13 | * the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * * Neither the name of the copyright holder nor the names of its 17 | * contributors may be used to endorse or promote products derived 18 | * from this software without specific prior written permission. 19 | * 20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 24 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
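Note: in the constructor above, each index ident gets its own pool file at dbpath + ident (layout "pmse_index", sized 10 x PMEMOBJ_MIN_POOL for system collections and 30 x otherwise), and the handle is cached in pool_handler so reopening the same ident reuses it. On the write path, insert() rejects BSON keys of 1024 bytes or more with KeyTooLong and registers an InsertIndexChange only after the tree insert succeeds. A hedged usage sketch (hypothetical caller, assumed test scaffolding):

    WriteUnitOfWork wuow(txn);
    // Keys whose BSON size is >= 1024 bytes come back as ErrorCodes::KeyTooLong.
    Status s = index->insert(txn, BSON("a" << 42), RecordId(1), /*dupsAllowed=*/false);
    if (s.isOK())
        wuow.commit();   // on abort, the registered InsertIndexChange compensates the tree insert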
31 | */ 32 | 33 | #ifndef SRC_PMSE_SORTED_DATA_INTERFACE_H_ 34 | #define SRC_PMSE_SORTED_DATA_INTERFACE_H_ 35 | 36 | #include "pmse_tree.h" 37 | 38 | #include 39 | #include 40 | #include 41 | #include 42 | 43 | #include 44 | #include 45 | 46 | #include "mongo/db/storage/sorted_data_interface.h" 47 | #include "mongo/db/index/index_descriptor.h" 48 | #include "mongo/bson/bsonobj_comparator.h" 49 | 50 | namespace mongo { 51 | 52 | class PmseSortedDataInterface : public SortedDataInterface { 53 | public: 54 | PmseSortedDataInterface(StringData ident, const IndexDescriptor* desc, 55 | StringData dbpath, std::map *pool_handler); 57 | 58 | virtual SortedDataBuilderInterface* getBulkBuilder(OperationContext* txn, 59 | bool dupsAllowed); 60 | 61 | virtual Status insert(OperationContext* txn, const BSONObj& key, 62 | const RecordId& loc, bool dupsAllowed); 63 | 64 | virtual void unindex(OperationContext* txn, const BSONObj& key, 65 | const RecordId& loc, bool dupsAllowed); 66 | 67 | virtual Status dupKeyCheck(OperationContext* txn, const BSONObj& key, 68 | const RecordId& loc); 69 | 70 | virtual void fullValidate(OperationContext* txn, long long* numKeysOut, 71 | ValidateResults* fullResults) const { 72 | *numKeysOut = _tree->countElements(); 73 | // TODO(kfilipek): Implement fullValidate 74 | } 75 | 76 | virtual bool appendCustomStats(OperationContext* txn, 77 | BSONObjBuilder* output, double scale) const { 78 | // TODO(kfilipek): Implement appendCustomStats 79 | return false; 80 | } 81 | 82 | virtual long long getSpaceUsedBytes(OperationContext* txn) const { 83 | // TODO(kfilipek): Implement getSpaceUsedBytes 84 | return 0; 85 | } 86 | 87 | virtual bool isEmpty(OperationContext* txn) { 88 | return _tree->isEmpty(); 89 | } 90 | 91 | virtual Status initAsEmpty(OperationContext* txn) { 92 | return Status::OK(); 93 | } 94 | 95 | std::unique_ptr newCursor( 96 | OperationContext* txn, bool isForward) const; 97 | 98 | private: 99 | static bool isSystemCollection(const StringData& ns); 100 | StringData _dbpath; 101 | pool _pm_pool; 102 | persistent_ptr _tree; 103 | IndexDescriptor _desc; 104 | }; 105 | } // namespace mongo 106 | #endif // SRC_PMSE_SORTED_DATA_INTERFACE_H_ 107 | -------------------------------------------------------------------------------- /src/pmse_sorted_data_interface_test.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014-2020, Intel Corporation 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * 11 | * * Redistributions in binary form must reproduce the above copyright 12 | * notice, this list of conditions and the following disclaimer in 13 | * the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * * Neither the name of the copyright holder nor the names of its 17 | * contributors may be used to endorse or promote products derived 18 | * from this software without specific prior written permission. 19 | * 20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 24 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 | */ 32 | 33 | #include 34 | #include 35 | 36 | #include "mongo/platform/basic.h" 37 | #include "mongo/base/init.h" 38 | #include "mongo/base/string_data.h" 39 | #include "mongo/base/checked_cast.h" 40 | #include "mongo/bson/bsonobjbuilder.h" 41 | #include "mongo/db/catalog/index_catalog_entry.h" 42 | #include "mongo/db/index/index_descriptor.h" 43 | #include "mongo/db/json.h" 44 | #include "mongo/db/operation_context_noop.h" 45 | #include "mongo/db/storage/kv/kv_prefix.h" 46 | #include "mongo/db/storage/sorted_data_interface_test_harness.h" 47 | #include "mongo/db/storage/kv/kv_engine_test_harness.h" 48 | #include "mongo/db/storage/record_store_test_harness.h" 49 | #include "mongo/db/concurrency/write_conflict_exception.h" 50 | #include "mongo/stdx/memory.h" 51 | #include "mongo/unittest/temp_dir.h" 52 | #include "mongo/unittest/unittest.h" 53 | 54 | #include "mongo/db/modules/pmse/src/pmse_record_store.h" 55 | #include "mongo/db/modules/pmse/src/pmse_recovery_unit.h" 56 | #include "mongo/db/modules/pmse/src/pmse_sorted_data_interface.h" 57 | 58 | #include 59 | #include 60 | #include 61 | #include 62 | #include 63 | 64 | namespace mongo { 65 | 66 | class PmseSortedDataInterfaceHarnessHelper final 67 | : public SortedDataInterfaceHarnessHelper { 68 | public: 69 | PmseSortedDataInterfaceHarnessHelper() : _dbpath("psmem_0") { 70 | } 71 | 72 | ~PmseSortedDataInterfaceHarnessHelper() final { 73 | } 74 | 75 | std::unique_ptr newSortedDataInterface( 76 | bool unique) final { 77 | std::string ns = "test.pmse"; 78 | OperationContextNoop opCtx(newRecoveryUnit().release()); 79 | BSONObj spec; 80 | 81 | spec = BSON("key" << BSON("a" << 1) << "name" 82 | << "testIndex" 83 | << "ns" << ns << "unique" << unique); 84 | 85 | IndexDescriptor desc(NULL, "", spec); 86 | 87 | std::map pool_handler; 88 | 89 | return stdx::make_unique( 90 | "pool_test", &desc, _dbpath.path() + "/", &pool_handler); 91 | } 92 | 93 | std::unique_ptr newRecoveryUnit() final { 94 | return stdx::make_unique(); 95 | } 96 | 97 | private: 98 | unittest::TempDir _dbpath; 99 | }; 100 | 101 | std::unique_ptr makeHarnessHelper() { 102 | return stdx::make_unique(); 103 | } 104 | 105 | MONGO_INITIALIZER(RegisterHarnessFactory)(InitializerContext* const) { 106 | mongo::registerHarnessHelperFactory(makeHarnessHelper); 107 | return Status::OK(); 108 | } 109 | } // namespace mongo 110 | -------------------------------------------------------------------------------- /src/pmse_standard_record_store_test.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014-2020, Intel Corporation 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 
10 | * 11 | * * Redistributions in binary form must reproduce the above copyright 12 | * notice, this list of conditions and the following disclaimer in 13 | * the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * * Neither the name of the copyright holder nor the names of its 17 | * contributors may be used to endorse or promote products derived 18 | * from this software without specific prior written permission. 19 | * 20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 24 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 | */ 32 | 33 | #include 34 | #include 35 | #include 36 | 37 | #include "mongo/platform/basic.h" 38 | #include "mongo/base/checked_cast.h" 39 | #include "mongo/base/init.h" 40 | #include "mongo/base/string_data.h" 41 | #include "mongo/bson/bsonobjbuilder.h" 42 | #include "mongo/db/catalog/collection_options.h" 43 | #include "mongo/db/concurrency/write_conflict_exception.h" 44 | #include "mongo/db/json.h" 45 | #include "mongo/db/operation_context_noop.h" 46 | #include "mongo/db/storage/kv/kv_engine_test_harness.h" 47 | #include "mongo/db/storage/kv/kv_prefix.h" 48 | #include "mongo/db/storage/record_store_test_harness.h" 49 | #include "mongo/stdx/memory.h" 50 | #include "mongo/unittest/temp_dir.h" 51 | #include "mongo/unittest/unittest.h" 52 | #include "mongo/util/clock_source_mock.h" 53 | #include "mongo/util/fail_point.h" 54 | #include "mongo/util/scopeguard.h" 55 | 56 | #include "mongo/db/modules/pmse/src/pmse_engine.h" 57 | #include "mongo/db/modules/pmse/src/pmse_record_store.h" 58 | #include "mongo/db/modules/pmse/src/pmse_recovery_unit.h" 59 | 60 | #include 61 | #include 62 | #include 63 | #include 64 | #include 65 | 66 | namespace mongo { 67 | 68 | using std::unique_ptr; 69 | using std::string; 70 | 71 | class PmseHarnessHelper final : public RecordStoreHarnessHelper { 72 | public: 73 | PmseHarnessHelper() : _dbpath("psmem_0"), _engine(_dbpath.path()) { 74 | } 75 | 76 | ~PmseHarnessHelper() { 77 | } 78 | 79 | virtual std::unique_ptr newNonCappedRecordStore() { 80 | return newNonCappedRecordStore("a.b"); 81 | } 82 | 83 | virtual std::unique_ptr newNonCappedRecordStore(const std::string& ns) { 84 | PmseRecoveryUnit* ru = 85 | dynamic_cast(_engine.newRecoveryUnit()); 86 | OperationContextNoop opCtx(ru); 87 | string uri = "table:" + ns; 88 | 89 | CollectionOptions options; 90 | options.capped = false; 91 | options.cappedSize = -1; 92 | options.cappedMaxDocs = -1; 93 | 94 | std::map pool_handler; 95 | auto ret = stdx::make_unique( 96 | ns, "pool_test", options, _dbpath.path() + "/", &pool_handler); 97 | 98 | return std::move(ret); 99 | } 100 | 101 | virtual std::unique_ptr newCappedRecordStore( 102 | int64_t cappedSizeBytes, int64_t cappedMaxDocs) final { 103 | return newCappedRecordStore("a.b", cappedSizeBytes, 
cappedMaxDocs); 104 | } 105 | 106 | virtual std::unique_ptr newCappedRecordStore( 107 | const std::string& ns, int64_t cappedMaxSize, int64_t cappedMaxDocs) { 108 | PmseRecoveryUnit* ru = 109 | dynamic_cast(_engine.newRecoveryUnit()); 110 | OperationContextNoop opCtx(ru); 111 | string uri = "table:a.b"; 112 | 113 | CollectionOptions options; 114 | options.capped = true; 115 | options.cappedSize = cappedMaxSize; 116 | options.cappedMaxDocs = cappedMaxDocs; 117 | 118 | std::map pool_handler; 119 | auto ret = stdx::make_unique( 120 | ns, "pool_test", options, _dbpath.path() + "/", &pool_handler); 121 | 122 | return std::move(ret); 123 | } 124 | 125 | virtual std::unique_ptr newRecoveryUnit() final { 126 | return std::unique_ptr(_engine.newRecoveryUnit()); 127 | } 128 | 129 | virtual bool supportsDocLocking() final { 130 | return true; 131 | } 132 | 133 | private: 134 | unittest::TempDir _dbpath; 135 | ClockSourceMock _cs; 136 | 137 | PmseEngine _engine; 138 | }; 139 | 140 | std::unique_ptr makeHarnessHelper() { 141 | return stdx::make_unique(); 142 | } 143 | 144 | MONGO_INITIALIZER(RegisterHarnessFactory)(InitializerContext* const) { 145 | mongo::registerHarnessHelperFactory(makeHarnessHelper); 146 | return Status::OK(); 147 | } 148 | 149 | } // mongo 150 | -------------------------------------------------------------------------------- /src/pmse_tree.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2014-2020, Intel Corporation 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 8 | * * Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * 11 | * * Redistributions in binary form must reproduce the above copyright 12 | * notice, this list of conditions and the following disclaimer in 13 | * the documentation and/or other materials provided with the 14 | * distribution. 15 | * 16 | * * Neither the name of the copyright holder nor the names of its 17 | * contributors may be used to endorse or promote products derived 18 | * from this software without specific prior written permission. 19 | * 20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 24 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
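Note: the harness above wires PmseEngine, PmseRecoveryUnit and PmseRecordStore into MongoDB's generic record-store test suite through registerHarnessHelperFactory. A hedged sketch of driving it directly (illustration only, not an actual test in this module):

    PmseHarnessHelper harness;                                // uses the psmem_0 temp dir set up above
    auto rs = harness.newNonCappedRecordStore("test.sketch");
    OperationContextNoop opCtx(harness.newRecoveryUnit().release());
    WriteUnitOfWork wuow(&opCtx);
    StatusWith<RecordId> res = rs->insertRecord(&opCtx, "data", 5, Timestamp(), false);
    ASSERT_TRUE(res.isOK());
    wuow.commit();
    ASSERT_EQUALS(1, rs->numRecords(&opCtx));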
31 | */ 32 | 33 | #ifndef SRC_PMSE_TREE_H_ 34 | #define SRC_PMSE_TREE_H_ 35 | 36 | #include 37 | #include 38 | #include 39 | #include 40 | #include 41 | #include 42 | #include 43 | #include 44 | #include 45 | #include 46 | 47 | #include "mongo/db/storage/sorted_data_interface.h" 48 | #include "mongo/db/index/index_descriptor.h" 49 | 50 | using namespace pmem::obj; 51 | 52 | namespace mongo { 53 | 54 | const uint64_t TREE_ORDER = 7; // number of elements in internal node 55 | const int64_t BSON_MIN_SIZE = 5; 56 | 57 | const uint64_t MIN_END = 1; 58 | const uint64_t MAX_END = 2; 59 | 60 | struct IndexKeyEntry_PM { 61 | public: 62 | static int64_t compareEntries(IndexKeyEntry& leftEntry, IndexKeyEntry_PM& rightEntry, const BSONObj& ordering); 63 | 64 | BSONObj getBSON(); 65 | persistent_ptr data; 66 | p loc; 67 | }; 68 | 69 | struct PmseTreeNode { 70 | PmseTreeNode() : num_keys(0) {} 71 | 72 | explicit PmseTreeNode(bool node_leaf) 73 | : num_keys(0) { 74 | keys = make_persistent(); 75 | if (node_leaf) { 76 | is_leaf = true; 77 | } else { 78 | for (uint64_t i = 0; i < TREE_ORDER; i++) { 79 | children_array[i] = nullptr; 80 | } 81 | } 82 | next = nullptr; 83 | previous = nullptr; 84 | } 85 | 86 | p num_keys = 0; 87 | persistent_ptr keys; 88 | persistent_ptr children_array[TREE_ORDER + 1]; /* Exist only for internal nodes */ 89 | persistent_ptr next = nullptr; 90 | persistent_ptr previous = nullptr; 91 | persistent_ptr parent = nullptr; 92 | p is_leaf = false; 93 | pmem::obj::shared_mutex _pmutex; 94 | }; 95 | 96 | struct CursorObject { 97 | persistent_ptr node; 98 | uint64_t index; 99 | }; 100 | 101 | class PmseTree { 102 | friend class PmseCursor; 103 | 104 | public: 105 | Status insert(pool_base pop, IndexKeyEntry& entry, 106 | const BSONObj& _ordering, bool dupsAllowed); 107 | bool remove(pool_base pop, IndexKeyEntry& entry, 108 | bool dupsAllowed, const BSONObj& _ordering); 109 | 110 | uint64_t countElements(); 111 | 112 | bool isEmpty(); 113 | 114 | private: 115 | pmem::obj::mutex globalMutex; 116 | void unlockTree(std::list& locks); 117 | bool nodeIsSafeForOperation(persistent_ptr node, bool insert); 118 | uint64_t cut(uint64_t length); 119 | int64_t getNeighborIndex(persistent_ptr node); 120 | persistent_ptr coalesceNodes( 121 | pool_base pop, persistent_ptr root, 122 | persistent_ptr n, 123 | persistent_ptr neighbor, 124 | int64_t neighbor_index, IndexKeyEntry_PM k_prime); 125 | persistent_ptr redistributeNodes( 126 | pool_base pop, persistent_ptr root, 127 | persistent_ptr n, 128 | persistent_ptr neighbor, 129 | int64_t neighbor_index, int64_t k_prime_index, 130 | IndexKeyEntry_PM k_prime); 131 | persistent_ptr makeTreeRoot(IndexKeyEntry& key); 132 | Status insertKeyIntoLeaf(persistent_ptr node, IndexKeyEntry& entry, 133 | const BSONObj& _ordering); 134 | persistent_ptr locateLeafWithKeyPM( 135 | persistent_ptr node, IndexKeyEntry& entry, 136 | const BSONObj& _ordering, std::list& locks, 137 | persistent_ptr& lockNode, bool insert); 138 | persistent_ptr splitFullNodeAndInsert( 139 | pool_base pop, persistent_ptr node, 140 | IndexKeyEntry& entry, const BSONObj& _ordering, 141 | std::list& locks); 142 | persistent_ptr insertIntoNodeParent( 143 | pool_base pop, persistent_ptr root, 144 | persistent_ptr node, IndexKeyEntry_PM& new_key, 145 | persistent_ptr new_leaf); 146 | persistent_ptr allocateNewRoot( 147 | pool_base pop, persistent_ptr left, 148 | IndexKeyEntry_PM& new_key, persistent_ptr right); 149 | uint64_t getLeftIndex(persistent_ptr parent, 150 | persistent_ptr left); 151 | 
persistent_ptr insertKeyIntoNode( 152 | pool_base pop, persistent_ptr root, 153 | persistent_ptr parent, uint64_t left_index, 154 | IndexKeyEntry_PM& new_key, persistent_ptr right); 155 | persistent_ptr insertToNodeAfterSplit( 156 | pool_base pop, persistent_ptr root, 157 | persistent_ptr old_node, uint64_t left_index, 158 | IndexKeyEntry_PM& new_key, persistent_ptr right); 159 | persistent_ptr adjustRoot(persistent_ptr root); 160 | persistent_ptr deleteEntry(pool_base pop, IndexKeyEntry& key, 161 | persistent_ptr node, 162 | uint64_t index); 163 | persistent_ptr removeEntryFromNode( 164 | IndexKeyEntry& key, persistent_ptr node, 165 | uint64_t index); 166 | 167 | persistent_ptr _current; 168 | persistent_ptr _root; 169 | persistent_ptr _first; 170 | persistent_ptr _last; 171 | BSONObj _ordering; 172 | }; 173 | 174 | } // namespace mongo 175 | #endif // SRC_PMSE_TREE_H_ 176 | -------------------------------------------------------------------------------- /tests/README.md: -------------------------------------------------------------------------------- 1 | ## Crash tests for MongoDB & PMSE 2 | 3 | There are two ways to run crash tests: 4 | 5 | Run single test using a test specific file: 6 | 7 | ./crashtest_insert_01.sh 8 | 9 | Run main script with test parameters: 10 | 11 | ./run_test.sh -i 1 12 | 13 | There are three test groups: 14 | 15 | -i for insert 16 | -u for update 17 | -d for delete 18 | 19 | Main script can also run all tests one by one with parameter -a[ll]: 20 | 21 | ./run_test.sh -a 22 | 23 | Default dbpath is: 24 | 25 | /mnt/psmem_0/ 26 | 27 | It is possible to specify a different dbpath by parameter -m: 28 | 29 | ./run_test.sh -m /my/db/path/ -i 1 30 | 31 | or 32 | 33 | ./crashtest_insert_01.sh -m /my/db/path/ 34 | 35 | There are also log files in 'log' directory, each test will generate two files, for example in case of insert 01; 36 | 37 | crashtest_insert_01_gdb_log.txt 38 | crashtest_insert_01_shell_log.txt 39 | 40 | The first one with 'gdb' in filename contains log from gdb and mongod server. 41 | Second file with 'shell' in filename contains log from mongo shell. -------------------------------------------------------------------------------- /tests/crashtest_delete_01.sh: -------------------------------------------------------------------------------- 1 | echo "Running crashtest_delete_01..."; 2 | 3 | mydb="/mnt/psmem_0/"; 4 | 5 | while getopts ":m:" opt; do 6 | case $opt in 7 | m) 8 | mydb=$OPTARG; 9 | ;; 10 | \?) 11 | echo "Invalid option: -$OPTARG, please use: -m[mongoDB] xyz, where xyz = dbpath." >&2 12 | exit 1 13 | ;; 14 | :) 15 | echo "-$OPTARG requires an argument with dbpath." >&2 16 | exit 1 17 | ;; 18 | esac 19 | done 20 | 21 | killall mongod > /dev/null 2>&1 22 | rm -rf /mnt/psmem_0/ > /dev/null 2>&1 23 | cd ../../../../../../ 24 | 25 | gdb --batch --command=src/mongo/db/modules/pmse/tests/gdb/crashtest_delete_01.gdb --args ./mongod --dbpath="$mydb" --storageEngine=pmse --bind_ip 127.0.0.1 > src/mongo/db/modules/pmse/tests/log/crashtest_delete_01_gdb_log.txt 2>&1 & 26 | 27 | mongo_port=0; 28 | 29 | while ((mongo_port < 1)); do 30 | mongo_port="$(netstat -l | grep 27017 | wc -l)"; 31 | done 32 | 33 | if ./mongo src/mongo/db/modules/pmse/tests/js/crashtest_delete_01.js > src/mongo/db/modules/pmse/tests/log/crashtest_delete_01_shell_log.txt 2>&1; then 34 | echo "Success" 35 | else 36 | echo "Fail!" 
37 | fi 38 | killall mongod > /dev/null 2>&1 39 | -------------------------------------------------------------------------------- /tests/crashtest_delete_02.sh: -------------------------------------------------------------------------------- 1 | echo "Running crashtest_delete_02..."; 2 | 3 | mydb="/mnt/psmem_0/"; 4 | 5 | while getopts ":m:" opt; do 6 | case $opt in 7 | m) 8 | mydb=$OPTARG; 9 | ;; 10 | \?) 11 | echo "Invalid option: -$OPTARG, please use: -m[mongoDB] xyz, where xyz = dbpath." >&2 12 | exit 1 13 | ;; 14 | :) 15 | echo "-$OPTARG requires an argument with dbpath." >&2 16 | exit 1 17 | ;; 18 | esac 19 | done 20 | 21 | killall mongod > /dev/null 2>&1 22 | rm -rf /mnt/psmem_0/ > /dev/null 2>&1 23 | cd ../../../../../../ 24 | 25 | gdb --batch --command=src/mongo/db/modules/pmse/tests/gdb/crashtest_delete_02.gdb --args ./mongod --dbpath="$mydb" --storageEngine=pmse --bind_ip 127.0.0.1 > src/mongo/db/modules/pmse/tests/log/crashtest_delete_02_gdb_log.txt 2>&1 & 26 | 27 | mongo_port=0; 28 | 29 | while ((mongo_port < 1)); do 30 | mongo_port="$(netstat -l | grep 27017 | wc -l)"; 31 | done 32 | 33 | if ./mongo src/mongo/db/modules/pmse/tests/js/crashtest_delete_02.js > src/mongo/db/modules/pmse/tests/log/crashtest_delete_02_shell_log.txt 2>&1; then 34 | echo "Success" 35 | else 36 | echo "Fail!" 37 | fi 38 | killall mongod > /dev/null 2>&1 39 | -------------------------------------------------------------------------------- /tests/crashtest_delete_03.sh: -------------------------------------------------------------------------------- 1 | echo "Running crashtest_delete_03..."; 2 | 3 | mydb="/mnt/psmem_0/"; 4 | 5 | while getopts ":m:" opt; do 6 | case $opt in 7 | m) 8 | mydb=$OPTARG; 9 | ;; 10 | \?) 11 | echo "Invalid option: -$OPTARG, please use: -m[mongoDB] xyz, where xyz = dbpath." >&2 12 | exit 1 13 | ;; 14 | :) 15 | echo "-$OPTARG requires an argument with dbpath." >&2 16 | exit 1 17 | ;; 18 | esac 19 | done 20 | 21 | killall mongod > /dev/null 2>&1 22 | rm -rf /mnt/psmem_0/ > /dev/null 2>&1 23 | cd ../../../../../../ 24 | 25 | gdb --batch --command=src/mongo/db/modules/pmse/tests/gdb/crashtest_delete_03.gdb --args ./mongod --dbpath="$mydb" --storageEngine=pmse --bind_ip 127.0.0.1 > src/mongo/db/modules/pmse/tests/log/crashtest_delete_03_gdb_log.txt 2>&1 & 26 | 27 | mongo_port=0; 28 | 29 | while ((mongo_port < 1)); do 30 | mongo_port="$(netstat -l | grep 27017 | wc -l)"; 31 | done 32 | 33 | if ./mongo src/mongo/db/modules/pmse/tests/js/crashtest_delete_03.js > src/mongo/db/modules/pmse/tests/log/crashtest_delete_03_shell_log.txt 2>&1; then 34 | echo "Success" 35 | else 36 | echo "Fail!" 37 | fi 38 | killall mongod > /dev/null 2>&1 39 | -------------------------------------------------------------------------------- /tests/crashtest_delete_04.sh: -------------------------------------------------------------------------------- 1 | echo "Running crashtest_delete_04..."; 2 | 3 | mydb="/mnt/psmem_0/"; 4 | 5 | while getopts ":m:" opt; do 6 | case $opt in 7 | m) 8 | mydb=$OPTARG; 9 | ;; 10 | \?) 11 | echo "Invalid option: -$OPTARG, please use: -m[mongoDB] xyz, where xyz = dbpath." >&2 12 | exit 1 13 | ;; 14 | :) 15 | echo "-$OPTARG requires an argument with dbpath." 
>&2 16 | exit 1 17 | ;; 18 | esac 19 | done 20 | 21 | killall mongod > /dev/null 2>&1 22 | rm -rf /mnt/psmem_0/ > /dev/null 2>&1 23 | cd ../../../../../../ 24 | 25 | gdb --batch --command=src/mongo/db/modules/pmse/tests/gdb/crashtest_delete_04.gdb --args ./mongod --dbpath="$mydb" --storageEngine=pmse --bind_ip 127.0.0.1 > src/mongo/db/modules/pmse/tests/log/crashtest_delete_04_gdb_log.txt 2>&1 & 26 | 27 | mongo_port=0; 28 | 29 | while ((mongo_port < 1)); do 30 | mongo_port="$(netstat -l | grep 27017 | wc -l)"; 31 | done 32 | 33 | if ./mongo src/mongo/db/modules/pmse/tests/js/crashtest_delete_04.js > src/mongo/db/modules/pmse/tests/log/crashtest_delete_04_shell_log.txt 2>&1; then 34 | echo "Success" 35 | else 36 | echo "Fail!" 37 | fi 38 | killall mongod > /dev/null 2>&1 39 | -------------------------------------------------------------------------------- /tests/crashtest_insert_01.sh: -------------------------------------------------------------------------------- 1 | echo "Running crashtest_insert_01..."; 2 | 3 | mydb="/mnt/psmem_0/"; 4 | 5 | while getopts ":m:" opt; do 6 | case $opt in 7 | m) 8 | mydb=$OPTARG; 9 | ;; 10 | \?) 11 | echo "Invalid option: -$OPTARG, please use: -m[mongoDB] dbpath" >&2 12 | exit 1 13 | ;; 14 | :) 15 | echo "-$OPTARG requires an argument with dbpath." >&2 16 | exit 1 17 | ;; 18 | esac 19 | done 20 | 21 | killall mongod > /dev/null 2>&1 22 | rm -rf /mnt/psmem_0/ > /dev/null 2>&1 23 | cd ../../../../../../ 24 | 25 | gdb --batch --command=src/mongo/db/modules/pmse/tests/gdb/crashtest_insert_01.gdb --args ./mongod --dbpath="$mydb" --storageEngine=pmse --bind_ip 127.0.0.1 > src/mongo/db/modules/pmse/tests/log/crashtest_insert_01_gdb_log.txt 2>&1 & 26 | 27 | mongo_port=0; 28 | 29 | while ((mongo_port < 1)); do 30 | mongo_port="$(netstat -l | grep 27017 | wc -l)"; 31 | done 32 | 33 | if ./mongo src/mongo/db/modules/pmse/tests/js/crashtest_insert_01.js > src/mongo/db/modules/pmse/tests/log/crashtest_insert_01_shell_log.txt 2>&1; then 34 | echo "Success" 35 | else 36 | echo "Fail!" 37 | fi 38 | killall mongod > /dev/null 2>&1 39 | -------------------------------------------------------------------------------- /tests/crashtest_insert_02.sh: -------------------------------------------------------------------------------- 1 | echo "Running crashtest_insert_02..."; 2 | 3 | mydb="/mnt/psmem_0/"; 4 | 5 | while getopts ":m:" opt; do 6 | case $opt in 7 | m) 8 | mydb=$OPTARG; 9 | ;; 10 | \?) 11 | echo "Invalid option: -$OPTARG, please use: -m[mongoDB] xyz, where xyz = dbpath." >&2 12 | exit 1 13 | ;; 14 | :) 15 | echo "-$OPTARG requires an argument with dbpath." >&2 16 | exit 1 17 | ;; 18 | esac 19 | done 20 | 21 | killall mongod > /dev/null 2>&1 22 | rm -rf /mnt/psmem_0/ > /dev/null 2>&1 23 | cd ../../../../../../ 24 | 25 | gdb --batch --command=src/mongo/db/modules/pmse/tests/gdb/crashtest_insert_02.gdb --args ./mongod --dbpath="$mydb" --storageEngine=pmse --bind_ip 127.0.0.1 > src/mongo/db/modules/pmse/tests/log/crashtest_insert_02_gdb_log.txt 2>&1 & 26 | 27 | mongo_port=0; 28 | 29 | while ((mongo_port < 1)); do 30 | mongo_port="$(netstat -l | grep 27017 | wc -l)"; 31 | done 32 | 33 | if ./mongo src/mongo/db/modules/pmse/tests/js/crashtest_insert_02.js > src/mongo/db/modules/pmse/tests/log/crashtest_insert_02_shell_log.txt 2>&1; then 34 | echo "Success" 35 | else 36 | echo "Fail!" 
37 | fi 38 | killall mongod > /dev/null 2>&1 39 | -------------------------------------------------------------------------------- /tests/crashtest_insert_03.sh: -------------------------------------------------------------------------------- 1 | echo "Running crashtest_insert_03..."; 2 | 3 | mydb="/mnt/psmem_0/"; 4 | 5 | while getopts ":m:" opt; do 6 | case $opt in 7 | m) 8 | mydb=$OPTARG; 9 | ;; 10 | \?) 11 | echo "Invalid option: -$OPTARG, please use: -m[mongoDB] xyz, where xyz = dbpath." >&2 12 | exit 1 13 | ;; 14 | :) 15 | echo "-$OPTARG requires an argument with dbpath." >&2 16 | exit 1 17 | ;; 18 | esac 19 | done 20 | 21 | killall mongod > /dev/null 2>&1 22 | rm -rf /mnt/psmem_0/ > /dev/null 2>&1 23 | cd ../../../../../../ 24 | 25 | gdb --batch --command=src/mongo/db/modules/pmse/tests/gdb/crashtest_insert_03.gdb --args ./mongod --dbpath="$mydb" --storageEngine=pmse --bind_ip 127.0.0.1 > src/mongo/db/modules/pmse/tests/log/crashtest_insert_03_gdb_log.txt 2>&1 & 26 | 27 | mongo_port=0; 28 | 29 | while ((mongo_port < 1)); do 30 | mongo_port="$(netstat -l | grep 27017 | wc -l)"; 31 | done 32 | 33 | if ./mongo src/mongo/db/modules/pmse/tests/js/crashtest_insert_03.js > src/mongo/db/modules/pmse/tests/log/crashtest_insert_03_shell_log.txt 2>&1; then 34 | echo "Success" 35 | else 36 | echo "Fail!" 37 | fi 38 | killall mongod > /dev/null 2>&1 39 | -------------------------------------------------------------------------------- /tests/crashtest_insert_04.sh: -------------------------------------------------------------------------------- 1 | echo "Running crashtest_insert_04..."; 2 | 3 | mydb="/mnt/psmem_0/"; 4 | 5 | while getopts ":m:" opt; do 6 | case $opt in 7 | m) 8 | mydb=$OPTARG; 9 | ;; 10 | \?) 11 | echo "Invalid option: -$OPTARG, please use: -m[mongoDB] xyz, where xyz = dbpath." >&2 12 | exit 1 13 | ;; 14 | :) 15 | echo "-$OPTARG requires an argument with dbpath." >&2 16 | exit 1 17 | ;; 18 | esac 19 | done 20 | 21 | killall mongod > /dev/null 2>&1 22 | rm -rf /mnt/psmem_0/ > /dev/null 2>&1 23 | cd ../../../../../../ 24 | 25 | gdb --batch --command=src/mongo/db/modules/pmse/tests/gdb/crashtest_insert_04.gdb --args ./mongod --dbpath="$mydb" --storageEngine=pmse --bind_ip 127.0.0.1 > src/mongo/db/modules/pmse/tests/log/crashtest_insert_04_gdb_log.txt 2>&1 & 26 | 27 | mongo_port=0; 28 | 29 | while ((mongo_port < 1)); do 30 | mongo_port="$(netstat -l | grep 27017 | wc -l)"; 31 | done 32 | 33 | if ./mongo src/mongo/db/modules/pmse/tests/js/crashtest_insert_04.js > src/mongo/db/modules/pmse/tests/log/crashtest_insert_04_shell_log.txt 2>&1; then 34 | echo "Success" 35 | else 36 | echo "Fail!" 37 | fi 38 | killall mongod > /dev/null 2>&1 39 | -------------------------------------------------------------------------------- /tests/crashtest_insert_05.sh: -------------------------------------------------------------------------------- 1 | echo "Running crashtest_insert_05..."; 2 | 3 | mydb="/mnt/psmem_0/"; 4 | 5 | while getopts ":m:" opt; do 6 | case $opt in 7 | m) 8 | mydb=$OPTARG; 9 | ;; 10 | \?) 11 | echo "Invalid option: -$OPTARG, please use: -m[mongoDB] xyz, where xyz = dbpath." >&2 12 | exit 1 13 | ;; 14 | :) 15 | echo "-$OPTARG requires an argument with dbpath." 
>&2 16 | exit 1 17 | ;; 18 | esac 19 | done 20 | 21 | killall mongod > /dev/null 2>&1 22 | rm -rf /mnt/psmem_0/ > /dev/null 2>&1 23 | cd ../../../../../../ 24 | 25 | gdb --batch --command=src/mongo/db/modules/pmse/tests/gdb/crashtest_insert_05.gdb --args ./mongod --dbpath="$mydb" --storageEngine=pmse --bind_ip 127.0.0.1 > src/mongo/db/modules/pmse/tests/log/crashtest_insert_05_gdb_log.txt 2>&1 & 26 | 27 | mongo_port=0; 28 | 29 | while ((mongo_port < 1)); do 30 | mongo_port="$(netstat -l | grep 27017 | wc -l)"; 31 | done 32 | 33 | if ./mongo src/mongo/db/modules/pmse/tests/js/crashtest_insert_05.js > src/mongo/db/modules/pmse/tests/log/crashtest_insert_05_shell_log.txt 2>&1; then 34 | echo "Success" 35 | else 36 | echo "Fail!" 37 | fi 38 | killall mongod > /dev/null 2>&1 39 | -------------------------------------------------------------------------------- /tests/crashtest_insert_06.sh: -------------------------------------------------------------------------------- 1 | echo "Running crashtest_insert_06..."; 2 | 3 | mydb="/mnt/psmem_0/"; 4 | 5 | while getopts ":m:" opt; do 6 | case $opt in 7 | m) 8 | mydb=$OPTARG; 9 | ;; 10 | \?) 11 | echo "Invalid option: -$OPTARG, please use: -m[mongoDB] xyz, where xyz = dbpath." >&2 12 | exit 1 13 | ;; 14 | :) 15 | echo "-$OPTARG requires an argument with dbpath." >&2 16 | exit 1 17 | ;; 18 | esac 19 | done 20 | 21 | killall mongod > /dev/null 2>&1 22 | rm -rf /mnt/psmem_0/ > /dev/null 2>&1 23 | cd ../../../../../../ 24 | 25 | gdb --batch --command=src/mongo/db/modules/pmse/tests/gdb/crashtest_insert_06.gdb --args ./mongod --dbpath="$mydb" --storageEngine=pmse --bind_ip 127.0.0.1 > src/mongo/db/modules/pmse/tests/log/crashtest_insert_06_gdb_log.txt 2>&1 & 26 | 27 | mongo_port=0; 28 | 29 | while ((mongo_port < 1)); do 30 | mongo_port="$(netstat -l | grep 27017 | wc -l)"; 31 | done 32 | 33 | if ./mongo src/mongo/db/modules/pmse/tests/js/crashtest_insert_06.js > src/mongo/db/modules/pmse/tests/log/crashtest_insert_06_shell_log.txt 2>&1; then 34 | echo "Success" 35 | else 36 | echo "Fail!" 37 | fi 38 | killall mongod > /dev/null 2>&1 39 | -------------------------------------------------------------------------------- /tests/crashtest_insert_07.sh: -------------------------------------------------------------------------------- 1 | echo "Running crashtest_insert_07..."; 2 | 3 | mydb="/mnt/psmem_0/"; 4 | 5 | while getopts ":m:" opt; do 6 | case $opt in 7 | m) 8 | mydb=$OPTARG; 9 | ;; 10 | \?) 11 | echo "Invalid option: -$OPTARG, please use: -m[mongoDB] xyz, where xyz = dbpath." >&2 12 | exit 1 13 | ;; 14 | :) 15 | echo "-$OPTARG requires an argument with dbpath." >&2 16 | exit 1 17 | ;; 18 | esac 19 | done 20 | 21 | killall mongod > /dev/null 2>&1 22 | rm -rf /mnt/psmem_0/ > /dev/null 2>&1 23 | cd ../../../../../../ 24 | 25 | gdb --batch --command=src/mongo/db/modules/pmse/tests/gdb/crashtest_insert_07.gdb --args ./mongod --dbpath="$mydb" --storageEngine=pmse --bind_ip 127.0.0.1 > src/mongo/db/modules/pmse/tests/log/crashtest_insert_07_gdb_log.txt 2>&1 & 26 | 27 | mongo_port=0; 28 | 29 | while ((mongo_port < 1)); do 30 | mongo_port="$(netstat -l | grep 27017 | wc -l)"; 31 | done 32 | 33 | if ./mongo src/mongo/db/modules/pmse/tests/js/crashtest_insert_07.js > src/mongo/db/modules/pmse/tests/log/crashtest_insert_07_shell_log.txt 2>&1; then 34 | echo "Success" 35 | else 36 | echo "Fail!" 
37 | fi 38 | killall mongod > /dev/null 2>&1 39 | -------------------------------------------------------------------------------- /tests/crashtest_insert_08.sh: -------------------------------------------------------------------------------- 1 | echo "Running crashtest_insert_08..."; 2 | 3 | mydb="/mnt/psmem_0/"; 4 | 5 | while getopts ":m:" opt; do 6 | case $opt in 7 | m) 8 | mydb=$OPTARG; 9 | ;; 10 | \?) 11 | echo "Invalid option: -$OPTARG, please use: -m[mongoDB] xyz, where xyz = dbpath." >&2 12 | exit 1 13 | ;; 14 | :) 15 | echo "-$OPTARG requires an argument with dbpath." >&2 16 | exit 1 17 | ;; 18 | esac 19 | done 20 | 21 | killall mongod > /dev/null 2>&1 22 | rm -rf /mnt/psmem_0/ > /dev/null 2>&1 23 | cd ../../../../../../ 24 | 25 | gdb --batch --command=src/mongo/db/modules/pmse/tests/gdb/crashtest_insert_08.gdb --args ./mongod --dbpath="$mydb" --storageEngine=pmse --bind_ip 127.0.0.1 > src/mongo/db/modules/pmse/tests/log/crashtest_insert_08_gdb_log.txt 2>&1 & 26 | 27 | mongo_port=0; 28 | 29 | while ((mongo_port < 1)); do 30 | mongo_port="$(netstat -l | grep 27017 | wc -l)"; 31 | done 32 | 33 | if ./mongo src/mongo/db/modules/pmse/tests/js/crashtest_insert_08.js > src/mongo/db/modules/pmse/tests/log/crashtest_insert_08_shell_log.txt 2>&1; then 34 | echo "Success" 35 | else 36 | echo "Fail!" 37 | fi 38 | killall mongod > /dev/null 2>&1 39 | -------------------------------------------------------------------------------- /tests/crashtest_insert_09.sh: -------------------------------------------------------------------------------- 1 | echo "Running crashtest_insert_09..."; 2 | 3 | mydb="/mnt/psmem_0/"; 4 | 5 | while getopts ":m:" opt; do 6 | case $opt in 7 | m) 8 | mydb=$OPTARG; 9 | ;; 10 | \?) 11 | echo "Invalid option: -$OPTARG, please use: -m[mongoDB] xyz, where xyz = dbpath." >&2 12 | exit 1 13 | ;; 14 | :) 15 | echo "-$OPTARG requires an argument with dbpath." >&2 16 | exit 1 17 | ;; 18 | esac 19 | done 20 | 21 | killall mongod > /dev/null 2>&1 22 | rm -rf /mnt/psmem_0/ > /dev/null 2>&1 23 | cd ../../../../../../ 24 | 25 | gdb --batch --command=src/mongo/db/modules/pmse/tests/gdb/crashtest_insert_09.gdb --args ./mongod --dbpath="$mydb" --storageEngine=pmse --bind_ip 127.0.0.1 > src/mongo/db/modules/pmse/tests/log/crashtest_insert_09_gdb_log.txt 2>&1 & 26 | 27 | mongo_port=0; 28 | 29 | while ((mongo_port < 1)); do 30 | mongo_port="$(netstat -l | grep 27017 | wc -l)"; 31 | done 32 | 33 | if ./mongo src/mongo/db/modules/pmse/tests/js/crashtest_insert_09.js > src/mongo/db/modules/pmse/tests/log/crashtest_insert_09_shell_log.txt 2>&1; then 34 | echo "Success" 35 | else 36 | echo "Fail!" 37 | fi 38 | killall mongod > /dev/null 2>&1 39 | -------------------------------------------------------------------------------- /tests/crashtest_insert_10.sh: -------------------------------------------------------------------------------- 1 | echo "Running crashtest_insert_10..."; 2 | 3 | mydb="/mnt/psmem_0/"; 4 | 5 | while getopts ":m:" opt; do 6 | case $opt in 7 | m) 8 | mydb=$OPTARG; 9 | ;; 10 | \?) 11 | echo "Invalid option: -$OPTARG, please use: -m[mongoDB] xyz, where xyz = dbpath." >&2 12 | exit 1 13 | ;; 14 | :) 15 | echo "-$OPTARG requires an argument with dbpath." 
>&2 16 | exit 1 17 | ;; 18 | esac 19 | done 20 | 21 | killall mongod > /dev/null 2>&1 22 | rm -rf /mnt/psmem_0/ > /dev/null 2>&1 23 | cd ../../../../../../ 24 | 25 | gdb --batch --command=src/mongo/db/modules/pmse/tests/gdb/crashtest_insert_10.gdb --args ./mongod --dbpath="$mydb" --storageEngine=pmse --bind_ip 127.0.0.1 > src/mongo/db/modules/pmse/tests/log/crashtest_insert_10_gdb_log.txt 2>&1 & 26 | 27 | mongo_port=0; 28 | 29 | while ((mongo_port < 1)); do 30 | mongo_port="$(netstat -l | grep 27017 | wc -l)"; 31 | done 32 | 33 | if ./mongo src/mongo/db/modules/pmse/tests/js/crashtest_insert_10.js > src/mongo/db/modules/pmse/tests/log/crashtest_insert_10_shell_log.txt 2>&1; then 34 | echo "Success" 35 | else 36 | echo "Fail!" 37 | fi 38 | killall mongod > /dev/null 2>&1 39 | -------------------------------------------------------------------------------- /tests/crashtest_insert_11.sh: -------------------------------------------------------------------------------- 1 | echo "Running crashtest_insert_11..."; 2 | 3 | mydb="/mnt/psmem_0/"; 4 | 5 | while getopts ":m:" opt; do 6 | case $opt in 7 | m) 8 | mydb=$OPTARG; 9 | ;; 10 | \?) 11 | echo "Invalid option: -$OPTARG, please use: -m[mongoDB] xyz, where xyz = dbpath." >&2 12 | exit 1 13 | ;; 14 | :) 15 | echo "-$OPTARG requires an argument with dbpath." >&2 16 | exit 1 17 | ;; 18 | esac 19 | done 20 | 21 | killall mongod > /dev/null 2>&1 22 | rm -rf /mnt/psmem_0/ > /dev/null 2>&1 23 | cd ../../../../../../ 24 | 25 | gdb --batch --command=src/mongo/db/modules/pmse/tests/gdb/crashtest_insert_11.gdb --args ./mongod --dbpath="$mydb" --storageEngine=pmse --bind_ip 127.0.0.1 > src/mongo/db/modules/pmse/tests/log/crashtest_insert_11_gdb_log.txt 2>&1 & 26 | 27 | mongo_port=0; 28 | 29 | while ((mongo_port < 1)); do 30 | mongo_port="$(netstat -l | grep 27017 | wc -l)"; 31 | done 32 | 33 | if ./mongo src/mongo/db/modules/pmse/tests/js/crashtest_insert_11.js > src/mongo/db/modules/pmse/tests/log/crashtest_insert_11_shell_log.txt 2>&1; then 34 | echo "Success" 35 | else 36 | echo "Fail!" 37 | fi 38 | killall mongod > /dev/null 2>&1 39 | -------------------------------------------------------------------------------- /tests/crashtest_insert_12.sh: -------------------------------------------------------------------------------- 1 | echo "Running crashtest_insert_12..."; 2 | 3 | mydb="/mnt/psmem_0/"; 4 | 5 | while getopts ":m:" opt; do 6 | case $opt in 7 | m) 8 | mydb=$OPTARG; 9 | ;; 10 | \?) 11 | echo "Invalid option: -$OPTARG, please use: -m[mongoDB] xyz, where xyz = dbpath." >&2 12 | exit 1 13 | ;; 14 | :) 15 | echo "-$OPTARG requires an argument with dbpath." >&2 16 | exit 1 17 | ;; 18 | esac 19 | done 20 | 21 | killall mongod > /dev/null 2>&1 22 | rm -rf /mnt/psmem_0/ > /dev/null 2>&1 23 | cd ../../../../../../ 24 | 25 | gdb --batch --command=src/mongo/db/modules/pmse/tests/gdb/crashtest_insert_12.gdb --args ./mongod --dbpath="$mydb" --storageEngine=pmse --bind_ip 127.0.0.1 > src/mongo/db/modules/pmse/tests/log/crashtest_insert_12_gdb_log.txt 2>&1 & 26 | 27 | mongo_port=0; 28 | 29 | while ((mongo_port < 1)); do 30 | mongo_port="$(netstat -l | grep 27017 | wc -l)"; 31 | done 32 | 33 | if ./mongo src/mongo/db/modules/pmse/tests/js/crashtest_insert_12.js > src/mongo/db/modules/pmse/tests/log/crashtest_insert_12_shell_log.txt 2>&1; then 34 | echo "Success" 35 | else 36 | echo "Fail!" 
37 | fi 38 | killall mongod > /dev/null 2>&1 39 | -------------------------------------------------------------------------------- /tests/crashtest_insert_13.sh: -------------------------------------------------------------------------------- 1 | echo "Running crashtest_insert_13..."; 2 | 3 | mydb="/mnt/psmem_0/"; 4 | 5 | while getopts ":m:" opt; do 6 | case $opt in 7 | m) 8 | mydb=$OPTARG; 9 | ;; 10 | \?) 11 | echo "Invalid option: -$OPTARG, please use: -m[mongoDB] xyz, where xyz = dbpath." >&2 12 | exit 1 13 | ;; 14 | :) 15 | echo "-$OPTARG requires an argument with dbpath." >&2 16 | exit 1 17 | ;; 18 | esac 19 | done 20 | 21 | killall mongod > /dev/null 2>&1 22 | rm -rf /mnt/psmem_0/ > /dev/null 2>&1 23 | cd ../../../../../../ 24 | 25 | gdb --batch --command=src/mongo/db/modules/pmse/tests/gdb/crashtest_insert_13.gdb --args ./mongod --dbpath="$mydb" --storageEngine=pmse --bind_ip 127.0.0.1 > src/mongo/db/modules/pmse/tests/log/crashtest_insert_13_gdb_log.txt 2>&1 & 26 | 27 | mongo_port=0; 28 | 29 | while ((mongo_port < 1)); do 30 | mongo_port="$(netstat -l | grep 27017 | wc -l)"; 31 | done 32 | 33 | if ./mongo src/mongo/db/modules/pmse/tests/js/crashtest_insert_13.js > src/mongo/db/modules/pmse/tests/log/crashtest_insert_13_shell_log.txt 2>&1; then 34 | echo "Success" 35 | else 36 | echo "Fail!" 37 | fi 38 | killall mongod > /dev/null 2>&1 39 | -------------------------------------------------------------------------------- /tests/crashtest_update_01.sh: -------------------------------------------------------------------------------- 1 | echo "Running crashtest_update_01..."; 2 | 3 | mydb="/mnt/psmem_0/"; 4 | 5 | while getopts ":m:" opt; do 6 | case $opt in 7 | m) 8 | mydb=$OPTARG; 9 | ;; 10 | \?) 11 | echo "Invalid option: -$OPTARG, please use: -m[mongoDB] xyz, where xyz = dbpath." >&2 12 | exit 1 13 | ;; 14 | :) 15 | echo "-$OPTARG requires an argument with dbpath." >&2 16 | exit 1 17 | ;; 18 | esac 19 | done 20 | 21 | killall mongod > /dev/null 2>&1 22 | rm -rf /mnt/psmem_0/ > /dev/null 2>&1 23 | cd ../../../../../../ 24 | 25 | gdb --batch --command=src/mongo/db/modules/pmse/tests/gdb/crashtest_update_01.gdb --args ./mongod --dbpath="$mydb" --storageEngine=pmse --bind_ip 127.0.0.1 > src/mongo/db/modules/pmse/tests/log/crashtest_update_01_gdb_log.txt 2>&1 & 26 | 27 | mongo_port=0; 28 | 29 | while ((mongo_port < 1)); do 30 | mongo_port="$(netstat -l | grep 27017 | wc -l)"; 31 | done 32 | 33 | if ./mongo src/mongo/db/modules/pmse/tests/js/crashtest_update_01.js > src/mongo/db/modules/pmse/tests/log/crashtest_update_01_shell_log.txt 2>&1; then 34 | echo "Success" 35 | else 36 | echo "Fail!" 37 | fi 38 | killall mongod > /dev/null 2>&1 39 | -------------------------------------------------------------------------------- /tests/crashtest_update_02.sh: -------------------------------------------------------------------------------- 1 | echo "Running crashtest_update_02..."; 2 | 3 | mydb="/mnt/psmem_0/"; 4 | 5 | while getopts ":m:" opt; do 6 | case $opt in 7 | m) 8 | mydb=$OPTARG; 9 | ;; 10 | \?) 11 | echo "Invalid option: -$OPTARG, please use: -m[mongoDB] xyz, where xyz = dbpath." >&2 12 | exit 1 13 | ;; 14 | :) 15 | echo "-$OPTARG requires an argument with dbpath." 
>&2 16 | exit 1 17 | ;; 18 | esac 19 | done 20 | 21 | killall mongod > /dev/null 2>&1 22 | rm -rf /mnt/psmem_0/ > /dev/null 2>&1 23 | cd ../../../../../../ 24 | 25 | gdb --batch --command=src/mongo/db/modules/pmse/tests/gdb/crashtest_update_02.gdb --args ./mongod --dbpath="$mydb" --storageEngine=pmse --bind_ip 127.0.0.1 > src/mongo/db/modules/pmse/tests/log/crashtest_update_02_gdb_log.txt 2>&1 & 26 | 27 | mongo_port=0; 28 | 29 | while ((mongo_port < 1)); do 30 | mongo_port="$(netstat -l | grep 27017 | wc -l)"; 31 | done 32 | 33 | if ./mongo src/mongo/db/modules/pmse/tests/js/crashtest_update_02.js > src/mongo/db/modules/pmse/tests/log/crashtest_update_02_shell_log.txt 2>&1; then 34 | echo "Success" 35 | else 36 | echo "Fail!" 37 | fi 38 | killall mongod > /dev/null 2>&1 39 | -------------------------------------------------------------------------------- /tests/crashtest_update_03.sh: -------------------------------------------------------------------------------- 1 | echo "Running crashtest_update_03..."; 2 | 3 | mydb="/mnt/psmem_0/"; 4 | 5 | while getopts ":m:" opt; do 6 | case $opt in 7 | m) 8 | mydb=$OPTARG; 9 | ;; 10 | \?) 11 | echo "Invalid option: -$OPTARG, please use: -m[mongoDB] xyz, where xyz = dbpath." >&2 12 | exit 1 13 | ;; 14 | :) 15 | echo "-$OPTARG requires an argument with dbpath." >&2 16 | exit 1 17 | ;; 18 | esac 19 | done 20 | 21 | killall mongod > /dev/null 2>&1 22 | rm -rf /mnt/psmem_0/ > /dev/null 2>&1 23 | cd ../../../../../../ 24 | 25 | gdb --batch --command=src/mongo/db/modules/pmse/tests/gdb/crashtest_update_03.gdb --args ./mongod --dbpath="$mydb" --storageEngine=pmse --bind_ip 127.0.0.1 > src/mongo/db/modules/pmse/tests/log/crashtest_update_03_gdb_log.txt 2>&1 & 26 | 27 | mongo_port=0; 28 | 29 | while ((mongo_port < 1)); do 30 | mongo_port="$(netstat -l | grep 27017 | wc -l)"; 31 | done 32 | 33 | if ./mongo src/mongo/db/modules/pmse/tests/js/crashtest_update_03.js > src/mongo/db/modules/pmse/tests/log/crashtest_update_03_shell_log.txt 2>&1; then 34 | echo "Success" 35 | else 36 | echo "Fail!" 37 | fi 38 | killall mongod > /dev/null 2>&1 39 | -------------------------------------------------------------------------------- /tests/crashtest_update_04.sh: -------------------------------------------------------------------------------- 1 | echo "Running crashtest_update_04..."; 2 | 3 | mydb="/mnt/psmem_0/"; 4 | 5 | while getopts ":m:" opt; do 6 | case $opt in 7 | m) 8 | mydb=$OPTARG; 9 | ;; 10 | \?) 11 | echo "Invalid option: -$OPTARG, please use: -m[mongoDB] xyz, where xyz = dbpath." >&2 12 | exit 1 13 | ;; 14 | :) 15 | echo "-$OPTARG requires an argument with dbpath." >&2 16 | exit 1 17 | ;; 18 | esac 19 | done 20 | 21 | killall mongod > /dev/null 2>&1 22 | rm -rf /mnt/psmem_0/ > /dev/null 2>&1 23 | cd ../../../../../../ 24 | 25 | gdb --batch --command=src/mongo/db/modules/pmse/tests/gdb/crashtest_update_04.gdb --args ./mongod --dbpath="$mydb" --storageEngine=pmse --bind_ip 127.0.0.1 > src/mongo/db/modules/pmse/tests/log/crashtest_update_04_gdb_log.txt 2>&1 & 26 | 27 | mongo_port=0; 28 | 29 | while ((mongo_port < 1)); do 30 | mongo_port="$(netstat -l | grep 27017 | wc -l)"; 31 | done 32 | 33 | if ./mongo src/mongo/db/modules/pmse/tests/js/crashtest_update_04.js > src/mongo/db/modules/pmse/tests/log/crashtest_update_04_shell_log.txt 2>&1; then 34 | echo "Success" 35 | else 36 | echo "Fail!" 
37 | fi 38 | killall mongod > /dev/null 2>&1 39 | -------------------------------------------------------------------------------- /tests/crashtest_update_05.sh: -------------------------------------------------------------------------------- 1 | echo "Running crashtest_update_05..."; 2 | 3 | mydb="/mnt/psmem_0/"; 4 | 5 | while getopts ":m:" opt; do 6 | case $opt in 7 | m) 8 | mydb=$OPTARG; 9 | ;; 10 | \?) 11 | echo "Invalid option: -$OPTARG, please use: -m[mongoDB] xyz, where xyz = dbpath." >&2 12 | exit 1 13 | ;; 14 | :) 15 | echo "-$OPTARG requires an argument with dbpath." >&2 16 | exit 1 17 | ;; 18 | esac 19 | done 20 | 21 | killall mongod > /dev/null 2>&1 22 | rm -rf /mnt/psmem_0/ > /dev/null 2>&1 23 | cd ../../../../../../ 24 | 25 | gdb --batch --command=src/mongo/db/modules/pmse/tests/gdb/crashtest_update_05.gdb --args ./mongod --dbpath="$mydb" --storageEngine=pmse --bind_ip 127.0.0.1 > src/mongo/db/modules/pmse/tests/log/crashtest_update_05_gdb_log.txt 2>&1 & 26 | 27 | mongo_port=0; 28 | 29 | while ((mongo_port < 1)); do 30 | mongo_port="$(netstat -l | grep 27017 | wc -l)"; 31 | done 32 | 33 | if ./mongo src/mongo/db/modules/pmse/tests/js/crashtest_update_05.js > src/mongo/db/modules/pmse/tests/log/crashtest_update_05_shell_log.txt 2>&1; then 34 | echo "Success" 35 | else 36 | echo "Fail!" 37 | fi 38 | killall mongod > /dev/null 2>&1 39 | -------------------------------------------------------------------------------- /tests/crashtest_update_06.sh: -------------------------------------------------------------------------------- 1 | echo "Running crashtest_update_06..."; 2 | 3 | mydb="/mnt/psmem_0/"; 4 | 5 | while getopts ":m:" opt; do 6 | case $opt in 7 | m) 8 | mydb=$OPTARG; 9 | ;; 10 | \?) 11 | echo "Invalid option: -$OPTARG, please use: -m[mongoDB] xyz, where xyz = dbpath." >&2 12 | exit 1 13 | ;; 14 | :) 15 | echo "-$OPTARG requires an argument with dbpath." >&2 16 | exit 1 17 | ;; 18 | esac 19 | done 20 | 21 | killall mongod > /dev/null 2>&1 22 | rm -rf /mnt/psmem_0/ > /dev/null 2>&1 23 | cd ../../../../../../ 24 | 25 | gdb --batch --command=src/mongo/db/modules/pmse/tests/gdb/crashtest_update_06.gdb --args ./mongod --dbpath="$mydb" --storageEngine=pmse --bind_ip 127.0.0.1 > src/mongo/db/modules/pmse/tests/log/crashtest_update_06_gdb_log.txt 2>&1 & 26 | 27 | mongo_port=0; 28 | 29 | while ((mongo_port < 1)); do 30 | mongo_port="$(netstat -l | grep 27017 | wc -l)"; 31 | done 32 | 33 | if ./mongo src/mongo/db/modules/pmse/tests/js/crashtest_update_06.js > src/mongo/db/modules/pmse/tests/log/crashtest_update_06_shell_log.txt 2>&1; then 34 | echo "Success" 35 | else 36 | echo "Fail!" 
37 | fi 38 | killall mongod > /dev/null 2>&1 39 | -------------------------------------------------------------------------------- /tests/gdb/crashtest_delete_01.gdb: -------------------------------------------------------------------------------- 1 | set verbose off 2 | set confirm off 3 | set breakpoint pending on 4 | 5 | b mongo::PmseRecordStore::deleteRecord 6 | command 1 7 | stop 8 | end 9 | run 10 | quit -------------------------------------------------------------------------------- /tests/gdb/crashtest_delete_02.gdb: -------------------------------------------------------------------------------- 1 | set verbose off 2 | set confirm off 3 | set breakpoint pending on 4 | 5 | b mongo::PmseSortedDataInterface::unindex 6 | command 1 7 | stop 8 | end 9 | run 10 | cont 2 11 | quit -------------------------------------------------------------------------------- /tests/gdb/crashtest_delete_03.gdb: -------------------------------------------------------------------------------- 1 | set verbose off 2 | set confirm off 3 | set breakpoint pending on 4 | 5 | b mongo::PmseSortedDataInterface::unindex 6 | command 1 7 | stop 8 | end 9 | run 10 | cont 3 11 | quit -------------------------------------------------------------------------------- /tests/gdb/crashtest_delete_04.gdb: -------------------------------------------------------------------------------- 1 | set verbose off 2 | set confirm off 3 | set breakpoint pending on 4 | 5 | b mongo::PmseRecordStore::deleteRecord 6 | command 1 7 | stop 8 | end 9 | run 10 | cont 1 11 | quit -------------------------------------------------------------------------------- /tests/gdb/crashtest_insert_01.gdb: -------------------------------------------------------------------------------- 1 | set verbose off 2 | set confirm off 3 | set breakpoint pending on 4 | 5 | b mongo::PmseSortedDataInterface::insert 6 | command 1 7 | stop 8 | end 9 | run 10 | cont 2 11 | quit -------------------------------------------------------------------------------- /tests/gdb/crashtest_insert_02.gdb: -------------------------------------------------------------------------------- 1 | set verbose off 2 | set confirm off 3 | set breakpoint pending on 4 | 5 | b mongo::PmseSortedDataInterface::insert 6 | command 1 7 | stop 8 | end 9 | run 10 | cont 4 11 | quit -------------------------------------------------------------------------------- /tests/gdb/crashtest_insert_03.gdb: -------------------------------------------------------------------------------- 1 | set verbose off 2 | set confirm off 3 | set breakpoint pending on 4 | 5 | b mongo::PmseSortedDataInterface::insert 6 | command 1 7 | stop 8 | end 9 | run 10 | cont 3 11 | quit -------------------------------------------------------------------------------- /tests/gdb/crashtest_insert_04.gdb: -------------------------------------------------------------------------------- 1 | set verbose off 2 | set confirm off 3 | set breakpoint pending on 4 | 5 | b mongo::PmseSortedDataInterface::insert 6 | command 1 7 | stop 8 | end 9 | run 10 | cont 6 11 | quit -------------------------------------------------------------------------------- /tests/gdb/crashtest_insert_05.gdb: -------------------------------------------------------------------------------- 1 | set verbose off 2 | set confirm off 3 | set breakpoint pending on 4 | 5 | b mongo::PmseSortedDataInterface::insert 6 | command 1 7 | stop 8 | end 9 | run 10 | cont 3 11 | quit -------------------------------------------------------------------------------- /tests/gdb/crashtest_insert_06.gdb: 
-------------------------------------------------------------------------------- 1 | set verbose off 2 | set confirm off 3 | set breakpoint pending on 4 | 5 | b mongo::PmseSortedDataInterface::insert 6 | command 1 7 | stop 8 | end 9 | run 10 | cont 3 11 | quit -------------------------------------------------------------------------------- /tests/gdb/crashtest_insert_07.gdb: -------------------------------------------------------------------------------- 1 | set verbose off 2 | set confirm off 3 | set breakpoint pending on 4 | 5 | b mongo::PmseSortedDataInterface::insert 6 | command 1 7 | stop 8 | end 9 | run 10 | cont 3 11 | quit -------------------------------------------------------------------------------- /tests/gdb/crashtest_insert_08.gdb: -------------------------------------------------------------------------------- 1 | set verbose off 2 | set confirm off 3 | set breakpoint pending on 4 | 5 | b mongo::PmseSortedDataInterface::insert 6 | command 1 7 | stop 8 | end 9 | run 10 | cont 3 11 | quit -------------------------------------------------------------------------------- /tests/gdb/crashtest_insert_09.gdb: -------------------------------------------------------------------------------- 1 | set verbose off 2 | set confirm off 3 | set breakpoint pending on 4 | 5 | b mongo::PmseSortedDataInterface::insert 6 | command 1 7 | stop 8 | end 9 | run 10 | cont 4 11 | quit -------------------------------------------------------------------------------- /tests/gdb/crashtest_insert_10.gdb: -------------------------------------------------------------------------------- 1 | set verbose off 2 | set confirm off 3 | set breakpoint pending on 4 | 5 | b mongo::PmseRecordStore::insertRecord 6 | command 1 7 | stop 8 | end 9 | run 10 | cont 49 11 | quit -------------------------------------------------------------------------------- /tests/gdb/crashtest_insert_11.gdb: -------------------------------------------------------------------------------- 1 | set verbose off 2 | set confirm off 3 | set breakpoint pending on 4 | 5 | b mongo::PmseSortedDataInterface::insert 6 | command 1 7 | stop 8 | end 9 | run 10 | cont 49 11 | quit -------------------------------------------------------------------------------- /tests/gdb/crashtest_insert_12.gdb: -------------------------------------------------------------------------------- 1 | set verbose off 2 | set confirm off 3 | set breakpoint pending on 4 | 5 | b mongo::PmseRecordStore::insertRecord 6 | command 1 7 | stop 8 | end 9 | run 10 | cont 49 11 | quit -------------------------------------------------------------------------------- /tests/gdb/crashtest_insert_13.gdb: -------------------------------------------------------------------------------- 1 | set verbose off 2 | set confirm off 3 | set breakpoint pending on 4 | 5 | b mongo::PmseSortedDataInterface::insert 6 | command 1 7 | stop 8 | end 9 | run 10 | cont 49 11 | quit -------------------------------------------------------------------------------- /tests/gdb/crashtest_update_01.gdb: -------------------------------------------------------------------------------- 1 | set verbose off 2 | set confirm off 3 | set breakpoint pending on 4 | 5 | b mongo::PmseSortedDataInterface::unindex 6 | command 1 7 | stop 8 | end 9 | run 10 | quit -------------------------------------------------------------------------------- /tests/gdb/crashtest_update_02.gdb: -------------------------------------------------------------------------------- 1 | set verbose off 2 | set confirm off 3 | set breakpoint pending on 4 | 5 | b 
mongo::PmseSortedDataInterface::insert 6 | command 1 7 | stop 8 | end 9 | run 10 | cont 4 11 | quit -------------------------------------------------------------------------------- /tests/gdb/crashtest_update_03.gdb: -------------------------------------------------------------------------------- 1 | set verbose off 2 | set confirm off 3 | set breakpoint pending on 4 | 5 | b mongo::PmseSortedDataInterface::unindex 6 | command 1 7 | stop 8 | end 9 | run 10 | cont 1 11 | quit -------------------------------------------------------------------------------- /tests/gdb/crashtest_update_04.gdb: -------------------------------------------------------------------------------- 1 | set verbose off 2 | set confirm off 3 | set breakpoint pending on 4 | 5 | b mongo::PmseSortedDataInterface::unindex 6 | command 1 7 | stop 8 | end 9 | run 10 | cont 3 11 | quit -------------------------------------------------------------------------------- /tests/gdb/crashtest_update_05.gdb: -------------------------------------------------------------------------------- 1 | set verbose off 2 | set confirm off 3 | set breakpoint pending on 4 | 5 | b mongo::PmseSortedDataInterface::unindex 6 | command 1 7 | stop 8 | end 9 | run 10 | cont 1 11 | quit -------------------------------------------------------------------------------- /tests/gdb/crashtest_update_06.gdb: -------------------------------------------------------------------------------- 1 | set verbose off 2 | set confirm off 3 | set breakpoint pending on 4 | 5 | b mongo::PmseSortedDataInterface::unindex 6 | command 1 7 | stop 8 | end 9 | run 10 | cont 3 11 | quit -------------------------------------------------------------------------------- /tests/js/crashtest_delete_01.js: -------------------------------------------------------------------------------- 1 | load('jstests/libs/parallelTester.js'); 2 | mydbpath = db.serverCmdLineOpts().parsed.storage.dbPath; 3 | 4 | db.dropDatabase(); 5 | db.runCommand( 6 | { 7 | createIndexes: "testt", 8 | indexes: [ 9 | { 10 | key: { 11 | a: 1 12 | }, 13 | name: "a", 14 | }, 15 | ] 16 | } 17 | ) 18 | 19 | var data_thread = function() { 20 | var coll = db.getCollection("testt"); 21 | coll.deleteOne({a: "a"}); 22 | }; 23 | 24 | var coll = db.getCollection("testt"); 25 | var data = {a: "a"}; 26 | coll.insert(data); 27 | 28 | dt = new ScopedThread(data_thread); 29 | dt.start(); 30 | dt.join(); 31 | 32 | x = startMongoProgram('mongod', '--storageEngine=pmse', '--dbpath='+mydbpath, '--port', 27017); 33 | 34 | var db = x.getDB("test"); 35 | var coll = db.getCollection("testt"); 36 | var coll_validate = coll.validate(); 37 | 38 | printjson(coll_validate); 39 | 40 | var data_count = coll.find().count(); 41 | 42 | assert.eq(coll_validate.valid, true, "Collection is not valid"); 43 | assert.eq(data_count, coll.find({a: "a"}).count(), "Count mismatch! 
find().count and find().hint({a: 'a'}).count()"); 44 | -------------------------------------------------------------------------------- /tests/js/crashtest_delete_02.js: -------------------------------------------------------------------------------- 1 | load('jstests/libs/parallelTester.js'); 2 | mydbpath = db.serverCmdLineOpts().parsed.storage.dbPath; 3 | 4 | db.dropDatabase(); 5 | db.runCommand( 6 | { 7 | createIndexes: "testt", 8 | indexes: [ 9 | { 10 | key: { 11 | a: 1 12 | }, 13 | name: "a", 14 | }, 15 | { 16 | key: { 17 | b: 1 18 | }, 19 | name: "b", 20 | }, 21 | ] 22 | } 23 | ) 24 | 25 | var data_thread = function() { 26 | var coll = db.getCollection("testt"); 27 | coll.deleteOne({a: "a", b: "b"}); 28 | }; 29 | 30 | var coll = db.getCollection("testt"); 31 | var data = {a: "a", b: "b"}; 32 | coll.insert(data); 33 | 34 | dt = new ScopedThread(data_thread); 35 | dt.start(); 36 | dt.join(); 37 | 38 | x = startMongoProgram('mongod', '--storageEngine=pmse', '--dbpath='+mydbpath, '--port', 27017); 39 | 40 | var db = x.getDB("test"); 41 | var coll = db.getCollection("testt"); 42 | var coll_validate = coll.validate(); 43 | 44 | printjson(coll_validate); 45 | 46 | var data_count = coll.find().count(); 47 | 48 | assert.eq(coll_validate.valid, true, "Collection is not valid"); 49 | assert.eq(data_count, coll.find({a: "a"}).count(), "Count mismatch! find().count and find().hint({a: 'a'}).count()"); 50 | assert.eq(data_count, coll.find({b: "b"}).count(), "Count mismatch! find().count and find().hint({b: 'b'}).count()"); 51 | -------------------------------------------------------------------------------- /tests/js/crashtest_delete_03.js: -------------------------------------------------------------------------------- 1 | load('jstests/libs/parallelTester.js'); 2 | mydbpath = db.serverCmdLineOpts().parsed.storage.dbPath; 3 | 4 | db.dropDatabase(); 5 | db.runCommand( 6 | { 7 | createIndexes: "testt", 8 | indexes: [ 9 | { 10 | key: { 11 | a: 1 12 | }, 13 | name: "a", 14 | }, 15 | { 16 | key: { 17 | b: 1 18 | }, 19 | name: "b", 20 | }, 21 | { 22 | key: { 23 | c: 1 24 | }, 25 | name: "c", 26 | }, 27 | { 28 | key: { 29 | d: 1 30 | }, 31 | name: "d", 32 | }, 33 | ] 34 | } 35 | ) 36 | 37 | var data_thread = function() { 38 | var coll = db.getCollection("testt"); 39 | coll.deleteMany({a: "a", b: "b", c: "c", d: "d"}); 40 | }; 41 | 42 | var coll = db.getCollection("testt"); 43 | var data = {a: "a", b: "b", c: "c", d: "d"}; 44 | coll.insert(data); 45 | coll.insert(data); 46 | 47 | dt = new ScopedThread(data_thread); 48 | dt.start(); 49 | dt.join(); 50 | 51 | x = startMongoProgram('mongod', '--storageEngine=pmse', '--dbpath='+mydbpath, '--port', 27017); 52 | 53 | var db = x.getDB("test"); 54 | var coll = db.getCollection("testt"); 55 | var coll_validate = coll.validate(); 56 | 57 | printjson(coll_validate); 58 | 59 | var data_count = coll.find().count(); 60 | 61 | assert.eq(coll_validate.valid, true, "Collection is not valid"); 62 | assert.eq(data_count, coll.find({a: "a"}).count(), "Count mismatch! find().count and find().hint({a: 'a'}).count()"); 63 | assert.eq(data_count, coll.find({b: "b"}).count(), "Count mismatch! find().count and find().hint({b: 'b'}).count()"); 64 | assert.eq(data_count, coll.find({a: "c"}).count(), "Count mismatch! find().count and find().hint({a: 'c'}).count()"); 65 | assert.eq(data_count, coll.find({b: "d"}).count(), "Count mismatch! 
find().count and find().hint({b: 'd'}).count()"); 66 | -------------------------------------------------------------------------------- /tests/js/crashtest_delete_04.js: -------------------------------------------------------------------------------- 1 | load('jstests/libs/parallelTester.js'); 2 | mydbpath = db.serverCmdLineOpts().parsed.storage.dbPath; 3 | 4 | db.dropDatabase(); 5 | db.runCommand( 6 | { 7 | createIndexes: "testt", 8 | indexes: [ 9 | { 10 | key: { 11 | a: 1 12 | }, 13 | name: "a", 14 | }, 15 | { 16 | key: { 17 | b: 1 18 | }, 19 | name: "b", 20 | }, 21 | { 22 | key: { 23 | c: 1 24 | }, 25 | name: "c", 26 | }, 27 | { 28 | key: { 29 | d: 1 30 | }, 31 | name: "d", 32 | }, 33 | ] 34 | } 35 | ) 36 | 37 | var data_thread = function() { 38 | var coll = db.getCollection("testt"); 39 | coll.deleteMany({a: "a", b: "b", c: "c", d: "d"}); 40 | }; 41 | 42 | var coll = db.getCollection("testt"); 43 | var data = {a: "a", b: "b", c: "c", d: "d"}; 44 | coll.insert(data); 45 | coll.insert(data); 46 | 47 | dt = new ScopedThread(data_thread); 48 | dt.start(); 49 | dt.join(); 50 | 51 | x = startMongoProgram('mongod', '--storageEngine=pmse', '--dbpath='+mydbpath, '--port', 27017); 52 | 53 | var db = x.getDB("test"); 54 | var coll = db.getCollection("testt"); 55 | var coll_validate = coll.validate(); 56 | 57 | printjson(coll_validate); 58 | 59 | var data_count = coll.find().count(); 60 | 61 | assert.eq(coll_validate.valid, true, "Collection is not valid"); 62 | assert.eq(data_count, coll.find({a: "a"}).count(), "Count mismatch! find().count and find().hint({a: 'a'}).count()"); 63 | assert.eq(data_count, coll.find({b: "b"}).count(), "Count mismatch! find().count and find().hint({b: 'b'}).count()"); 64 | assert.eq(data_count, coll.find({a: "c"}).count(), "Count mismatch! find().count and find().hint({a: 'c'}).count()"); 65 | assert.eq(data_count, coll.find({b: "d"}).count(), "Count mismatch! find().count and find().hint({b: 'd'}).count()"); 66 | -------------------------------------------------------------------------------- /tests/js/crashtest_insert_01.js: -------------------------------------------------------------------------------- 1 | load('jstests/libs/parallelTester.js'); 2 | mydbpath = db.serverCmdLineOpts().parsed.storage.dbPath; 3 | 4 | db.dropDatabase(); 5 | 6 | var data_thread = function() { 7 | var coll = db.getCollection("testt"); 8 | var data = {a: "a"}; 9 | coll.insert(data); 10 | }; 11 | 12 | dt = new ScopedThread(data_thread); 13 | dt.start(); 14 | dt.join(); 15 | 16 | x = startMongoProgram('mongod', '--storageEngine=pmse', '--dbpath='+mydbpath, '--port', 27017); 17 | 18 | var db = x.getDB("test"); 19 | var coll = db.getCollection("testt"); 20 | var coll_validate = coll.validate(); 21 | 22 | printjson(coll_validate); 23 | 24 | var data_count = coll.find().count(); 25 | 26 | assert.eq(coll_validate.valid, true, "Collection is not valid"); 27 | assert.eq(data_count, coll.find().hint({_id: 1}).count(), "Count mismatch! 
find().count and find().hint({_id: 1}).count()"); 28 | -------------------------------------------------------------------------------- /tests/js/crashtest_insert_02.js: -------------------------------------------------------------------------------- 1 | load('jstests/libs/parallelTester.js'); 2 | mydbpath = db.serverCmdLineOpts().parsed.storage.dbPath; 3 | 4 | db.dropDatabase(); 5 | db.runCommand( 6 | { 7 | createIndexes: "testt", 8 | indexes: [ 9 | { 10 | key: { 11 | a: 1 12 | }, 13 | name: "a", 14 | }, 15 | { 16 | key: { 17 | b: 1 18 | }, 19 | name: "b", 20 | } 21 | ] 22 | } 23 | ) 24 | 25 | var data_thread = function() { 26 | var coll = db.getCollection("testt"); 27 | var data = {a: "a", b: "b"}; 28 | coll.insert(data); 29 | }; 30 | 31 | dt = new ScopedThread(data_thread); 32 | dt.start(); 33 | dt.join(); 34 | 35 | x = startMongoProgram('mongod', '--storageEngine=pmse', '--dbpath='+mydbpath, '--port', 27017); 36 | 37 | var db = x.getDB("test"); 38 | var coll = db.getCollection("testt"); 39 | var coll_validate = coll.validate(); 40 | 41 | printjson(coll_validate); 42 | 43 | var data_count = coll.find().count(); 44 | 45 | assert.eq(coll_validate.valid, true, "Collection is not valid"); 46 | assert.eq(data_count, coll.find().hint({_id: 1}).count(), "Count mismatch! find().count and find().hint({_id: 1}).count()"); 47 | assert.eq(data_count, coll.find().hint({a: 1}).count(), "Count mismatch! find().count and find().hint({a: 1}).count()"); 48 | assert.eq(data_count, coll.find().hint({b: 1}).count(), "Count mismatch! find().count and find().hint({b: 1}).count()"); 49 | -------------------------------------------------------------------------------- /tests/js/crashtest_insert_03.js: -------------------------------------------------------------------------------- 1 | load('jstests/libs/parallelTester.js'); 2 | mydbpath = db.serverCmdLineOpts().parsed.storage.dbPath; 3 | 4 | db.dropDatabase(); 5 | db.runCommand( 6 | { 7 | createIndexes: "testt", 8 | indexes: [ 9 | { 10 | key: { 11 | a: 1, 12 | b: 1 13 | }, 14 | name: "ab", 15 | }, 16 | ] 17 | } 18 | ) 19 | 20 | var data_thread = function() { 21 | var coll = db.getCollection("testt"); 22 | var data = {a: "a", b: "b"}; 23 | coll.insert(data); 24 | }; 25 | 26 | dt = new ScopedThread(data_thread); 27 | dt.start(); 28 | dt.join(); 29 | 30 | x = startMongoProgram('mongod', '--storageEngine=pmse', '--dbpath='+mydbpath, '--port', 27017); 31 | 32 | var db = x.getDB("test"); 33 | var coll = db.getCollection("testt"); 34 | var coll_validate = coll.validate(); 35 | 36 | printjson(coll_validate); 37 | 38 | var data_count = coll.find().count(); 39 | 40 | assert.eq(coll_validate.valid, true, "Collection is not valid"); 41 | assert.eq(data_count, coll.find().hint({_id: 1}).count(), "Count mismatch! find().count and find().hint({_id: 1}).count()"); 42 | assert.eq(data_count, coll.find().hint({a: 1, b: 1}).count(), "Count mismatch! 
find().count and find().hint({a: 1, b: 1}).count()"); 43 | -------------------------------------------------------------------------------- /tests/js/crashtest_insert_04.js: -------------------------------------------------------------------------------- 1 | load('jstests/libs/parallelTester.js'); 2 | mydbpath = db.serverCmdLineOpts().parsed.storage.dbPath; 3 | 4 | db.dropDatabase(); 5 | db.runCommand( 6 | { 7 | createIndexes: "testt", 8 | indexes: [ 9 | { 10 | key: { 11 | a: 1 12 | }, 13 | name: "a", 14 | }, 15 | { 16 | key: { 17 | b: 1 18 | }, 19 | name: "b", 20 | }, 21 | ] 22 | } 23 | ) 24 | 25 | var data_thread = function() { 26 | var coll = db.getCollection("testt"); 27 | var data = {a: "a", b: [1, 2, 3]}; 28 | coll.insert(data); 29 | }; 30 | 31 | dt = new ScopedThread(data_thread); 32 | dt.start(); 33 | dt.join(); 34 | 35 | x = startMongoProgram('mongod', '--storageEngine=pmse', '--dbpath='+mydbpath, '--port', 27017); 36 | 37 | var db = x.getDB("test"); 38 | var coll = db.getCollection("testt"); 39 | var coll_validate = coll.validate(); 40 | 41 | printjson(coll_validate); 42 | 43 | var data_count = coll.find().count(); 44 | 45 | assert.eq(coll_validate.valid, true, "Collection is not valid"); 46 | assert.eq(data_count, coll.find().hint({_id: 1}).count(), "Count mismatch! find().count and find().hint({_id: 1}).count()"); 47 | assert.eq(data_count, coll.find().hint({a: 1}).count(), "Count mismatch! find().count and find().hint({a: 1}).count()"); 48 | assert.eq(data_count, coll.find({b: 1}).count(), "Count mismatch! find().count and find({b: 1}).count()"); 49 | assert.eq(data_count, coll.find({b: 2}).count(), "Count mismatch! find().count and find({b: 2}).count()"); 50 | assert.eq(data_count, coll.find({b: 3}).count(), "Count mismatch! find().count and find({b: 3}).count()"); 51 | -------------------------------------------------------------------------------- /tests/js/crashtest_insert_05.js: -------------------------------------------------------------------------------- 1 | load('jstests/libs/parallelTester.js'); 2 | mydbpath = db.serverCmdLineOpts().parsed.storage.dbPath; 3 | 4 | db.dropDatabase(); 5 | db.runCommand( 6 | { 7 | createIndexes: "testt", 8 | indexes: [ 9 | { 10 | key: { 11 | location: "2dsphere" 12 | }, 13 | name: "location", 14 | }, 15 | ] 16 | } 17 | ) 18 | 19 | var data_thread = function() { 20 | var coll = db.getCollection("testt"); 21 | var data = {location: {type: "Point", coordinates: [54.38, 18.48]}, name: "Intel"}; 22 | coll.insert(data); 23 | }; 24 | 25 | dt = new ScopedThread(data_thread); 26 | dt.start(); 27 | dt.join(); 28 | 29 | x = startMongoProgram('mongod', '--storageEngine=pmse', '--dbpath='+mydbpath, '--port', 27017); 30 | 31 | var db = x.getDB("test"); 32 | var coll = db.getCollection("testt"); 33 | var coll_validate = coll.validate(); 34 | 35 | printjson(coll_validate); 36 | 37 | var data_count = coll.find().count(); 38 | 39 | assert.eq(coll_validate.valid, true, "Collection is not valid"); 40 | assert.eq(data_count, coll.find().hint({_id: 1}).count(), "Count mismatch! find().count and find().hint({_id: 1}).count()"); 41 | assert.eq(data_count, coll.find( { location : 42 | { $geoIntersects : 43 | { $geometry : 44 | { type : "Point" , 45 | coordinates : [ 54.38, 18.48 ] 46 | } } } } ).count(), "Count mismatch! 
find().count and find(Intel coord).count()"); 47 | -------------------------------------------------------------------------------- /tests/js/crashtest_insert_06.js: -------------------------------------------------------------------------------- 1 | load('jstests/libs/parallelTester.js'); 2 | mydbpath = db.serverCmdLineOpts().parsed.storage.dbPath; 3 | 4 | db.dropDatabase(); 5 | db.runCommand( 6 | { 7 | createIndexes: "testt", 8 | indexes: [ 9 | { 10 | key: { 11 | str: "text", 12 | }, 13 | name: "str", 14 | }, 15 | ] 16 | } 17 | ) 18 | 19 | var data_thread = function() { 20 | var coll = db.getCollection("testt"); 21 | var data = {a: 1, b: 2, str: "PMSE"}; 22 | coll.insert(data); 23 | }; 24 | 25 | dt = new ScopedThread(data_thread); 26 | dt.start(); 27 | dt.join(); 28 | 29 | x = startMongoProgram('mongod', '--storageEngine=pmse', '--dbpath='+mydbpath, '--port', 27017); 30 | 31 | var db = x.getDB("test"); 32 | var coll = db.getCollection("testt"); 33 | var coll_validate = coll.validate(); 34 | 35 | printjson(coll_validate); 36 | 37 | var data_count = coll.find().count(); 38 | 39 | assert.eq(coll_validate.valid, true, "Collection is not valid"); 40 | assert.eq(data_count, coll.find().hint({_id: 1}).count(), "Count mismatch! find().count and find().hint({_id: 1}).count()"); 41 | assert.eq(data_count, coll.find( { $text: { $search: "PMSE" } } ).count(), "Count mismatch! find().count and find(text).count()"); 42 | -------------------------------------------------------------------------------- /tests/js/crashtest_insert_07.js: -------------------------------------------------------------------------------- 1 | load('jstests/libs/parallelTester.js'); 2 | mydbpath = db.serverCmdLineOpts().parsed.storage.dbPath; 3 | 4 | db.dropDatabase(); 5 | db.runCommand( 6 | { 7 | createIndexes: "testt", 8 | indexes: [ 9 | { 10 | key: { 11 | h: "hashed", 12 | }, 13 | name: "h", 14 | }, 15 | ] 16 | } 17 | ) 18 | 19 | var data_thread = function() { 20 | var coll = db.getCollection("testt"); 21 | var data = {h: 1, h2: 2}; 22 | coll.insert(data); 23 | }; 24 | 25 | dt = new ScopedThread(data_thread); 26 | dt.start(); 27 | dt.join(); 28 | 29 | x = startMongoProgram('mongod', '--storageEngine=pmse', '--dbpath='+mydbpath, '--port', 27017); 30 | 31 | var db = x.getDB("test"); 32 | var coll = db.getCollection("testt"); 33 | var coll_validate = coll.validate(); 34 | 35 | printjson(coll_validate); 36 | 37 | var data_count = coll.find().count(); 38 | 39 | assert.eq(coll_validate.valid, true, "Collection is not valid"); 40 | assert.eq(data_count, coll.find().hint({_id: 1}).count(), "Count mismatch! find().count and find().hint({_id: 1}).count()"); 41 | assert.eq(data_count, coll.find({h: 1}).count(), "Count mismatch! 
find().count and find(hashed).count()"); 42 | -------------------------------------------------------------------------------- /tests/js/crashtest_insert_08.js: -------------------------------------------------------------------------------- 1 | load('jstests/libs/parallelTester.js'); 2 | mydbpath = db.serverCmdLineOpts().parsed.storage.dbPath; 3 | 4 | db.dropDatabase(); 5 | db.runCommand( 6 | { 7 | createIndexes: "testt", 8 | indexes: [ 9 | { 10 | key: { 11 | a: 1, 12 | }, 13 | name: "a", 14 | expireAfterSeconds: 10, 15 | }, 16 | ] 17 | } 18 | ) 19 | 20 | var data_thread = function() { 21 | var coll = db.getCollection("testt"); 22 | var data = {a: 1}; 23 | coll.insert(data); 24 | }; 25 | 26 | dt = new ScopedThread(data_thread); 27 | dt.start(); 28 | dt.join(); 29 | 30 | x = startMongoProgram('mongod', '--storageEngine=pmse', '--dbpath='+mydbpath, '--port', 27017); 31 | 32 | var db = x.getDB("test"); 33 | var coll = db.getCollection("testt"); 34 | var coll_validate = coll.validate(); 35 | 36 | printjson(coll_validate); 37 | 38 | var data_count = coll.find().count(); 39 | 40 | assert.eq(coll_validate.valid, true, "Collection is not valid"); 41 | assert.eq(data_count, coll.find().hint({_id: 1}).count(), "Count mismatch! find().count and find().hint({_id: 1}).count()"); 42 | assert.eq(data_count, coll.find().hint({a: 1}).count(), "Count mismatch! find().count and find().hint(a).count()"); 43 | -------------------------------------------------------------------------------- /tests/js/crashtest_insert_09.js: -------------------------------------------------------------------------------- 1 | load('jstests/libs/parallelTester.js'); 2 | mydbpath = db.serverCmdLineOpts().parsed.storage.dbPath; 3 | 4 | db.dropDatabase(); 5 | db.createCollection( "testt", { capped: true, size: 100000 } ) 6 | db.runCommand( 7 | { 8 | createIndexes: "testt", 9 | indexes: [ 10 | { 11 | key: { 12 | a: 1, 13 | }, 14 | name: "a", 15 | }, 16 | { 17 | key: { 18 | b: 1, 19 | }, 20 | name: "b", 21 | }, 22 | ] 23 | } 24 | ) 25 | 26 | var data_thread = function() { 27 | var coll = db.getCollection("testt"); 28 | var data = {a: 1, b: 0}; 29 | coll.insert(data); 30 | }; 31 | 32 | dt = new ScopedThread(data_thread); 33 | dt.start(); 34 | dt.join(); 35 | 36 | x = startMongoProgram('mongod', '--storageEngine=pmse', '--dbpath='+mydbpath, '--port', 27017); 37 | 38 | var db = x.getDB("test"); 39 | var coll = db.getCollection("testt"); 40 | var coll_validate = coll.validate(); 41 | 42 | printjson(coll_validate); 43 | 44 | var data_count = coll.find().count(); 45 | 46 | assert.eq(coll_validate.valid, true, "Collection is not valid"); 47 | assert.eq(data_count, coll.find().hint({_id: 1}).count(), "Count mismatch! find().count and find().hint({_id: 1}).count()"); 48 | assert.eq(data_count, coll.find().hint({a: 1}).count(), "Count mismatch! find().count and find().hint(a).count()"); 49 | assert.eq(data_count, coll.find().hint({b: 1}).count(), "Count mismatch! 
find().count and find().hint(b).count()"); 50 | -------------------------------------------------------------------------------- /tests/js/crashtest_insert_10.js: -------------------------------------------------------------------------------- 1 | load('jstests/libs/parallelTester.js'); 2 | mydbpath = db.serverCmdLineOpts().parsed.storage.dbPath; 3 | 4 | db.dropDatabase(); 5 | db.runCommand( 6 | { 7 | createIndexes: "testt", 8 | indexes: [ 9 | { 10 | key: { 11 | a: 1 12 | }, 13 | name: "a", 14 | }, 15 | { 16 | key: { 17 | b: 1 18 | }, 19 | name: "b", 20 | }, 21 | { 22 | key: { 23 | c: 1 24 | }, 25 | name: "c", 26 | }, 27 | { 28 | key: { 29 | d: 1 30 | }, 31 | name: "d", 32 | }, 33 | ] 34 | } 35 | ) 36 | 37 | var data_thread = function() { 38 | var coll = db.getCollection("testt"); 39 | var data = {a: "a", b: "b", c: "x", d: "d"}; 40 | var data_set = [data]; 41 | for(i=0; i<100; i++) 42 | { 43 | data_set.push(data); 44 | } 45 | coll.insertMany(data_set); 46 | }; 47 | 48 | dt = new ScopedThread(data_thread); 49 | dt.start(); 50 | dt.join(); 51 | 52 | x = startMongoProgram('mongod', '--storageEngine=pmse', '--dbpath='+mydbpath, '--port', 27017); 53 | 54 | var db = x.getDB("test"); 55 | var coll = db.getCollection("testt"); 56 | var coll_validate = coll.validate(); 57 | 58 | printjson(coll_validate); 59 | 60 | var data_count = coll.find().count(); 61 | 62 | assert.eq(coll_validate.valid, true, "Collection is not valid"); 63 | assert.eq(data_count, coll.find().hint({_id: 1}).count(), "Count mismatch! find().count and find().hint({_id: 1}).count()"); 64 | assert.eq(data_count, coll.find().hint({a: 1}).count(), "Count mismatch! find().count and find().hint(a).count()"); 65 | assert.eq(data_count, coll.find().hint({b: 1}).count(), "Count mismatch! find().count and find().hint(b).count()"); 66 | assert.eq(data_count, coll.find().hint({c: 1}).count(), "Count mismatch! find().count and find().hint(c).count()"); 67 | assert.eq(data_count, coll.find().hint({d: 1}).count(), "Count mismatch! find().count and find().hint(d).count()"); 68 | -------------------------------------------------------------------------------- /tests/js/crashtest_insert_11.js: -------------------------------------------------------------------------------- 1 | load('jstests/libs/parallelTester.js'); 2 | mydbpath = db.serverCmdLineOpts().parsed.storage.dbPath; 3 | 4 | db.dropDatabase(); 5 | 6 | db.runCommand( 7 | { 8 | createIndexes: "testt", 9 | indexes: [ 10 | { 11 | key: { 12 | a: 1 13 | }, 14 | name: "a", 15 | }, 16 | { 17 | key: { 18 | b: 1 19 | }, 20 | name: "b", 21 | }, 22 | { 23 | key: { 24 | c: 1 25 | }, 26 | name: "c", 27 | }, 28 | { 29 | key: { 30 | d: 1 31 | }, 32 | name: "d", 33 | }, 34 | ] 35 | } 36 | ) 37 | 38 | var data_thread = function() { 39 | var coll = db.getCollection("testt"); 40 | var data = {a: "a", b: "b", c: "x", d: "d"}; 41 | var data_set = [data]; 42 | for(i=0; i<100; i++) 43 | { 44 | data_set.push(data); 45 | } 46 | coll.insertMany(data_set); 47 | }; 48 | 49 | dt = new ScopedThread(data_thread); 50 | dt.start(); 51 | dt.join(); 52 | 53 | x = startMongoProgram('mongod', '--storageEngine=pmse', '--dbpath='+mydbpath, '--port', 27017); 54 | 55 | var db = x.getDB("test"); 56 | var coll = db.getCollection("testt"); 57 | var coll_validate = coll.validate(); 58 | 59 | printjson(coll_validate); 60 | 61 | var data_count = coll.find().count(); 62 | 63 | assert.eq(coll_validate.valid, true, "Collection is not valid"); 64 | assert.eq(data_count, coll.find().hint({_id: 1}).count(), "Count mismatch! 
find().count and find().hint({_id: 1}).count()"); 65 | assert.eq(data_count, coll.find().hint({a: 1}).count(), "Count mismatch! find().count and find().hint(a).count()"); 66 | assert.eq(data_count, coll.find().hint({b: 1}).count(), "Count mismatch! find().count and find().hint(b).count()"); 67 | assert.eq(data_count, coll.find().hint({c: 1}).count(), "Count mismatch! find().count and find().hint(c).count()"); 68 | assert.eq(data_count, coll.find().hint({d: 1}).count(), "Count mismatch! find().count and find().hint(d).count()"); 69 | -------------------------------------------------------------------------------- /tests/js/crashtest_insert_12.js: -------------------------------------------------------------------------------- 1 | load('jstests/libs/parallelTester.js'); 2 | mydbpath = db.serverCmdLineOpts().parsed.storage.dbPath; 3 | 4 | db.dropDatabase(); 5 | db.createCollection( "testt", { capped: true, size: 100000 } ); 6 | db.runCommand( 7 | { 8 | createIndexes: "testt", 9 | indexes: [ 10 | { 11 | key: { 12 | a: 1 13 | }, 14 | name: "a", 15 | }, 16 | { 17 | key: { 18 | b: 1 19 | }, 20 | name: "b", 21 | }, 22 | { 23 | key: { 24 | c: 1 25 | }, 26 | name: "c", 27 | }, 28 | { 29 | key: { 30 | d: 1 31 | }, 32 | name: "d", 33 | }, 34 | ] 35 | } 36 | ) 37 | 38 | var data_thread = function() { 39 | var coll = db.getCollection("testt"); 40 | var data = {a: "a", b: "b", c: "x", d: "d"}; 41 | var data_set = [data]; 42 | for(i=0; i<100; i++) 43 | { 44 | data_set.push(data); 45 | } 46 | coll.insertMany(data_set); 47 | }; 48 | 49 | dt = new ScopedThread(data_thread); 50 | dt.start(); 51 | dt.join(); 52 | 53 | x = startMongoProgram('mongod', '--storageEngine=pmse', '--dbpath='+mydbpath, '--port', 27017); 54 | 55 | var db = x.getDB("test"); 56 | var coll = db.getCollection("testt"); 57 | var coll_validate = coll.validate(); 58 | 59 | printjson(coll_validate); 60 | 61 | var data_count = coll.find().count(); 62 | 63 | assert.eq(coll_validate.valid, true, "Collection is not valid"); 64 | assert.eq(data_count, coll.find().hint({_id: 1}).count(), "Count mismatch! find().count and find().hint({_id: 1}).count()"); 65 | assert.eq(data_count, coll.find().hint({a: 1}).count(), "Count mismatch! find().count and find().hint(a).count()"); 66 | assert.eq(data_count, coll.find().hint({b: 1}).count(), "Count mismatch! find().count and find().hint(b).count()"); 67 | assert.eq(data_count, coll.find().hint({c: 1}).count(), "Count mismatch! find().count and find().hint(c).count()"); 68 | assert.eq(data_count, coll.find().hint({d: 1}).count(), "Count mismatch! 
find().count and find().hint(d).count()"); 69 | -------------------------------------------------------------------------------- /tests/js/crashtest_insert_13.js: -------------------------------------------------------------------------------- 1 | load('jstests/libs/parallelTester.js'); 2 | mydbpath = db.serverCmdLineOpts().parsed.storage.dbPath; 3 | 4 | db.dropDatabase(); 5 | db.createCollection( "testt", { capped: true, size: 100000 } ); 6 | db.runCommand( 7 | { 8 | createIndexes: "testt", 9 | indexes: [ 10 | { 11 | key: { 12 | a: 1 13 | }, 14 | name: "a", 15 | }, 16 | { 17 | key: { 18 | b: 1 19 | }, 20 | name: "b", 21 | }, 22 | { 23 | key: { 24 | c: 1 25 | }, 26 | name: "c", 27 | }, 28 | { 29 | key: { 30 | d: 1 31 | }, 32 | name: "d", 33 | }, 34 | ] 35 | } 36 | ) 37 | 38 | var data_thread = function() { 39 | var coll = db.getCollection("testt"); 40 | var data = {a: "a", b: "b", c: "x", d: "d"}; 41 | var data_set = [data]; 42 | for(i=0; i<100; i++) 43 | { 44 | data_set.push(data); 45 | } 46 | coll.insertMany(data_set); 47 | }; 48 | 49 | dt = new ScopedThread(data_thread); 50 | dt.start(); 51 | dt.join(); 52 | 53 | x = startMongoProgram('mongod', '--storageEngine=pmse', '--dbpath='+mydbpath, '--port', 27017); 54 | 55 | var db = x.getDB("test"); 56 | var coll = db.getCollection("testt"); 57 | var coll_validate = coll.validate(); 58 | 59 | printjson(coll_validate); 60 | 61 | var data_count = coll.find().count(); 62 | 63 | assert.eq(coll_validate.valid, true, "Collection is not valid"); 64 | assert.eq(data_count, coll.find().hint({_id: 1}).count(), "Count mismatch! find().count and find().hint({_id: 1}).count()"); 65 | assert.eq(data_count, coll.find().hint({a: 1}).count(), "Count mismatch! find().count and find().hint(a).count()"); 66 | assert.eq(data_count, coll.find().hint({b: 1}).count(), "Count mismatch! find().count and find().hint(b).count()"); 67 | assert.eq(data_count, coll.find().hint({c: 1}).count(), "Count mismatch! find().count and find().hint(c).count()"); 68 | assert.eq(data_count, coll.find().hint({d: 1}).count(), "Count mismatch! find().count and find().hint(d).count()"); 69 | -------------------------------------------------------------------------------- /tests/js/crashtest_update_01.js: -------------------------------------------------------------------------------- 1 | load('jstests/libs/parallelTester.js'); 2 | mydbpath = db.serverCmdLineOpts().parsed.storage.dbPath; 3 | 4 | db.dropDatabase(); 5 | db.runCommand( 6 | { 7 | createIndexes: "testt", 8 | indexes: [ 9 | { 10 | key: { 11 | a: 1 12 | }, 13 | name: "a", 14 | }, 15 | ] 16 | } 17 | ) 18 | 19 | var data_thread = function() { 20 | var coll = db.getCollection("testt"); 21 | coll.updateOne({a: "a"}, { $set: {a: "b"}}); 22 | }; 23 | 24 | var coll = db.getCollection("testt"); 25 | var data = {a: "a"}; 26 | coll.insert(data); 27 | 28 | dt = new ScopedThread(data_thread); 29 | dt.start(); 30 | dt.join(); 31 | 32 | x = startMongoProgram('mongod', '--storageEngine=pmse', '--dbpath='+mydbpath, '--port', 27017); 33 | 34 | var db = x.getDB("test"); 35 | var coll = db.getCollection("testt"); 36 | var coll_validate = coll.validate(); 37 | 38 | printjson(coll_validate); 39 | 40 | var data_count = coll.find().count(); 41 | 42 | assert.eq(coll_validate.valid, true, "Collection is not valid"); 43 | assert.eq(data_count, coll.find({a: "b"}).count(), "Count mismatch! find().count and find().hint({a: 'b'}).count()"); 44 | assert.eq(0, coll.find({a: "a"}).count(), "Count mismatch! 
find().count and find().hint({a: 'b'}).count()"); 45 | -------------------------------------------------------------------------------- /tests/js/crashtest_update_02.js: -------------------------------------------------------------------------------- 1 | load('jstests/libs/parallelTester.js'); 2 | mydbpath = db.serverCmdLineOpts().parsed.storage.dbPath; 3 | 4 | db.dropDatabase(); 5 | db.runCommand( 6 | { 7 | createIndexes: "testt", 8 | indexes: [ 9 | { 10 | key: { 11 | a: 1 12 | }, 13 | name: "a", 14 | }, 15 | ] 16 | } 17 | ) 18 | 19 | var data_thread = function() { 20 | var coll = db.getCollection("testt"); 21 | coll.updateOne({a: "a"}, { $set: {a: "b"}}); 22 | }; 23 | 24 | var coll = db.getCollection("testt"); 25 | var data = {a: "a"}; 26 | coll.insert(data); 27 | 28 | dt = new ScopedThread(data_thread); 29 | dt.start(); 30 | dt.join(); 31 | 32 | x = startMongoProgram('mongod', '--storageEngine=pmse', '--dbpath='+mydbpath, '--port', 27017); 33 | 34 | var db = x.getDB("test"); 35 | var coll = db.getCollection("testt"); 36 | var coll_validate = coll.validate(); 37 | 38 | printjson(coll_validate); 39 | 40 | var data_count = coll.find().count(); 41 | 42 | assert.eq(coll_validate.valid, true, "Collection is not valid"); 43 | assert.eq(data_count, coll.find({a: "b"}).count(), "Count mismatch! find().count and find().hint({a: 'b'}).count()"); 44 | assert.eq(0, coll.find({a: "a"}).count(), "Count mismatch! find().count and find().hint({a: 'b'}).count()"); 45 | -------------------------------------------------------------------------------- /tests/js/crashtest_update_03.js: -------------------------------------------------------------------------------- 1 | load('jstests/libs/parallelTester.js'); 2 | mydbpath = db.serverCmdLineOpts().parsed.storage.dbPath; 3 | 4 | db.dropDatabase(); 5 | db.runCommand( 6 | { 7 | createIndexes: "testt", 8 | indexes: [ 9 | { 10 | key: { 11 | a: 1 12 | }, 13 | name: "a", 14 | }, 15 | { 16 | key: { 17 | b: 1 18 | }, 19 | name: "b", 20 | }, 21 | ] 22 | } 23 | ) 24 | 25 | var data_thread = function() { 26 | var coll = db.getCollection("testt"); 27 | coll.updateOne({a: "a", b: "b"}, { $set: {a: "c", b: "d"}}); 28 | }; 29 | 30 | var coll = db.getCollection("testt"); 31 | var data = {a: "a", b: "b"}; 32 | coll.insert(data); 33 | 34 | dt = new ScopedThread(data_thread); 35 | dt.start(); 36 | dt.join(); 37 | 38 | x = startMongoProgram('mongod', '--storageEngine=pmse', '--dbpath='+mydbpath, '--port', 27017); 39 | 40 | var db = x.getDB("test"); 41 | var coll = db.getCollection("testt"); 42 | var coll_validate = coll.validate(); 43 | 44 | printjson(coll_validate); 45 | 46 | var data_count = coll.find().count(); 47 | 48 | assert.eq(coll_validate.valid, true, "Collection is not valid"); 49 | assert.eq(data_count, coll.find({a: "c"}).count(), "Count mismatch! find().count and find().hint({a: 'c'}).count()"); 50 | assert.eq(data_count, coll.find({b: "d"}).count(), "Count mismatch! find().count and find().hint({b: 'd'}).count()"); 51 | assert.eq(0, coll.find({a: "a"}).count(), "Count mismatch! find().count and find().hint({a: 'a'}).count()"); 52 | assert.eq(0, coll.find({b: "b"}).count(), "Count mismatch! 
find().count and find().hint({b: 'b'}).count()"); 53 | -------------------------------------------------------------------------------- /tests/js/crashtest_update_04.js: -------------------------------------------------------------------------------- 1 | load('jstests/libs/parallelTester.js'); 2 | mydbpath = db.serverCmdLineOpts().parsed.storage.dbPath; 3 | 4 | db.dropDatabase(); 5 | 6 | db.runCommand( 7 | { 8 | createIndexes: "testt", 9 | indexes: [ 10 | { 11 | key: { 12 | a: 1 13 | }, 14 | name: "a", 15 | }, 16 | { 17 | key: { 18 | b: 1 19 | }, 20 | name: "b", 21 | }, 22 | ] 23 | } 24 | ) 25 | 26 | var data_thread = function() { 27 | var coll = db.getCollection("testt"); 28 | coll.updateMany({a: "a", b: "b"}, { $set: {a: "c", b: "d"}}); 29 | }; 30 | 31 | var coll = db.getCollection("testt"); 32 | var data = {a: "a", b: "b"}; 33 | coll.insert(data); 34 | coll.insert(data); 35 | 36 | dt = new ScopedThread(data_thread); 37 | dt.start(); 38 | dt.join(); 39 | 40 | x = startMongoProgram('mongod', '--storageEngine=pmse', '--dbpath='+mydbpath, '--port', 27017); 41 | 42 | var db = x.getDB("test"); 43 | var coll = db.getCollection("testt"); 44 | var coll_validate = coll.validate(); 45 | 46 | printjson(coll_validate); 47 | 48 | var data_count = coll.find().count(); 49 | 50 | assert.eq(coll_validate.valid, true, "Collection is not valid"); 51 | assert.eq(data_count, coll.find({a: "c"}).count(), "Count mismatch! find().count and find().hint({a: 'c'}).count()"); 52 | assert.eq(data_count, coll.find({b: "d"}).count(), "Count mismatch! find().count and find().hint({b: 'd'}).count()"); 53 | assert.eq(0, coll.find({a: "a"}).count(), "Count mismatch! find().count and find().hint({a: 'a'}).count()"); 54 | assert.eq(0, coll.find({b: "b"}).count(), "Count mismatch! find().count and find().hint({b: 'b'}).count()"); 55 | -------------------------------------------------------------------------------- /tests/js/crashtest_update_05.js: -------------------------------------------------------------------------------- 1 | load('jstests/libs/parallelTester.js'); 2 | mydbpath = db.serverCmdLineOpts().parsed.storage.dbPath; 3 | 4 | db.dropDatabase(); 5 | db.createCollection( "testt", { capped: true, size: 100000 } ); 6 | db.runCommand( 7 | { 8 | createIndexes: "testt", 9 | indexes: [ 10 | { 11 | key: { 12 | a: 1 13 | }, 14 | name: "a", 15 | }, 16 | { 17 | key: { 18 | b: 1 19 | }, 20 | name: "b", 21 | }, 22 | ] 23 | } 24 | ) 25 | 26 | var data_thread = function() { 27 | var coll = db.getCollection("testt"); 28 | coll.updateOne({a: "a", b: "b"}, { $set: {a: "c", b: "d"}}); 29 | }; 30 | 31 | var coll = db.getCollection("testt"); 32 | var data = {a: "a", b: "b"}; 33 | coll.insert(data); 34 | 35 | dt = new ScopedThread(data_thread); 36 | dt.start(); 37 | dt.join(); 38 | 39 | x = startMongoProgram('mongod', '--storageEngine=pmse', '--dbpath='+mydbpath, '--port', 27017); 40 | 41 | var db = x.getDB("test"); 42 | var coll = db.getCollection("testt"); 43 | var coll_validate = coll.validate(); 44 | 45 | printjson(coll_validate); 46 | 47 | var data_count = coll.find().count(); 48 | 49 | assert.eq(coll_validate.valid, true, "Collection is not valid"); 50 | assert.eq(data_count, coll.find({a: "c"}).count(), "Count mismatch! find().count and find().hint({a: 'c'}).count()"); 51 | assert.eq(data_count, coll.find({b: "d"}).count(), "Count mismatch! find().count and find().hint({b: 'd'}).count()"); 52 | assert.eq(0, coll.find({a: "a"}).count(), "Count mismatch! 
find().count and find().hint({a: 'a'}).count()"); 53 | assert.eq(0, coll.find({b: "b"}).count(), "Count mismatch! find().count and find().hint({b: 'b'}).count()"); 54 | -------------------------------------------------------------------------------- /tests/js/crashtest_update_06.js: -------------------------------------------------------------------------------- 1 | load('jstests/libs/parallelTester.js'); 2 | mydbpath = db.serverCmdLineOpts().parsed.storage.dbPath; 3 | 4 | db.dropDatabase(); 5 | db.createCollection( "testt", { capped: true, size: 100000 } ); 6 | db.runCommand( 7 | { 8 | createIndexes: "testt", 9 | indexes: [ 10 | { 11 | key: { 12 | a: 1 13 | }, 14 | name: "a", 15 | }, 16 | { 17 | key: { 18 | b: 1 19 | }, 20 | name: "b", 21 | }, 22 | ] 23 | } 24 | ) 25 | 26 | var data_thread = function() { 27 | var coll = db.getCollection("testt"); 28 | coll.updateMany({a: "a", b: "b"}, { $set: {a: "c", b: "d"}}); 29 | }; 30 | 31 | var coll = db.getCollection("testt"); 32 | var data = {a: "a", b: "b"}; 33 | coll.insert(data); 34 | coll.insert(data); 35 | 36 | dt = new ScopedThread(data_thread); 37 | dt.start(); 38 | dt.join(); 39 | 40 | x = startMongoProgram('mongod', '--storageEngine=pmse', '--dbpath='+mydbpath, '--port', 27017); 41 | 42 | var db = x.getDB("test"); 43 | var coll = db.getCollection("testt"); 44 | var coll_validate = coll.validate(); 45 | 46 | printjson(coll_validate); 47 | 48 | var data_count = coll.find().count(); 49 | 50 | assert.eq(coll_validate.valid, true, "Collection is not valid"); 51 | assert.eq(data_count, coll.find({a: "c"}).count(), "Count mismatch! find().count and find().hint({a: 'c'}).count()"); 52 | assert.eq(data_count, coll.find({b: "d"}).count(), "Count mismatch! find().count and find().hint({b: 'd'}).count()"); 53 | assert.eq(0, coll.find({a: "a"}).count(), "Count mismatch! find().count and find().hint({a: 'a'}).count()"); 54 | assert.eq(0, coll.find({b: "b"}).count(), "Count mismatch! find().count and find().hint({b: 'b'}).count()"); 55 | -------------------------------------------------------------------------------- /tests/run_test.sh: -------------------------------------------------------------------------------- 1 | insert_test_num="$(find . -maxdepth 1 -type f -name 'crashtest_insert_??.sh' | wc -l)"; 2 | update_test_num="$(find . -maxdepth 1 -type f -name 'crashtest_update_??.sh' | wc -l)"; 3 | delete_test_num="$(find . -maxdepth 1 -type f -name 'crashtest_delete_??.sh' | wc -l)"; 4 | mydb="/mnt/psmem_0/"; 5 | 6 | while getopts ":m:i:u:d:a" opt; do 7 | case $opt in 8 | m) 9 | mydb=$OPTARG; 10 | ;; 11 | i) 12 | if (($OPTARG > 0)) && (($OPTARG <= $insert_test_num)); then 13 | test_group="insert"; 14 | test_num=$OPTARG; 15 | else 16 | echo "Invalid test number in insert test group." 17 | exit 1 18 | fi 19 | ;; 20 | u) 21 | if (($OPTARG > 0)) && (($OPTARG <= $update_test_num)); then 22 | test_group="update"; 23 | test_num=$OPTARG; 24 | else 25 | echo "Invalid test number in update test group." 26 | exit 1 27 | fi 28 | ;; 29 | d) 30 | if (($OPTARG > 0)) && (($OPTARG <= $delete_test_num)); then 31 | test_group="delete"; 32 | test_num=$OPTARG; 33 | else 34 | echo "Invalid test number in delete test group." 
35 | exit 1 36 | fi 37 | ;; 38 | a) 39 | test_num=1; 40 | while ((test_num<=$insert_test_num)); do 41 | ./run_test.sh -m "$mydb" -i "$test_num" 42 | ((test_num++)) 43 | done 44 | test_num=1; 45 | while ((test_num<=$update_test_num)); do 46 | ./run_test.sh -m "$mydb" -u "$test_num" 47 | ((test_num++)) 48 | done 49 | test_num=1; 50 | while ((test_num<=$delete_test_num)); do 51 | ./run_test.sh -m "$mydb" -d "$test_num" 52 | ((test_num++)) 53 | done 54 | exit 1 55 | ;; 56 | \?) 57 | echo "Invalid option: -$OPTARG, please use: -i[nsert] x, -u[pdate] x, -d[elete] x, a[ll], where x = test number." >&2 58 | exit 1 59 | ;; 60 | :) 61 | echo "Test group -$OPTARG requires an argument with test number." >&2 62 | exit 1 63 | ;; 64 | esac 65 | done 66 | 67 | if ((test_num < 10)); then 68 | test_num="0"$test_num; 69 | fi 70 | 71 | echo "Running crashtest_"$test_group"_"$test_num"..."; 72 | 73 | killall mongod > /dev/null 2>&1 74 | rm -rf /mnt/psmem_0/ > /dev/null 2>&1 75 | cd ../../../../../../ 76 | 77 | gdb --batch --command=src/mongo/db/modules/pmse/tests/gdb/crashtest_"$test_group"_"$test_num".gdb --args ./mongod --dbpath="$mydb" --storageEngine=pmse --bind_ip 127.0.0.1 > src/mongo/db/modules/pmse/tests/log/crashtest_"$test_group"_"$test_num"_gdb_log.txt 2>&1 & 78 | 79 | mongo_port=0; 80 | 81 | while ((mongo_port < 1)); do 82 | mongo_port="$(netstat -l | grep 27017 | wc -l)"; 83 | done 84 | 85 | if ./mongo src/mongo/db/modules/pmse/tests/js/crashtest_"$test_group"_"$test_num".js > src/mongo/db/modules/pmse/tests/log/crashtest_"$test_group"_"$test_num"_shell_log.txt 2>&1; then 86 | echo "Success" 87 | else 88 | echo "Fail!" 89 | fi 90 | killall mongod > /dev/null 2>&1 91 | -------------------------------------------------------------------------------- /utils/README.md: -------------------------------------------------------------------------------- 1 | # Benchmarking tools 2 | 3 | ## What are these files? 4 | 5 | **path_configuration.txt** - contains the full paths to the root directories of YCSB and MongoDB 6 | 7 | **run_suite.py** - generates the bash file testplan.sh with the commands needed to run YCSB in the requested configurations 8 | 9 | **run_workload.sh** - bash script called from the testplan.sh generated by run_suite.py 10 | 11 | **test_suite.txt** - set of user-defined suites; this file is written by the user 12 | 13 | **testplan.sh** - generated script that executes the commands for the suites defined in test_suite.txt 14 | 15 | **results/configurations.json** - the most recently read configuration from test_suite.txt, stored as JSON 16 | 17 | ## Before you start 18 | - Build PMSE with MongoDB 19 | - Install the Yahoo! Cloud Serving Benchmark (YCSB) 20 | - Set absolute paths to MongoDB and YCSB in path_configuration.txt 21 | - Write benchmark scenarios to test_suite.txt 22 | 23 | ## Start benchmarks 24 | Once you have written your benchmark suites and set the paths in path_configuration.txt, generate testplan.sh using run_suite.py. testplan.sh will run YCSB many times, according to the test suites, and generate files in the results directory. Each directory is named after its suite and contains the results for that suite. 25 | If everything is configured correctly, start the benchmarks with: 26 | ``` 27 | python run_suite.py 28 | ./testplan.sh 29 | ``` 30 | 31 | ## Parsing the results 32 | After this step you get CSV files in the corresponding result directories; they are also printed to the screen. 33 | The Python parsing script does not take any arguments: 34 | ``` 35 | python parser.py 36 | ```
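For orientation, the results.csv that parser.py writes for a suite run with, for example, 16, 32, and 48 threads would look roughly like the sketch below: one column per thread count and one row per metric taken from the YCSB logs, with decimal points converted to commas. The metric names and numbers here are illustrative placeholders only, not real measurements:
```
Threads;#;16;32;48;
[OVERALL];RunTime(ms);41235;23510;18804;
[OVERALL];Throughput(ops/sec);24251,3;42534,7;53180,2;
[READ];AverageLatency(us);612,4;702,9;811,6;
[READ];95thPercentileLatency(us);1021;1174;1298;
```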
37 | 38 | ## Benchmark suite layout 39 | 40 | In **test_suite.txt** you can describe many benchmark scenarios. Every suite is described with the keywords explained below. You can also comment out lines with the ‘**#**’ symbol: 41 | 42 | Keyword | Meaning 43 | --------|-------- 44 | SUITE [text] | beginning of a benchmark suite with the given name 45 | ENDSUITE | end of the suite 46 | THREADS [list of numbers] | thread counts, one benchmark iteration per value 47 | LOAD | if present, the script runs the YCSB load phase to fill the database; otherwise YCSB runs the benchmark phase 48 | JOURNALING | when enabled, YCSB operations use a WriteConcern of "Journaled" 49 | RECORDS [number] | number of records in the DB when executing the 'run' phase, or the number of records to insert during 'load' 50 | OPERATIONS [number] | number of operations to perform 51 | READ_PROPORTION | proportion of reads 52 | INSERT_PROPORTION | proportion of inserts 53 | UPDATE_PROPORTION | proportion of updates 54 | YCSB_NUMA [number] | NUMA node for the YCSB instance 55 | DROP_BEFORE | drop the collection before the suite starts (useful when running insert cases with different thread counts) 56 | CREATE_AFTER_DROP | create the collection again after it has been dropped 57 | 58 | 59 | ## Examples 60 | ### Only inserts 61 | ``` 62 | SUITE only_inserts 63 | THREADS 48 64 | JOURNALING disabled 65 | RECORDS 1000000 66 | OPERATIONS 1000000 67 | INSERT_PROPORTION 1.0 68 | READ_PROPORTION 0.0 69 | UPDATE_PROPORTION 0.0 70 | YCSB_NUMA 1 71 | DROP_BEFORE 72 | CREATE_AFTER_DROP 73 | ENDSUITE 74 | ``` 75 | ### Load the database first, then perform 100% reads 76 | ``` 77 | SUITE load_phase 78 | LOAD 79 | THREADS 48 80 | JOURNALING disabled 81 | RECORDS 1000000 82 | OPERATIONS 1000000 83 | INSERT_PROPORTION 1.0 84 | READ_PROPORTION 0.0 85 | UPDATE_PROPORTION 0.0 86 | YCSB_NUMA 1 87 | DROP_BEFORE 88 | CREATE_AFTER_DROP 89 | ENDSUITE 90 | # Suite below will start three times with different thread configurations: 16 32 48 91 | SUITE run_phase 92 | THREADS 16 32 48 93 | JOURNALING disabled 94 | RECORDS 1000000 95 | OPERATIONS 1000000 96 | YCSB_NUMA 1 97 | INSERT_PROPORTION 0.0 98 | READ_PROPORTION 1.0 99 | UPDATE_PROPORTION 0.0 100 | ENDSUITE 101 | ``` 102 | 103 | ## Authors 104 | * [Krzysztof Filipek](https://github.com/KFilipek) 105 | -------------------------------------------------------------------------------- /utils/create_table.js: -------------------------------------------------------------------------------- 1 | (function() { 2 | db = db.getSiblingDB("ycsb"); 3 | db.createCollection("usertable"); 4 | })(); 5 | -------------------------------------------------------------------------------- /utils/drop_table.js: -------------------------------------------------------------------------------- 1 | (function() { 2 | db = db.getSiblingDB("ycsb"); 3 | db.usertable.drop(); 4 | })(); 5 | -------------------------------------------------------------------------------- /utils/parser.py: -------------------------------------------------------------------------------- 1 | import os 2 | from os.path import join, getsize 3 | 4 | for root, dirs, filenames in os.walk('results'): 5 | if len(dirs) == 0: 6 | parsed_results = [] 7 | for filename in filenames: 8 | if filename.split('_')[0] == 'run': 9 | with open(root + '/' + filename) as file_object: 10 | file_object.readline() 11 | trimmed_lines = [] 12 | for line in file_object.readlines(): 13 |
record = tuple(line.replace(',','').split(' ')) 14 | if record[0] != '[CLEANUP]' or record[0] != '[READ-FAILED]': 15 | if record[0] == '[READ]' or record[0] == '[INSERT]' or record[0] == '[UPDATE]' or record[0] == '[OVERALL]': #in case of READ 16 | try: 17 | int(record[1]) 18 | except ValueError: #if cannot cast it's fine 19 | trimmed_lines.append(record) 20 | parsed_results.append([int(filename.split('_')[1].split('.')[0]), trimmed_lines]) 21 | 22 | parsed_results = sorted(parsed_results, key=lambda x: x[0], reverse=False) 23 | csv = [] 24 | threads = 'Threads;#;' 25 | if len(parsed_results) <= 0: 26 | continue 27 | print '------CSV------' 28 | for i in range(0, len(parsed_results[0][1])): 29 | csv.append(parsed_results[0][1][i][0] + ';' + parsed_results[0][1][i][1] + ';') 30 | for test_result in parsed_results: 31 | threads += str(test_result[0]) + ';' 32 | for i, line in enumerate(test_result[1]): 33 | csv[i] += line[2].replace('\n','').replace('.',',') + ';' 34 | csv.insert(0, threads) 35 | with open(root + '/results.csv','w') as csv_file: 36 | for x in csv: 37 | csv_file.write(x + '\n') 38 | print x 39 | csv_file.close() -------------------------------------------------------------------------------- /utils/path_configuration.txt: -------------------------------------------------------------------------------- 1 | YCSB_PATH=FULL_PATH_TO_YCSB 2 | MONGO_PATH=FULL_PATH_TO_MONGODB 3 | -------------------------------------------------------------------------------- /utils/run_suite.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import json 3 | import os 4 | import subprocess 5 | 6 | #comment 7 | # SUITE write_workload 8 | # THREADS 1 2 4 8 16 32 48 64 96 9 | # JOURNALING enabled/disabled 10 | # RECORDS 1000 11 | # OPERATIONS 100 12 | # READ_PROPORTION 0.0 13 | # UPDATE_PROPORTION 0.0 14 | # INSERT_PROPORTION 1.0 15 | # YCSB_NUMA 1 16 | # DROP_BEFORE 17 | # ENDSUITE 18 | 19 | #GET PATHS FROM CONFIG FILE 20 | PATH_TO_MONGO = '' 21 | PATH_TO_YCSB = '' 22 | 23 | path_configuration = open("path_configuration.txt", "r") 24 | for line in path_configuration: 25 | if line.startswith('YCSB_PATH='): 26 | arg = line.split("=") 27 | if len(arg) > 1: 28 | PATH_TO_YCSB = arg[1].replace('\n','') 29 | else: 30 | raise NameError('No path in YCSB_PATH!') 31 | elif line.startswith('MONGO_PATH='): 32 | arg = line.split("=") 33 | if len(arg) > 1: 34 | PATH_TO_MONGO = arg[1].replace('\n','') 35 | else: 36 | raise NameError('No path in MONGO_PATH!') 37 | 38 | if not os.path.isdir(PATH_TO_MONGO): 39 | raise NameError('Wrong path to MONGO!') 40 | elif not os.path.isdir(PATH_TO_YCSB): 41 | raise NameError('Wrong path to YCSB!') 42 | 43 | class Test: 44 | def __init__(self): 45 | self.testName = "" 46 | self.threads = [] 47 | self.journaling = "" 48 | self.records = 0 49 | self.operations = 0 50 | self.read_proportion = -1.0 51 | self.update_proportion = -1.0 52 | self.insert_proportion = -1.0 53 | self.ycsb_numa = -1 54 | self.drop_before = -1 55 | self.create_after_drop = -1 56 | self.is_load = -1 57 | def toJSON(self): 58 | return json.dumps(self, default=lambda o: o.__dict__, 59 | sort_keys=True, indent=4) 60 | 61 | def getArgs(str): 62 | arguments = [] 63 | for i in range(1, len(str)): 64 | arguments.append(str[i]) 65 | return arguments 66 | 67 | KEYWORDS = set(["THREADS", "JOURNALING", "RECORDS", "OPERATIONS", 68 | "READ_PROPORTION", "LOAD", 69 | "UPDATE_PROPORTION", "INSERT_PROPORTION", "YCSB_NUMA", 70 | "SUITE", "ENDSUITE", "DROP_BEFORE", 
"CREATE_AFTER_DROP"]) #Add keyword if you need to extend implementation 71 | 72 | # open meta file 73 | with open("test_suite.txt", "r") as configfile: 74 | configurations = [] 75 | for line in configfile: 76 | splittedLine = line.split() 77 | if line == '\n' or line.startswith('#'): 78 | continue 79 | if len(set.intersection(KEYWORDS, splittedLine)) != 1: 80 | print splittedLine 81 | raise NameError('Too many keywords in single line!') 82 | 83 | #get args if exists 84 | args = getArgs(splittedLine) 85 | 86 | #if line starts from keyword we must read arguments 87 | if splittedLine[0] == "SUITE": 88 | configurations.append(Test()) 89 | configurations[len(configurations)-1].testName = args[0] 90 | elif splittedLine[0] == "THREADS": 91 | configurations[len(configurations)-1].threads = args 92 | elif splittedLine[0] == "LOAD": 93 | configurations[len(configurations)-1].is_load = 1 94 | elif splittedLine[0] == "JOURNALING": 95 | if args[0] == "enabled": 96 | configurations[len(configurations)-1].journaling = "journaled" #according to YCSB documentation 97 | elif args[0] == "disabled": 98 | configurations[len(configurations)-1].journaling = "acknowledged" #according to YCSB documentation 99 | else: 100 | raise NameError('Unrecognized argument') 101 | elif splittedLine[0] == "RECORDS": 102 | configurations[len(configurations)-1].records = args[0] 103 | elif splittedLine[0] == "OPERATIONS": 104 | configurations[len(configurations)-1].operations = args[0] 105 | elif splittedLine[0] == "READ_PROPORTION": 106 | configurations[len(configurations)-1].read_proportion = args[0] 107 | elif splittedLine[0] == "UPDATE_PROPORTION": 108 | configurations[len(configurations)-1].update_proportion = args[0] 109 | elif splittedLine[0] == "INSERT_PROPORTION": 110 | configurations[len(configurations)-1].insert_proportion = args[0] 111 | elif splittedLine[0] == "YCSB_NUMA": 112 | configurations[len(configurations)-1].ycsb_numa = args[0] 113 | elif splittedLine[0] == "DROP_BEFORE": 114 | configurations[len(configurations)-1].drop_before = 1 115 | elif splittedLine[0] == "CREATE_AFTER_DROP": 116 | configurations[len(configurations)-1].create_after_drop = 1 117 | elif splittedLine[0] == "ENDSUITE": 118 | continue 119 | else: 120 | raise NameError('Unrecognized keyword') 121 | configfile.close() 122 | 123 | print 'Script read those tests:' 124 | i = 1 125 | for conf in configurations: 126 | print '{:>20} {:<12}'.format('Test#: ', str(i)) 127 | print '{:>20} {:<12}'.format("Name: ", conf.testName) 128 | print '{:>20} {:<12}'.format("Threads: " ,str(conf.threads)) 129 | print '{:>20} {:<12}'.format("Journaling: ", conf.journaling) 130 | print '{:>20} {:<12}'.format("Records: ", conf.records) 131 | print '{:>20} {:<12}'.format("Operation: ", conf.operations) 132 | print '{:>20} {:<12}'.format("Read proportion: ", str(conf.read_proportion)) 133 | print '{:>20} {:<12}'.format("Update proportion: ", str(conf.update_proportion)) 134 | print '{:>20} {:<12}'.format("Insert proportion: ", str(conf.insert_proportion)) 135 | print '{:>20} {:<12}'.format("NUMA for YCSB: ", conf.ycsb_numa) 136 | print "" 137 | i = i + 1 138 | 139 | # PUT CONFIGURATION TO FILE IN PROPER PATH 140 | results_directory = "results/" 141 | if not os.path.exists(results_directory): 142 | os.makedirs(results_directory) 143 | i = 1 144 | with open(results_directory + '/configurations.json', 'w') as jsonconfig: 145 | for conf in configurations: 146 | jsonconfig.write(conf.toJSON() + '\n') 147 | if not os.path.exists(results_directory + conf.testName + '/'): 148 
| os.makedirs(results_directory + conf.testName + '/') 149 | with open(results_directory + conf.testName + '/test_description.txt', 'a') as test_description: 150 | test_description.write('{:>20} {:<12}'.format('Test#: ', str(i)) + '\n') # 'Test #' + str(i) 151 | test_description.write('{:>20} {:<12}'.format("Name: ", conf.testName) + '\n') 152 | test_description.write('{:>20} {:<12}'.format("Threads: " ,str(conf.threads)) + '\n') 153 | test_description.write('{:>20} {:<12}'.format("Journaling: ", conf.journaling) + '\n') 154 | test_description.write('{:>20} {:<12}'.format("Records: ", conf.records) + '\n') 155 | test_description.write('{:>20} {:<12}'.format("Operation: ", conf.operations) + '\n') 156 | test_description.write('{:>20} {:<12}'.format("Read proportion: ", str(conf.read_proportion)) + '\n') 157 | test_description.write('{:>20} {:<12}'.format("Update proportion: ", str(conf.update_proportion)) + '\n') 158 | test_description.write('{:>20} {:<12}'.format("Insert proportion: ", str(conf.insert_proportion)) + '\n') 159 | test_description.write('{:>20} {:<12}'.format("NUMA for YCSB: ", conf.ycsb_numa) + '\n') 160 | test_description.write('\n') 161 | i = i + 1 162 | 163 | # run specified configurations 164 | generated_commands = [] 165 | for test in configurations: 166 | command_prefix = '' 167 | command_suffix = '' 168 | 169 | command_prefix = './run_workload.sh ' + test.testName 170 | 171 | if not test.is_load == 1: 172 | command_prefix += ' run ' 173 | if test.journaling == 'journaled': 174 | command_suffix += " true " 175 | else: 176 | command_suffix += " false " 177 | else: 178 | command_prefix += ' load ' #there is no need to do journaling while load 179 | command_suffix += ' false ' 180 | 181 | command_suffix += test.records + ' ' + test.operations + ' ' 182 | command_suffix += test.read_proportion + ' ' + test.update_proportion + ' ' + test.insert_proportion + ' ' 183 | if test.ycsb_numa == -1: 184 | print 'NUMA node is not set for test: ' + test.testName + '.' 185 | command_suffix += test.ycsb_numa 186 | 187 | for thread_no in test.threads: 188 | # DROP BEFORE LOAD PHASE 189 | if test.drop_before == 1 or test.create_after_drop == 1 or test.is_load == 1: 190 | generated_commands.append(PATH_TO_MONGO + 'mongo ' + PATH_TO_MONGO + 'drop_table.js') 191 | if test.create_after_drop == 1: 192 | generated_commands.append(PATH_TO_MONGO + 'mongo ' + PATH_TO_MONGO + 'create_table.js') 193 | 194 | # DROP&CREATE BEFORE NEXT INSERTS 195 | generated_commands.append(command_prefix + thread_no + command_suffix + ' ' + PATH_TO_YCSB) 196 | 197 | # Generate script 198 | with open('testplan.sh','w') as testplan: 199 | testplan.write('#!/bin/bash\n') 200 | for x in generated_commands: 201 | testplan.write(x + '\n') 202 | -------------------------------------------------------------------------------- /utils/run_workload.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Run workload from command line 3 | # ./run_workload.sh suite_name(string) workload_type(string) no_threads(uint) 4 | # journal(bool) record_count(uint) operation_count(uint) 5 | # readproportion(uint) updateproportion(uint) insertproportion(uint) 6 | # numa_node(uint) ycsb_path 7 | # 8 | # workload_type can be: run or load according to YCSB documentation 9 | # 10 | # e.g. 
./run_workload.sh write run 64 true 1000000 10000000 0.0 0.0 1.0 1 FULL_PATH_TO_YCSB 11 | 12 | #YCSB_PATH=/home/kfilipek/repository/mongo_repositories/YCSB/ 13 | YCSB_PATH=${11} 14 | echo $YCSB_PATH 15 | OLD_PATH=$(pwd) 16 | 17 | echo $0 $1 $2 $3 $4 $5 $6 $7 $8 $9 ${10} 18 | echo "Passed $# arguments to script" 19 | if [ $# -ne 11 ]; 20 | then 21 | echo "Illegal number of parameters, should be 11. Check script documentation." 22 | exit 1 23 | fi 24 | 25 | if [ $4 = "true" ]; 26 | then 27 | JOURNALING=journaled 28 | else 29 | JOURNALING=acknowledged 30 | fi 31 | 32 | mkdir -p "results/$1/" 33 | 34 | if [ $2 = "load" ]; 35 | then 36 | # LOAD PHASE 37 | echo "load chosen" 38 | if [ ${10} -lt 0 ]; 39 | then 40 | cd $YCSB_PATH 41 | ./bin/ycsb load mongodb -s -threads $3 -p hdrhistogram.percentiles=95,99,99.9,99.99 -p recordcount=$5 -p operationcount=$6 -p readproportion=$7 -p updateproportion=$8 -p insertproportion=$9 -P ./workloads/workloada -p mongodb.url=mongodb://localhost:27017/ycsb -p mongodb.writeConcern=$JOURNALING > $OLD_PATH/results/$1/load_$3.log 42 | cd $OLD_PATH 43 | else 44 | cd $YCSB_PATH 45 | numactl -N ${10} ./bin/ycsb load mongodb -s -threads $3 -p hdrhistogram.percentiles=95,99,99.9,99.99 -p recordcount=$5 -p operationcount=$6 -p readproportion=$7 -p updateproportion=$8 -p insertproportion=$9 -P ./workloads/workloada -p mongodb.url=mongodb://localhost:27017/ycsb -p mongodb.writeConcern=$JOURNALING > $OLD_PATH/results/$1/load_$3.log 46 | cd $OLD_PATH 47 | fi 48 | else 49 | # RUN PHASE 50 | echo "run chosen" 51 | if [ ${10} -lt 0 ]; 52 | then 53 | cd $YCSB_PATH 54 | ./bin/ycsb run mongodb -s -threads $3 -p hdrhistogram.percentiles=95,99,99.9,99.99 -p recordcount=$5 -p operationcount=$6 -p readproportion=$7 -p updateproportion=$8 -p insertproportion=$9 -P ./workloads/workloada -p mongodb.url=mongodb://localhost:27017/ycsb -p mongodb.writeConcern=$JOURNALING > $OLD_PATH/results/$1/run_$3.log 55 | cd $OLD_PATH 56 | else 57 | cd $YCSB_PATH 58 | numactl -N ${10} ./bin/ycsb run mongodb -s -threads $3 -p hdrhistogram.percentiles=95,99,99.9,99.99 -p recordcount=$5 -p operationcount=$6 -p readproportion=$7 -p updateproportion=$8 -p insertproportion=$9 -P ./workloads/workloada -p mongodb.url=mongodb://localhost:27017/ycsb -p mongodb.writeConcern=$JOURNALING > $OLD_PATH/results/$1/run_$3.log 59 | cd $OLD_PATH 60 | fi 61 | fi 62 | --------------------------------------------------------------------------------