├── .clang-format
├── .dockerignore
├── .github
    └── workflows
    │   ├── build-and-test.yml
    │   └── docker-image.yml
├── .gitignore
├── .vscode
    └── cmake-variants.json
├── CMakeLists.txt
├── Dockerfile
├── LICENSE
├── README.md
├── VERSION
├── Vagrantfile
├── common
    ├── CMakeLists.txt
    ├── configuration.cpp
    ├── configuration.h
    ├── constants.h
    ├── csv_writer.cpp
    ├── csv_writer.h
    ├── json_utils.h
    ├── metrics.cpp
    ├── metrics.h
    ├── offline_data_reader.cpp
    ├── offline_data_reader.h
    ├── proto_utils.cpp
    ├── proto_utils.h
    ├── sharder.cpp
    ├── sharder.h
    ├── spin_latch.h
    ├── string_utils.cpp
    ├── string_utils.h
    ├── thread_utils.h
    └── types.h
├── connection
    ├── CMakeLists.txt
    ├── broker.cpp
    ├── broker.h
    ├── poller.cpp
    ├── poller.h
    ├── sender.cpp
    ├── sender.h
    └── zmq_utils.h
├── data_structure
    ├── CMakeLists.txt
    ├── async_log.h
    ├── batch_log.cpp
    ├── batch_log.h
    ├── concurrent_hash_map.h
    └── rwlatch.h
├── examples
    ├── cluster.conf
    ├── copy.json
    ├── read.json
    ├── single.conf
    ├── sleep.json
    └── write.json
├── execution
    ├── CMakeLists.txt
    ├── execution.cpp
    ├── execution.h
    ├── key_value.cpp
    ├── tpcc.cpp
    └── tpcc
    │   ├── constants.h
    │   ├── deliver.cpp
    │   ├── load_tables.cpp
    │   ├── load_tables.h
    │   ├── metadata_initializer.cpp
    │   ├── metadata_initializer.h
    │   ├── new_order.cpp
    │   ├── order_status.cpp
    │   ├── payment.cpp
    │   ├── scalar.h
    │   ├── stock_level.cpp
    │   ├── storage_adapter.cpp
    │   ├── storage_adapter.h
    │   ├── table.h
    │   ├── transaction.h
    │   └── types.h
├── install-deps.sh
├── module
    ├── CMakeLists.txt
    ├── base
    │   ├── module.cpp
    │   ├── module.h
    │   ├── networked_module.cpp
    │   └── networked_module.h
    ├── consensus.cpp
    ├── consensus.h
    ├── forwarder.cpp
    ├── forwarder.h
    ├── interleaver.cpp
    ├── interleaver.h
    ├── multi_home_orderer.cpp
    ├── multi_home_orderer.h
    ├── scheduler.cpp
    ├── scheduler.h
    ├── scheduler_components
    │   ├── ddr_lock_manager.cpp
    │   ├── ddr_lock_manager.h
    │   ├── old_lock_manager.cpp
    │   ├── old_lock_manager.h
    │   ├── per_key_remaster_manager.cpp
    │   ├── per_key_remaster_manager.h
    │   ├── remaster_manager.h
    │   ├── rma_lock_manager.cpp
    │   ├── rma_lock_manager.h
    │   ├── simple_remaster_manager.cpp
    │   ├── simple_remaster_manager.h
    │   ├── txn_holder.cpp
    │   ├── txn_holder.h
    │   ├── worker.cpp
    │   └── worker.h
    ├── sequencer.cpp
    ├── sequencer.h
    ├── server.cpp
    ├── server.h
    ├── txn_generator.cpp
    └── txn_generator.h
├── paxos
    ├── CMakeLists.txt
    ├── acceptor.cpp
    ├── acceptor.h
    ├── leader.cpp
    ├── leader.h
    ├── simulated_multi_paxos.cpp
    └── simulated_multi_paxos.h
├── proto
    ├── api.proto
    ├── configuration.proto
    ├── internal.proto
    ├── modules.proto
    ├── offline_data.proto
    └── transaction.proto
├── service
    ├── benchmark.cpp
    ├── client.cpp
    ├── scheduler_benchmark.cpp
    ├── service_utils.h
    └── slog.cpp
├── storage
    ├── CMakeLists.txt
    ├── lookup_master_index.h
    ├── mem_only_storage.h
    ├── metadata_initializer.cpp
    ├── metadata_initializer.h
    └── storage.h
├── test
    ├── CMakeLists.txt
    ├── common
    │   └── string_utils_test.cpp
    ├── connection
    │   ├── broker_and_sender_test.cpp
    │   └── zmq_utils_test.cpp
    ├── data_structure
    │   ├── batch_log_test.cpp
    │   └── concurrent_hash_map_test.cpp
    ├── e2e
    │   └── e2e_test.cpp
    ├── execution
    │   └── tpcc
    │   │   ├── table_test.cpp
    │   │   └── transaction_test.cpp
    ├── module
    │   ├── forwarder_test.cpp
    │   ├── interleaver_test.cpp
    │   ├── scheduler_components
    │   │   ├── ddr_lock_manager_test.cpp
    │   │   ├── old_lock_manager_test.cpp
    │   │   ├── per_key_remaster_manager_test.cpp
    │   │   ├── rma_lock_manager_test.cpp
    │   │   └── simple_remaster_manager_test.cpp
    │   ├── scheduler_test.cpp
    │   └── sequencer_test.cpp
    ├── paxos
    │   └── paxos_test.cpp
    ├── storage
    │   └── mem_only_storage_test.cpp
    ├── test_utils.cpp
    └── test_utils.h
├── tools
    ├── admin.py
    ├── aws.py
    ├── aws
    │   └── spot_cluster_config_template.json
    ├── common.py
    ├── deinterleave.py
    ├── fnv_hash.py
    ├── gen_data.py
    ├── microbenchmark.sh
    ├── netem.py
    ├── proto
    │   ├── configuration_pb2.py
    │   ├── modules_pb2.py
    │   ├── offline_data_pb2.py
    │   └── transaction_pb2.py
    └── requirements.txt
├── version.h.in
└── workload
    ├── CMakeLists.txt
    ├── basic.cpp
    ├── basic.h
    ├── cockroach.cpp
    ├── cockroach.h
    ├── remastering.cpp
    ├── remastering.h
    ├── tpcc.cpp
    ├── tpcc.h
    └── workload.h

/.dockerignore:
--------------------------------------------------------------------------------
1 | Dockerfile
2 | build
3 | .vscode
4 | .deps
5 | .dockerignore
6 | *.dat
7 | venv
8 | tmp
9 | config
10 | README.md
--------------------------------------------------------------------------------
/.github/workflows/build-and-test.yml:
--------------------------------------------------------------------------------
1 | name: Build and Test
2 | 
3 | on: [push, pull_request]
4 | 
5 | env:
6 |   BUILD_TYPE: Debug
7 |   GLOG_v: 4
8 |   GLOG_logtostderr: 1
9 | 
10 | jobs:
11 |   build-and-test:
12 |     runs-on: ubuntu-latest
13 |     strategy:
14 |       # Allow other jobs to continue when one job fails
15 |       fail-fast: false
16 |       matrix:
17 |         remaster: [none, simple, per_key, counterless]
18 |         lock: [old, rma, ddr]
19 |         exclude:
20 |           - remaster: simple
21 |             lock: rma
22 |           - remaster: simple
23 |             lock: ddr
24 |           - remaster: per_key
25 |             lock: rma
26 |           - remaster: per_key
27 |             lock: ddr
28 |           - remaster: counterless
29 |             lock: old
30 | 
31 |     steps:
32 |       - name: Get latest CMake
33 |         uses: lukka/get-cmake@latest
34 | 
35 |       - uses: actions/checkout@v2
36 | 
37 |       - name: Cache dependencies
38 |         uses: actions/cache@v2
39 |         with:
40 |           path: .deps
41 |           key: ${{ runner.os }}-build-${{ hashFiles('**/CMakeFiles.txt') }}
42 |           restore-keys: |
43 |             ${{ runner.os }}-build-
44 |             ${{ runner.os }}-
45 | 
46 |       - name: Install dependencies
47 |         run: ./install-deps.sh
48 | 
49 |       - name: Create Build Environment
50 |         run: cmake -E make_directory ${{ github.workspace }}/build
51 | 
52 |       - name: Configure CMake
53 |         shell: bash
54 |         env:
55 |           REMASTER_PROTOCOL: ${{ matrix.remaster }}
56 |           LOCK_MANAGER: ${{ matrix.lock }}
57 |         working-directory: ${{ github.workspace }}/build
58 |         run: cmake -S $GITHUB_WORKSPACE -B .
59 |           -DCMAKE_BUILD_TYPE=$BUILD_TYPE
60 |           -DBUILD_SLOG_CLIENT=OFF
61 |           -DBUILD_SLOG_TESTS=ON
62 |           -DREMASTER_PROTOCOL=$REMASTER_PROTOCOL
63 |           -DLOCK_MANAGER=$LOCK_MANAGER
64 | 
65 |       - name: Build
66 |         working-directory: ${{ github.workspace }}/build
67 |         shell: bash
68 |         run: cmake --build .
--config $BUILD_TYPE --parallel 4 69 | 70 | - name: Test 71 | working-directory: ${{ github.workspace }}/build 72 | shell: bash 73 | run: ctest -C $BUILD_TYPE --output-on-failure 74 | -------------------------------------------------------------------------------- /.github/workflows/docker-image.yml: -------------------------------------------------------------------------------- 1 | name: Create Docker Image 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | - dev 8 | 9 | jobs: 10 | build-image: 11 | runs-on: ubuntu-latest 12 | strategy: 13 | matrix: 14 | include: 15 | - remaster: counterless 16 | lock: rma 17 | - remaster: counterless 18 | lock: ddr 19 | 20 | steps: 21 | - uses: actions/checkout@v2 22 | 23 | - name: Cache Docker layers 24 | uses: actions/cache@v2 25 | with: 26 | path: /tmp/.buildx-cache 27 | key: ${{ runner.os }}-buildx-${{ github.sha }} 28 | restore-keys: | 29 | ${{ runner.os }}-buildx- 30 | 31 | - name: Login to Docker Hub 32 | uses: docker/login-action@v1 33 | with: 34 | username: ${{ secrets.DOCKER_HUB_USERNAME }} 35 | password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }} 36 | 37 | - name: Set up Docker Buildx 38 | id: buildx 39 | uses: docker/setup-buildx-action@v1 40 | 41 | - name: Compute tag 42 | env: 43 | LOCK: ${{ matrix.lock }} 44 | id: compute_tag 45 | run: | 46 | echo "::set-output name=sha::${LOCK}-${GITHUB_SHA:0:7}" 47 | echo "::set-output name=branch::${LOCK}-${GITHUB_REF##refs/heads/}" 48 | 49 | - name: Build and push 50 | id: docker_build 51 | uses: docker/build-push-action@v2 52 | with: 53 | push: true 54 | tags: ctring/slog:${{ steps.compute_tag.outputs.sha }},ctring/slog:${{ steps.compute_tag.outputs.branch }} 55 | builder: ${{ steps.buildx.outputs.name }} 56 | cache-from: type=local,src=/tmp/.buildx-cache 57 | cache-to: type=local,dest=/tmp/.buildx-cache 58 | build-args: | 59 | CMAKE_OPTIONS=-DLOCK_MANAGER=${{ matrix.lock }} -DREMASTER_PROTOCOL=${{ matrix.remaster }} 60 | 61 | - name: Image digest 62 | run: echo ${{ steps.docker_build.outputs.digest }} -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # VS Code 2 | .vscode 3 | !.vscode/cmake-variants.json 4 | 5 | # Prerequisites 6 | *.d 7 | 8 | # Compiled Object files 9 | *.slo 10 | *.lo 11 | *.o 12 | *.obj 13 | 14 | # Precompiled Headers 15 | *.gch 16 | *.pch 17 | 18 | # Compiled Dynamic libraries 19 | *.so 20 | *.dylib 21 | *.dll 22 | 23 | # Fortran module files 24 | *.mod 25 | *.smod 26 | 27 | # Compiled Static libraries 28 | *.lai 29 | *.la 30 | *.a 31 | *.lib 32 | 33 | # Executables 34 | *.exe 35 | *.out 36 | *.app 37 | 38 | # Build and dependencies folders 39 | build/ 40 | .deps/ 41 | .dep/ 42 | 43 | # Byte-compiled / optimized / DLL files 44 | __pycache__/ 45 | *.py[cod] 46 | *$py.class 47 | 48 | # Installer logs 49 | pip-log.txt 50 | pip-delete-this-directory.txt 51 | 52 | # Jupyter Notebook 53 | .ipynb_checkpoints 54 | 55 | # Environments 56 | .env 57 | .venv 58 | env/ 59 | venv/ 60 | ENV/ 61 | env.bak/ 62 | venv.bak/ 63 | 64 | # Data files 65 | *.dat -------------------------------------------------------------------------------- /.vscode/cmake-variants.json: -------------------------------------------------------------------------------- 1 | { 2 | "buildType": { 3 | "default": "debug", 4 | "choices": { 5 | "asan": { 6 | "short": "Asan", 7 | "long": "Address sanitizer", 8 | "buildType": "Asan" 9 | }, 10 | "tsan": { 11 | "short": "Tsan", 12 | "long": "Thread 
sanitizer", 13 | "buildType": "Tsan" 14 | }, 15 | "ubsan": { 16 | "short": "Ubsan", 17 | "long": "Undefined behaviour sanitizer", 18 | "buildType": "Ubsan" 19 | }, 20 | "debug": { 21 | "short": "Debug", 22 | "long": "Emit debug information", 23 | "buildType": "Debug" 24 | }, 25 | "relwithdebinfo": { 26 | "short": "RelWithDebInfo", 27 | "long": "Optimize generated code with debug info", 28 | "buildType": "RelWithDebInfo" 29 | }, 30 | "release": { 31 | "short": "Release", 32 | "long": "Optimize generated code", 33 | "buildType": "Release" 34 | } 35 | } 36 | }, 37 | "remasterProtocol": { 38 | "default": "none-rma", 39 | "choices": { 40 | "none-old": { 41 | "short": "None/OLD", 42 | "long": "No remaster protocol with OLD lock manager", 43 | "settings": { 44 | "REMASTER_PROTOCOL": "NONE", 45 | "LOCK_MANAGER": "OLD" 46 | } 47 | }, 48 | "none-rma": { 49 | "short": "None/RMA", 50 | "long": "No remaster protocol with RMA lock manager", 51 | "settings": { 52 | "REMASTER_PROTOCOL": "NONE", 53 | "LOCK_MANAGER": "RMA" 54 | } 55 | }, 56 | "none-ddr": { 57 | "short": "None/DDR", 58 | "long": "No remaster protocol with DDR lock manager", 59 | "settings": { 60 | "REMASTER_PROTOCOL": "NONE", 61 | "LOCK_MANAGER": "DDR" 62 | } 63 | }, 64 | "simple": { 65 | "short": "Simple", 66 | "long": "Simple remaster protocol", 67 | "settings": { 68 | "REMASTER_PROTOCOL": "SIMPLE", 69 | "LOCK_MANAGER": "OLD" 70 | } 71 | }, 72 | "perkey": { 73 | "short": "PerKey", 74 | "long": "Per-key remaster protocol", 75 | "settings": { 76 | "REMASTER_PROTOCOL": "PER_KEY", 77 | "LOCK_MANAGER": "OLD" 78 | } 79 | }, 80 | "counterless-rma": { 81 | "short": "Counterless/RMA", 82 | "long": "Counterless remaster protocol with RMA lock manager", 83 | "settings": { 84 | "REMASTER_PROTOCOL": "COUNTERLESS", 85 | "LOCK_MANAGER": "RMA" 86 | } 87 | }, 88 | "counterless-ddr": { 89 | "short": "Counterless/DDR", 90 | "long": "Counterless remaster protocol with DDR lock manager", 91 | "settings": { 92 | "REMASTER_PROTOCOL": "COUNTERLESS", 93 | "LOCK_MANAGER": "DDR" 94 | } 95 | } 96 | } 97 | } 98 | } -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:focal AS builder 2 | ARG CMAKE_OPTIONS 3 | 4 | # Avoid interactive installation 5 | ENV DEBIAN_FRONTEND=noninteractive 6 | RUN apt-get update 7 | RUN apt-get -y install wget build-essential cmake git pkg-config 8 | 9 | WORKDIR /src 10 | 11 | COPY ./install-deps.sh . 12 | RUN ./install-deps.sh -d 13 | 14 | COPY . . 15 | RUN rm -rf build \ 16 | && mkdir build \ 17 | && cd build \ 18 | && cmake .. -DBUILD_SLOG_TESTS=OFF ${CMAKE_OPTIONS} -DCMAKE_BUILD_TYPE=release \ 19 | && make -j$(nproc) \ 20 | && cd .. 21 | 22 | FROM ubuntu:focal AS runner 23 | # If set (to anything), also create an image with tools (exclude the toolings) 24 | ARG INCLUDE_TOOLS 25 | 26 | WORKDIR /opt/slog 27 | COPY --from=builder /src/build/slog . 28 | COPY --from=builder /src/build/client . 29 | COPY --from=builder /src/build/benchmark . 30 | COPY --from=builder /src/build/scheduler_benchmark . 
31 | COPY --from=builder /src/examples/*.conf ./ 32 | COPY --from=builder /src/tools/ tools/ 33 | 34 | RUN if [ -n "$INCLUDE_TOOLS" ]; then \ 35 | apt-get update; \ 36 | apt-get -y install python3 python3-pip; \ 37 | python3 -m pip install -r tools/requirements.txt; \ 38 | chmod +x tools/*.py; \ 39 | fi 40 | 41 | ENV PATH="/opt/slog:${PATH}" 42 | ENV PATH="/opt/slog/tools:${PATH}" -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Cuong Nguyen 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /VERSION: -------------------------------------------------------------------------------- 1 | 1.0.735 2 | -------------------------------------------------------------------------------- /Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | # All Vagrant configuration is done below. The "2" in Vagrant.configure 5 | # configures the configuration version (we support older styles for 6 | # backwards compatibility). Please don't change it unless you know what 7 | # you're doing. 8 | Vagrant.configure("2") do |config| 9 | # The most common configuration options are documented and commented below. 10 | # For a complete reference, please see the online documentation at 11 | # https://docs.vagrantup.com. 12 | 13 | # Every Vagrant development environment requires a box. You can search for 14 | # boxes at https://vagrantcloud.com/search. 15 | config.vm.box = "ubuntu/eoan64" 16 | 17 | # Note: requires vagrant-disksize plugin 18 | config.disksize.size = '20GB' 19 | 20 | # Disable automatic box update checking. If you disable this, then 21 | # boxes will only be checked for updates when the user runs 22 | # `vagrant box outdated`. This is not recommended. 23 | # config.vm.box_check_update = false 24 | 25 | # Create a forwarded port mapping which allows access to a specific port 26 | # within the machine from a port on the host machine. In the example below, 27 | # accessing "localhost:8080" will access port 80 on the guest machine. 
28 | # NOTE: This will enable public access to the opened port 29 | # config.vm.network "forwarded_port", guest: 80, host: 8080 30 | 31 | # Create a forwarded port mapping which allows access to a specific port 32 | # within the machine from a port on the host machine and only allow access 33 | # via 127.0.0.1 to disable public access 34 | # config.vm.network "forwarded_port", guest: 80, host: 8080, host_ip: "127.0.0.1" 35 | 36 | # Create a private network, which allows host-only access to the machine 37 | # using a specific IP. 38 | # config.vm.network "private_network", ip: "192.168.33.10" 39 | 40 | # Create a public network, which generally matched to bridged network. 41 | # Bridged networks make the machine appear as another physical device on 42 | # your network. 43 | # config.vm.network "public_network" 44 | 45 | # Share an additional folder to the guest VM. The first argument is 46 | # the path on the host to the actual folder. The second argument is 47 | # the path on the guest to mount the folder. And the optional third 48 | # argument is a set of non-required options. 49 | # config.vm.synced_folder "../data", "/vagrant_data" 50 | 51 | # Provider-specific configuration so you can fine-tune various 52 | # backing providers for Vagrant. These expose provider-specific options. 53 | # Example for VirtualBox: 54 | # 55 | config.vm.provider "virtualbox" do |vb| 56 | # Display the VirtualBox GUI when booting the machine 57 | # vb.gui = true 58 | 59 | # Customize the amount of memory on the VM: 60 | vb.memory = "4096" 61 | 62 | # box was timing out on startup. from https://bugs.launchpad.net/cloud-images/+bug/1829625 63 | vb.customize ["modifyvm", :id, "--uart1", "0x3F8", "4"] 64 | vb.customize ["modifyvm", :id, "--uartmode1", "file", "./ttyS0.log"] 65 | end 66 | # 67 | # View the documentation for the provider you are using for more 68 | # information on available options. 69 | 70 | # Enable provisioning with a shell script. Additional provisioners such as 71 | # Puppet, Chef, Ansible, Salt, and Docker are also available. Please see the 72 | # documentation for more information about their specific syntax and use. 
73 | # config.vm.provision "shell", inline: <<-SHELL 74 | # apt-get update 75 | # apt-get install -y apache2 76 | # SHELL 77 | end 78 | -------------------------------------------------------------------------------- /common/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | target_sources(slog-core 2 | PRIVATE 3 | configuration.cpp 4 | configuration.h 5 | constants.h 6 | csv_writer.cpp 7 | csv_writer.h 8 | json_utils.h 9 | metrics.cpp 10 | metrics.h 11 | offline_data_reader.cpp 12 | offline_data_reader.h 13 | proto_utils.cpp 14 | proto_utils.h 15 | sharder.cpp 16 | sharder.h 17 | spin_latch.h 18 | string_utils.cpp 19 | string_utils.h 20 | thread_utils.h 21 | types.h) -------------------------------------------------------------------------------- /common/configuration.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | #include "common/types.h" 8 | #include "proto/configuration.pb.h" 9 | #include "proto/internal.pb.h" 10 | 11 | namespace slog { 12 | 13 | class Configuration; 14 | 15 | using ConfigurationPtr = std::shared_ptr; 16 | 17 | class Configuration { 18 | public: 19 | static ConfigurationPtr FromFile(const std::string& file_path, const std::string& local_address = ""); 20 | 21 | Configuration(const internal::Configuration& config, const std::string& local_address); 22 | 23 | const internal::Configuration& proto_config() const; 24 | const std::string& protocol() const; 25 | const std::vector& all_addresses() const; 26 | const std::string& address(uint32_t replica, uint32_t partition) const; 27 | const std::string& address(MachineId machine_id) const; 28 | uint32_t broker_ports(int i) const; 29 | uint32_t broker_ports_size() const; 30 | uint32_t server_port() const; 31 | uint32_t forwarder_port() const; 32 | uint32_t sequencer_port() const; 33 | uint32_t num_replicas() const; 34 | uint32_t num_partitions() const; 35 | uint32_t num_workers() const; 36 | std::vector all_machine_ids() const; 37 | std::chrono::milliseconds mh_orderer_batch_duration() const; 38 | std::chrono::milliseconds forwarder_batch_duration() const; 39 | std::chrono::milliseconds sequencer_batch_duration() const; 40 | int sequencer_batch_size() const; 41 | bool sequencer_rrr() const; 42 | uint32_t replication_factor() const; 43 | 44 | const std::string& local_address() const; 45 | uint32_t local_replica() const; 46 | uint32_t local_partition() const; 47 | MachineId local_machine_id() const; 48 | MachineId MakeMachineId(uint32_t replica, uint32_t partition) const; 49 | std::pair UnpackMachineId(MachineId machine_id) const; 50 | 51 | uint32_t leader_replica_for_multi_home_ordering() const; 52 | uint32_t leader_partition_for_multi_home_ordering() const; 53 | 54 | uint32_t replication_delay_pct() const; 55 | uint32_t replication_delay_amount_ms() const; 56 | 57 | std::vector enabled_events() const; 58 | bool bypass_mh_orderer() const; 59 | std::vector cpu_pinnings(ModuleId module) const; 60 | bool return_dummy_txn() const; 61 | int recv_retries() const; 62 | internal::ExecutionType execution_type() const; 63 | const std::vector replication_order() const; 64 | bool synchronized_batching() const; 65 | uint32_t sample_rate() const; 66 | std::vector distance_ranking_from(int replica_id) const; 67 | 68 | int broker_rcvbuf() const; 69 | int long_sender_sndbuf() const; 70 | 71 | private: 72 | internal::Configuration config_; 73 | std::string local_address_; 74 | int 
local_replica_; 75 | int local_partition_; 76 | 77 | std::vector all_addresses_; 78 | std::vector replication_order_; 79 | }; 80 | 81 | } // namespace slog -------------------------------------------------------------------------------- /common/constants.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | 6 | #include "common/types.h" 7 | 8 | namespace slog { 9 | 10 | const auto kModuleTimeout = std::chrono::milliseconds(1000); 11 | 12 | const Channel kServerChannel = 1; 13 | const Channel kForwarderChannel = 2; 14 | const Channel kSequencerChannel = 3; 15 | const Channel kMultiHomeOrdererChannel = 4; 16 | const Channel kInterleaverChannel = 5; 17 | const Channel kLocalLogChannel = 6; 18 | const Channel kSchedulerChannel = 7; 19 | const Channel kLocalPaxos = 8; 20 | const Channel kGlobalPaxos = 9; 21 | const Channel kWorkerChannel = 10; 22 | // Broker channels range from kBrokerChannel to kMaxChannel - 1 23 | const Channel kBrokerChannel = 11; 24 | const Channel kMaxChannel = 15; 25 | 26 | const uint32_t kMaxNumMachines = 100; 27 | 28 | const uint32_t kPaxosDefaultLeaderPosition = 0; 29 | 30 | const size_t kLockTableSizeLimit = 1000000; 31 | 32 | /**************************** 33 | * Statistic Keys 34 | ****************************/ 35 | 36 | /* Server */ 37 | const char TXN_ID_COUNTER[] = "txn_id_counter"; 38 | const char NUM_PENDING_RESPONSES[] = "num_pending_responses"; 39 | const char NUM_PARTIALLY_FINISHED_TXNS[] = "num_partially_finished_txns"; 40 | const char PENDING_RESPONSES[] = "pending_responses"; 41 | const char PARTIALLY_FINISHED_TXNS[] = "partially_finished_txns"; 42 | 43 | /* Forwarder */ 44 | const char FORW_BATCH_SIZE_PCTLS[] = "forw_batch_size_pctls"; 45 | const char FORW_BATCH_DURATION_MS_PCTLS[] = "forw_batch_duration_ms_pctls"; 46 | 47 | /* Multi-home orderer */ 48 | const char MHO_BATCH_SIZE_PCTLS[] = "mho_batch_size_pctls"; 49 | const char MHO_BATCH_DURATION_MS_PCTLS[] = "mho_batch_duration_ms_pctls"; 50 | 51 | /* Sequencer */ 52 | const char SEQ_BATCH_SIZE_PCTLS[] = "seq_batch_size_pctls"; 53 | const char SEQ_BATCH_DURATION_MS_PCTLS[] = "seq_batch_duration_ms_pctls"; 54 | 55 | /* Scheduler */ 56 | const char ALL_TXNS[] = "all_txns"; 57 | const char NUM_ALL_TXNS[] = "num_all_txns"; 58 | const char NUM_LOCKED_KEYS[] = "num_locked_keys"; 59 | const char LOCK_MANAGER_TYPE[] = "lock_manager_type"; 60 | const char NUM_TXNS_WAITING_FOR_LOCK[] = "num_txns_waiting_for_lock"; 61 | const char NUM_WAITING_FOR_PER_TXN[] = "num_waiting_for_per_txn"; 62 | const char LOCK_TABLE[] = "lock_table"; 63 | const char WAITED_BY_GRAPH[] = "waited_by_graph"; 64 | const char TXN_ID[] = "id"; 65 | const char TXN_DONE[] = "done"; 66 | const char TXN_ABORTING[] = "aborting"; 67 | const char TXN_NUM_LO[] = "num_lo"; 68 | const char TXN_NUM_DISPATCHES[] = "num_dispatches"; 69 | const char TXN_EXPECTED_NUM_LO[] = "expected_num_lo"; 70 | const char TXN_MULTI_HOME[] = "multi_home"; 71 | const char TXN_MULTI_PARTITION[] = "multi_partition"; 72 | 73 | } // namespace slog -------------------------------------------------------------------------------- /common/csv_writer.cpp: -------------------------------------------------------------------------------- 1 | #include "common/csv_writer.h" 2 | 3 | #include 4 | 5 | namespace slog { 6 | 7 | CSVWriter::CSVWriter(const std::string& file_name, const std::vector& columns, char delimiter) { 8 | if (columns.empty()) { 9 | throw std::runtime_error("There must be at least one 
column"); 10 | } 11 | num_columns_ = columns.size(); 12 | line_items_ = 0; 13 | delim_ = delimiter; 14 | 15 | file_ = std::ofstream(file_name, std::ios::out); 16 | if (!file_) { 17 | throw std::runtime_error(std::string("Cannot open file: ") + file_name); 18 | } 19 | bool first = true; 20 | for (const auto& col : columns) { 21 | if (!first) { 22 | file_ << ","; 23 | } 24 | file_ << col; 25 | first = false; 26 | } 27 | file_ << "\n"; 28 | } 29 | 30 | void CSVWriter::AppendDelim() { 31 | if (line_items_ > 1) { 32 | file_ << delim_; 33 | } 34 | } 35 | 36 | void CSVWriter::IncrementLineItemsAndCheck() { 37 | line_items_++; 38 | if (line_items_ > num_columns_) { 39 | throw std::runtime_error("Number of items exceeds number of columns"); 40 | } 41 | } 42 | 43 | } // namespace slog -------------------------------------------------------------------------------- /common/csv_writer.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | namespace slog { 8 | 9 | const struct CSVWriterLineEnder { 10 | } csvendl; 11 | 12 | class CSVWriter { 13 | public: 14 | CSVWriter(const std::string& file_name, const std::vector& columns, char delimiter = ','); 15 | 16 | template ::value>> 17 | CSVWriter& operator<<(T val) { 18 | IncrementLineItemsAndCheck(); 19 | AppendDelim(); 20 | file_ << val; 21 | return *this; 22 | } 23 | 24 | CSVWriter& operator<<(const std::string& str) { 25 | IncrementLineItemsAndCheck(); 26 | AppendDelim(); 27 | file_ << str; 28 | return *this; 29 | } 30 | 31 | CSVWriter& operator<<(const CSVWriterLineEnder& ender) { 32 | if (line_items_ != num_columns_) { 33 | throw std::runtime_error("Number of items must match number of columns"); 34 | } 35 | (void)ender; // Silent unused warning 36 | file_ << "\n"; 37 | line_items_ = 0; 38 | return *this; 39 | } 40 | 41 | private: 42 | void AppendDelim(); 43 | void IncrementLineItemsAndCheck(); 44 | 45 | size_t line_items_; 46 | size_t num_columns_; 47 | std::ofstream file_; 48 | char delim_; 49 | }; 50 | 51 | } // namespace slog -------------------------------------------------------------------------------- /common/json_utils.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | #include "rapidjson/allocators.h" 6 | #include "rapidjson/document.h" 7 | #include "rapidjson/prettywriter.h" 8 | #include "rapidjson/rapidjson.h" 9 | #include "rapidjson/stringbuffer.h" 10 | #include "rapidjson/writer.h" 11 | 12 | namespace slog { 13 | 14 | template 15 | rapidjson::Value ToJsonArrayOfKeyValue(const Container& container, ValueFn value_fn, 16 | rapidjson::Document::AllocatorType& alloc) { 17 | rapidjson::Value json_array(rapidjson::kArrayType); 18 | for (const auto& [key, value] : container) { 19 | rapidjson::Value entry(rapidjson::kArrayType); 20 | entry.PushBack(key, alloc).PushBack(rapidjson::Value(value_fn(value)), alloc); 21 | json_array.PushBack(std::move(entry), alloc); 22 | } 23 | return json_array; 24 | } 25 | 26 | template 27 | rapidjson::Value ToJsonArrayOfKeyValue(const Container& container, rapidjson::Document::AllocatorType& alloc) { 28 | return ToJsonArrayOfKeyValue( 29 | container, [](const auto& value) { return value; }, alloc); 30 | } 31 | 32 | template 33 | rapidjson::Value ToJsonArray(const Container& container, Function fn, rapidjson::Document::AllocatorType& alloc) { 34 | rapidjson::Value json_array(rapidjson::kArrayType); 35 | for (const auto& v : container) { 36 | 
json_array.PushBack(rapidjson::Value(fn(v)), alloc); 37 | } 38 | return json_array; 39 | } 40 | 41 | template 42 | rapidjson::Value ToJsonArray(const Container& container, rapidjson::Document::AllocatorType& alloc) { 43 | return ToJsonArray( 44 | container, [](const auto& value) { return value; }, alloc); 45 | } 46 | 47 | const std::array kPctlLevels = {0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100}; 48 | 49 | template 50 | rapidjson::Value Percentiles(Container& container, rapidjson::Document::AllocatorType& alloc) { 51 | rapidjson::Value pctls(rapidjson::kArrayType); 52 | if (container.empty()) { 53 | return pctls; 54 | } 55 | std::sort(container.begin(), container.end()); 56 | for (auto p : kPctlLevels) { 57 | size_t i = p * (container.size() - 1) / 100; 58 | pctls.PushBack(container[i], alloc); 59 | } 60 | return pctls; 61 | } 62 | 63 | } // namespace slog -------------------------------------------------------------------------------- /common/metrics.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | #include "common/configuration.h" 11 | #include "common/spin_latch.h" 12 | #include "proto/transaction.pb.h" 13 | 14 | namespace slog { 15 | 16 | constexpr uint32_t kSampleMaskSize = 1 << 8; 17 | using sample_mask_t = std::array; 18 | 19 | class TransactionEventMetrics; 20 | 21 | /** 22 | * Repository of metrics per thread 23 | */ 24 | class MetricsRepository { 25 | public: 26 | MetricsRepository(const ConfigurationPtr& config, const sample_mask_t& sample_mask); 27 | 28 | std::chrono::system_clock::time_point RecordTxnEvent(TransactionEvent event); 29 | std::unique_ptr Reset(); 30 | 31 | private: 32 | const ConfigurationPtr config_; 33 | sample_mask_t sample_mask_; 34 | SpinLatch latch_; 35 | 36 | std::unique_ptr txn_event_metrics_; 37 | }; 38 | 39 | extern thread_local std::shared_ptr per_thread_metrics_repo; 40 | 41 | /** 42 | * Handles thread registering, aggregates results, and output results to files 43 | */ 44 | class MetricsRepositoryManager { 45 | public: 46 | MetricsRepositoryManager(const std::string& config_name, const ConfigurationPtr& config); 47 | void RegisterCurrentThread(); 48 | void AggregateAndFlushToDisk(const std::string& dir); 49 | 50 | private: 51 | const std::string config_name_; 52 | const ConfigurationPtr config_; 53 | sample_mask_t sample_mask_; 54 | std::unordered_map> metrics_repos_; 55 | std::mutex mut_; 56 | }; 57 | 58 | using MetricsRepositoryManagerPtr = std::shared_ptr; 59 | 60 | extern uint32_t gLocalMachineId; 61 | extern uint64_t gEnabledEvents; 62 | 63 | void InitializeRecording(const ConfigurationPtr& config); 64 | 65 | template 66 | inline void RecordTxnEvent(TxnOrBatchInternalPtr txn_internal, TransactionEvent event) { 67 | if (!((gEnabledEvents >> event) & 1)) { 68 | return; 69 | } 70 | if (txn_internal != nullptr) { 71 | auto now = std::chrono::system_clock::now().time_since_epoch().count(); 72 | auto new_event = txn_internal->mutable_events()->Add(); 73 | new_event->set_event(event); 74 | new_event->set_time(now); 75 | new_event->set_machine(gLocalMachineId); 76 | new_event->set_home(-1); 77 | } 78 | if (per_thread_metrics_repo != nullptr) { 79 | per_thread_metrics_repo->RecordTxnEvent(event); 80 | } 81 | } 82 | 83 | #ifdef ENABLE_TXN_EVENT_RECORDING 84 | #define INIT_RECORDING(config) slog::InitializeRecording(config) 85 | #define RECORD(txn, event) RecordTxnEvent(txn, event) 86 | #else 87 | #define 
INIT_RECORDING(config) 88 | #define RECORD(txn, event) 89 | #endif 90 | 91 | // Helper function for quickly monitor throughput at a certain place 92 | // TODO: use thread_local instead of static 93 | #define MONITOR_THROUGHPUT() \ 94 | static int TP_COUNTER = 0; \ 95 | static int TP_LAST_COUNTER = 0; \ 96 | static std::chrono::steady_clock::time_point TP_LAST_LOG_TIME; \ 97 | TP_COUNTER++; \ 98 | auto TP_LOG_SPAN = std::chrono::steady_clock::now() - TP_LAST_LOG_TIME; \ 99 | if (TP_LOG_SPAN > 1s) { \ 100 | LOG(INFO) << "Throughput: " \ 101 | << (TP_COUNTER - TP_LAST_COUNTER) / std::chrono::duration_cast(TP_LOG_SPAN).count(); \ 102 | TP_LAST_COUNTER = TP_COUNTER; \ 103 | TP_LAST_LOG_TIME = std::chrono::steady_clock::now(); \ 104 | } 105 | 106 | } // namespace slog -------------------------------------------------------------------------------- /common/offline_data_reader.cpp: -------------------------------------------------------------------------------- 1 | #include "common/offline_data_reader.h" 2 | 3 | #include 4 | #include 5 | 6 | using google::protobuf::io::CodedInputStream; 7 | using google::protobuf::io::FileInputStream; 8 | 9 | namespace slog { 10 | 11 | OfflineDataReader::OfflineDataReader(int fd) 12 | : raw_input_(new FileInputStream(fd)), coded_input_(new CodedInputStream(raw_input_)) { 13 | if (!coded_input_->ReadVarint32(&num_datums_)) { 14 | LOG(FATAL) << "Error while reading data file"; 15 | } 16 | num_read_datums_ = 0; 17 | } 18 | 19 | OfflineDataReader::~OfflineDataReader() { 20 | delete coded_input_; 21 | delete raw_input_; 22 | } 23 | 24 | uint32_t OfflineDataReader::GetNumDatums() { return num_datums_; } 25 | 26 | bool OfflineDataReader::HasNextDatum() { return num_read_datums_ < num_datums_; } 27 | 28 | Datum OfflineDataReader::GetNextDatum() { 29 | int sz; 30 | // Read the size of the next datum 31 | if (!coded_input_->ReadVarintSizeAsInt(&sz)) { 32 | LOG(FATAL) << "Error while reading data file"; 33 | } 34 | std::string buf; 35 | // Read the datum given the size 36 | if (!coded_input_->ReadString(&buf, sz)) { 37 | LOG(FATAL) << "Error while reading data file"; 38 | } 39 | 40 | Datum datum; 41 | // Parse raw bytes into protobuf object 42 | datum.ParseFromString(buf); 43 | 44 | num_read_datums_++; 45 | 46 | return datum; 47 | } 48 | 49 | } // namespace slog -------------------------------------------------------------------------------- /common/offline_data_reader.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | 6 | #include "proto/offline_data.pb.h" 7 | 8 | namespace slog { 9 | 10 | class OfflineDataReader { 11 | public: 12 | OfflineDataReader(int fd); 13 | ~OfflineDataReader(); 14 | 15 | uint32_t GetNumDatums(); 16 | bool HasNextDatum(); 17 | Datum GetNextDatum(); 18 | 19 | private: 20 | google::protobuf::io::ZeroCopyInputStream* raw_input_; 21 | google::protobuf::io::CodedInputStream* coded_input_; 22 | uint32_t num_datums_; 23 | uint32_t num_read_datums_; 24 | }; 25 | 26 | } // namespace slog -------------------------------------------------------------------------------- /common/proto_utils.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | #include "common/configuration.h" 8 | #include "common/sharder.h" 9 | #include "common/types.h" 10 | 11 | #define ENUM_NAME(enum, enum_type) enum_type##_descriptor()->FindValueByNumber(enum)->name() 12 | #define CASE_NAME(case, type) 
type::descriptor()->FindFieldByNumber(case)->name() 13 | 14 | using std::pair; 15 | using std::string; 16 | using std::unordered_map; 17 | 18 | namespace slog { 19 | 20 | struct KeyMetadata { 21 | KeyMetadata(const Key& key, KeyType type = KeyType::READ, std::optional metadata = {}) 22 | : key(key), type(type), metadata(metadata) {} 23 | 24 | KeyMetadata(const Key& key, KeyType type, uint32_t master) : KeyMetadata(key, type, {{master}}) {} 25 | 26 | Key key; 27 | KeyType type; 28 | std::optional metadata; 29 | }; 30 | 31 | /** 32 | * Creates a new transaction 33 | * @param key_metadatas Keys and metadata 34 | * @param code Code 35 | * @param remaster If specify, `code` is ignored and the txn becomes a remaster txn 36 | * @param coordinating_server MachineId of the server in charge of responding the 37 | * transaction result to the client. 38 | * @return A new transaction having given properties 39 | */ 40 | Transaction* MakeTransaction(const std::vector& key_metadatas, 41 | const std::vector>& code = {{}}, 42 | std::optional remaster = std::nullopt, MachineId coordinating_server = 0); 43 | 44 | /** 45 | * Inspects the internal metadata of a transaction then determines whether 46 | * a transaction is SINGLE_HOME, MULTI_HOME, or UNKNOWN. 47 | * Pre-condition: all keys in master_metadata exist in either write set or 48 | * read set of the transaction 49 | * 50 | * @param txn The questioned transaction. Its `type` property will also be 51 | * set to the result. 52 | * @return The type of the given transaction. 53 | */ 54 | TransactionType SetTransactionType(Transaction& txn); 55 | 56 | /** 57 | * If in_place is set to true, the given txn is modified 58 | */ 59 | Transaction* GenerateLockOnlyTxn(Transaction* txn, uint32_t lo_master, bool in_place = false); 60 | 61 | /** 62 | * Returns nullptr if the generated txn contains no relevant key 63 | */ 64 | Transaction* GeneratePartitionedTxn(const SharderPtr& sharder, Transaction* txn, uint32_t partition, 65 | bool in_place = false); 66 | 67 | /** 68 | * Populate the involved_replicas field in the transaction 69 | */ 70 | void PopulateInvolvedReplicas(Transaction& txn); 71 | 72 | /** 73 | * Populate the involved_partitions field in the transaction 74 | */ 75 | void PopulateInvolvedPartitions(const SharderPtr& sharder, Transaction& txn); 76 | 77 | /** 78 | * Merges the results of two transactions 79 | * 80 | * @param txn The transaction that will hold the final merged result 81 | * @param other The transaction to be merged with 82 | */ 83 | void MergeTransaction(Transaction& txn, const Transaction& other); 84 | 85 | std::ostream& operator<<(std::ostream& os, const Transaction& txn); 86 | std::ostream& operator<<(std::ostream& os, const MasterMetadata& metadata); 87 | 88 | bool operator==(const MasterMetadata& metadata1, const MasterMetadata& metadata2); 89 | bool operator==(const ValueEntry& val1, const ValueEntry& val2); 90 | bool operator==(const KeyValueEntry& kv1, const KeyValueEntry& kv2); 91 | bool operator==(const Transaction& txn1, const Transaction txn2); 92 | 93 | /** 94 | * Extract txns from a batch 95 | */ 96 | std::vector Unbatch(internal::Batch* batch); 97 | 98 | } // namespace slog -------------------------------------------------------------------------------- /common/sharder.cpp: -------------------------------------------------------------------------------- 1 | #include "common/sharder.h" 2 | 3 | namespace slog { 4 | 5 | namespace { 6 | 7 | template 8 | uint32_t FNVHash(It begin, It end) { 9 | uint64_t hash = 0x811c9dc5; 10 | for 
(auto it = begin; it != end; it++) { 11 | hash = (hash * 0x01000193) % (1LL << 32); 12 | hash ^= *it; 13 | } 14 | return hash; 15 | } 16 | 17 | } // namespace 18 | 19 | std::shared_ptr Sharder::MakeSharder(const ConfigurationPtr& config) { 20 | if (config->proto_config().has_simple_partitioning()) { 21 | return std::make_shared(config); 22 | } else if (config->proto_config().has_tpcc_partitioning()) { 23 | return std::make_shared(config); 24 | } 25 | return std::make_shared(config); 26 | } 27 | 28 | Sharder::Sharder(const ConfigurationPtr& config) 29 | : local_partition_(config->local_partition()), num_partitions_(config->num_partitions()) {} 30 | 31 | bool Sharder::is_local_key(const Key& key) const { return compute_partition(key) == local_partition_; } 32 | 33 | uint32_t Sharder::num_partitions() const { return num_partitions_; } 34 | 35 | uint32_t Sharder::local_partition() const { return local_partition_; } 36 | 37 | HashSharder::HashSharder(const ConfigurationPtr& config) 38 | : Sharder(config), partition_key_num_bytes_(config->proto_config().hash_partitioning().partition_key_num_bytes()) {} 39 | 40 | uint32_t HashSharder::compute_partition(const Key& key) const { 41 | auto end = partition_key_num_bytes_ >= key.length() ? key.end() : key.begin() + partition_key_num_bytes_; 42 | return FNVHash(key.begin(), end) % num_partitions_; 43 | } 44 | 45 | SimpleSharder::SimpleSharder(const ConfigurationPtr& config) : Sharder(config) {} 46 | 47 | uint32_t SimpleSharder::compute_partition(const Key& key) const { return std::stoll(key) % num_partitions_; } 48 | 49 | TPCCSharder::TPCCSharder(const ConfigurationPtr& config) : Sharder(config) {} 50 | uint32_t TPCCSharder::compute_partition(const Key& key) const { 51 | int w_id = *reinterpret_cast(key.data()); 52 | return (w_id - 1) % num_partitions_; 53 | } 54 | 55 | } // namespace slog -------------------------------------------------------------------------------- /common/sharder.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "common/configuration.h" 4 | #include "common/types.h" 5 | 6 | namespace slog { 7 | 8 | class Sharder; 9 | 10 | using SharderPtr = std::shared_ptr; 11 | 12 | class Sharder { 13 | public: 14 | static std::shared_ptr MakeSharder(const ConfigurationPtr& config); 15 | 16 | Sharder(const ConfigurationPtr& config); 17 | 18 | bool is_local_key(const Key& key) const; 19 | uint32_t num_partitions() const; 20 | uint32_t local_partition() const; 21 | 22 | virtual uint32_t compute_partition(const Key& key) const = 0; 23 | 24 | protected: 25 | uint32_t local_partition_; 26 | uint32_t num_partitions_; 27 | }; 28 | 29 | class HashSharder : public Sharder { 30 | public: 31 | HashSharder(const ConfigurationPtr& config); 32 | uint32_t compute_partition(const Key& key) const final; 33 | 34 | private: 35 | size_t partition_key_num_bytes_; 36 | }; 37 | 38 | class SimpleSharder : public Sharder { 39 | public: 40 | SimpleSharder(const ConfigurationPtr& config); 41 | uint32_t compute_partition(const Key& key) const final; 42 | }; 43 | 44 | class TPCCSharder : public Sharder { 45 | public: 46 | TPCCSharder(const ConfigurationPtr& config); 47 | uint32_t compute_partition(const Key& key) const final; 48 | }; 49 | 50 | } // namespace slog -------------------------------------------------------------------------------- /common/spin_latch.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | // See 
https://rigtorp.se/spinlock/ 6 | class SpinLatch { 7 | public: 8 | void lock() { 9 | for (;;) { 10 | if (!lock_.exchange(true)) { 11 | break; 12 | } 13 | while (lock_.load()) 14 | ; 15 | }; 16 | } 17 | 18 | void unlock() { lock_.store(false); } 19 | 20 | private: 21 | std::atomic lock_ = {false}; 22 | }; 23 | -------------------------------------------------------------------------------- /common/string_utils.cpp: -------------------------------------------------------------------------------- 1 | #include "common/string_utils.h" 2 | 3 | #include 4 | 5 | namespace slog { 6 | 7 | using std::string; 8 | using std::vector; 9 | 10 | size_t NextToken(string& token, const string& str, const string& delims, size_t pos) { 11 | auto start = str.find_first_not_of(delims, pos); 12 | if (start == string::npos) { 13 | token = ""; 14 | return string::npos; 15 | } 16 | auto end = str.find_first_of(delims, start); 17 | if (end == string::npos) { 18 | end = str.length(); 19 | } 20 | token = str.substr(start, end - start); 21 | return start + token.length(); 22 | } 23 | 24 | size_t NextNTokens(vector& tokens, const string& str, const string& delims, size_t n, size_t pos) { 25 | tokens.clear(); 26 | for (size_t i = 0; i < n; i++) { 27 | string token; 28 | pos = NextToken(token, str, delims, pos); 29 | if (pos == string::npos) { 30 | tokens.clear(); 31 | return string::npos; 32 | } 33 | tokens.push_back(std::move(token)); 34 | } 35 | return pos; 36 | } 37 | 38 | string Trim(string str) { 39 | // trim left 40 | auto it = str.begin(); 41 | while (it != str.end() && std::isspace(*it)) { 42 | it++; 43 | } 44 | str.erase(str.begin(), it); 45 | 46 | // trim right 47 | auto rit = str.rbegin(); 48 | while (rit != str.rend() && std::isspace(*rit)) { 49 | rit++; 50 | } 51 | str.erase(rit.base(), str.end()); 52 | 53 | return str; 54 | } 55 | 56 | vector Split(const std::string& str, const std::string& delims) { 57 | vector res; 58 | string token; 59 | size_t pos = NextToken(token, str, delims, 0); 60 | while (pos != std::string::npos) { 61 | res.push_back(token); 62 | pos = NextToken(token, str, delims, pos); 63 | } 64 | return res; 65 | } 66 | 67 | const std::string RandomStringGenerator::kCharacters("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz "); 68 | 69 | RandomStringGenerator::RandomStringGenerator(int seed, size_t pool_size) : rg_(seed), rnd_str_pool_offset_(0) { 70 | rnd_str_pool_.reserve(pool_size); 71 | std::uniform_int_distribution char_rnd(0, kCharacters.size() - 1); 72 | for (size_t i = 0; i < pool_size; i++) { 73 | rnd_str_pool_.push_back(kCharacters[char_rnd(rg_)]); 74 | } 75 | } 76 | 77 | std::string RandomStringGenerator::operator()(size_t len) { 78 | if (rnd_str_pool_offset_ + len >= rnd_str_pool_.size()) { 79 | rnd_str_pool_offset_ = 0; 80 | } 81 | auto res = rnd_str_pool_.substr(rnd_str_pool_offset_, len); 82 | rnd_str_pool_offset_ += len; 83 | return res; 84 | } 85 | 86 | } // namespace slog -------------------------------------------------------------------------------- /common/string_utils.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | namespace slog { 9 | 10 | size_t NextToken(std::string& token, const std::string& str, const std::string& delims, size_t pos = 0); 11 | 12 | size_t NextNTokens(std::vector& tokens, const std::string& str, const std::string& delims, size_t n = 1, 13 | size_t pos = 0); 14 | 15 | std::string Trim(std::string str); 16 | 17 | std::vector 
Split(const std::string& str, const std::string& delims); 18 | 19 | template