├── .gitignore ├── CMakeLists.txt ├── LICENSE ├── LICENSE_HEADER ├── README.md ├── add_license.sh ├── docker └── Dockerfile ├── external ├── CMakeLists.txt └── EnableCMP0048.cmake ├── include ├── Blacksmith.hpp ├── Forges │ ├── FuzzyHammerer.hpp │ ├── ReplayingHammerer.hpp │ └── TraditionalHammerer.hpp ├── Fuzzer │ ├── Aggressor.hpp │ ├── AggressorAccessPattern.hpp │ ├── BitFlip.hpp │ ├── CodeJitter.hpp │ ├── FuzzingParameterSet.hpp │ ├── HammeringPattern.hpp │ ├── PatternAddressMapper.hpp │ └── PatternBuilder.hpp ├── GlobalDefines.hpp ├── Memory │ ├── DRAMAddr.hpp │ ├── DramAnalyzer.hpp │ └── Memory.hpp └── Utilities │ ├── AsmPrimitives.hpp │ ├── Enums.hpp │ ├── Logger.hpp │ ├── Range.hpp │ ├── TimeHelper.hpp │ └── Uuid.hpp └── src ├── Blacksmith.cpp ├── Forges ├── FuzzyHammerer.cpp ├── ReplayingHammerer.cpp └── TraditionalHammerer.cpp ├── Fuzzer ├── Aggressor.cpp ├── AggressorAccessPattern.cpp ├── BitFlip.cpp ├── CodeJitter.cpp ├── FuzzingParameterSet.cpp ├── HammeringPattern.cpp ├── PatternAddressMapper.cpp └── PatternBuilder.cpp ├── Memory ├── DRAMAddr.cpp ├── DramAnalyzer.cpp └── Memory.cpp └── Utilities ├── Enums.cpp └── Logger.cpp /.gitignore: -------------------------------------------------------------------------------- 1 | # folders used by the Makefile targets 2 | bin/* 3 | obj/* 4 | 5 | # generated by the Makefile's benchmark target 6 | log/* 7 | 8 | # dumpfile 9 | core 10 | 11 | # VS-code related files 12 | .vscode/* 13 | 14 | # CLion-related files 15 | scripts/run_as_sudo.sh 16 | .idea 17 | 18 | # build files 19 | CMakeFiles/* 20 | build/* 21 | cmake-build-* 22 | 23 | # OSX-related files 24 | .DS_Store 25 | 26 | # visualization scripts 27 | visualize/venv/ 28 | visualize/data/* 29 | 30 | scripts/portability_experiment/* 31 | -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.11 
FATAL_ERROR) 2 | project(blacksmith VERSION 0.0.1 LANGUAGES CXX) 3 | 4 | # === OPTIONS ================================================================== 5 | 6 | set( 7 | BLACKSMITH_ENABLE_JSON 8 | ON 9 | CACHE BOOL 10 | "Use the nlohmann/json library to export JSON-formatted fuzzing data." 11 | FORCE 12 | ) 13 | 14 | set( 15 | BLACKSMITH_ENABLE_JITTING 16 | ON 17 | CACHE BOOL 18 | "Use the asmjit library to jit the hammering code." 19 | FORCE 20 | ) 21 | 22 | string(ASCII 27 ESC) 23 | 24 | # === DEFINITIONS ============================================================== 25 | 26 | set(GIT_COMMIT_HASH "NO_REPOSITORY") 27 | 28 | execute_process( 29 | COMMAND git status 30 | WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} 31 | RESULT_VARIABLE ret 32 | OUTPUT_QUIET 33 | ERROR_QUIET 34 | ) 35 | 36 | if (ret EQUAL "0") 37 | # We're in a git repository, attempt to retrieve the current commit tag. 38 | execute_process( 39 | COMMAND git rev-parse HEAD 40 | WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} 41 | OUTPUT_VARIABLE GIT_COMMIT_HASH 42 | OUTPUT_STRIP_TRAILING_WHITESPACE 43 | ) 44 | endif () 45 | 46 | # === DEPENDENCIES ============================================================= 47 | 48 | add_subdirectory(external) 49 | 50 | # === LIBBLACKSMITH ============================================================ 51 | 52 | add_library( 53 | bs 54 | include/GlobalDefines.hpp 55 | include/Utilities/TimeHelper.hpp 56 | src/Forges/FuzzyHammerer.cpp 57 | src/Forges/ReplayingHammerer.cpp 58 | src/Forges/TraditionalHammerer.cpp 59 | src/Fuzzer/Aggressor.cpp 60 | src/Fuzzer/AggressorAccessPattern.cpp 61 | src/Fuzzer/BitFlip.cpp 62 | src/Fuzzer/CodeJitter.cpp 63 | src/Fuzzer/FuzzingParameterSet.cpp 64 | src/Fuzzer/HammeringPattern.cpp 65 | src/Fuzzer/PatternAddressMapper.cpp 66 | src/Fuzzer/PatternBuilder.cpp 67 | src/Memory/DRAMAddr.cpp 68 | src/Memory/DramAnalyzer.cpp 69 | src/Memory/Memory.cpp 70 | src/Utilities/Enums.cpp 71 | src/Utilities/Logger.cpp 72 | ) 73 | 74 | target_include_directories( 
75 | bs 76 | PUBLIC 77 | ${CMAKE_CURRENT_SOURCE_DIR}/include 78 | ) 79 | 80 | # Note: PUBLIC to also force consumers (i.e., the blacksmith executable) to use 81 | # these features and options. 82 | target_compile_features( 83 | bs 84 | PUBLIC 85 | cxx_std_17 86 | ) 87 | 88 | target_compile_options( 89 | bs 90 | PUBLIC 91 | -O0 92 | -Wall 93 | -Wextra 94 | -Wno-unused-function 95 | -Wno-format-security 96 | ) 97 | 98 | if (BLACKSMITH_ENABLE_JSON) 99 | target_link_libraries( 100 | bs 101 | PUBLIC 102 | nlohmann_json::nlohmann_json 103 | ) 104 | 105 | target_compile_definitions( 106 | bs 107 | PUBLIC 108 | ENABLE_JSON 109 | ) 110 | endif () 111 | 112 | if (BLACKSMITH_ENABLE_JITTING) 113 | # This fixes an issue that causes GCC 10.3 (but not 8.3 or 11.1) to miss a 114 | # header somehow. 115 | FetchContent_MakeAvailable(asmjit) 116 | 117 | target_include_directories( 118 | bs 119 | PUBLIC 120 | ${asmjit_SOURCE_DIR}/src 121 | ) 122 | 123 | target_link_libraries( 124 | bs 125 | PRIVATE 126 | asmjit 127 | ) 128 | 129 | target_compile_definitions( 130 | bs 131 | PUBLIC 132 | ENABLE_JITTING 133 | ) 134 | endif () 135 | 136 | # === BLACKSMITH =============================================================== 137 | 138 | add_executable( 139 | blacksmith 140 | include/Blacksmith.hpp 141 | src/Blacksmith.cpp 142 | ) 143 | 144 | target_compile_definitions( 145 | blacksmith 146 | PRIVATE 147 | GIT_COMMIT_HASH="${GIT_COMMIT_HASH}" 148 | ) 149 | 150 | target_link_libraries( 151 | blacksmith 152 | PRIVATE 153 | bs 154 | argagg 155 | ) 156 | 157 | # === CLEANUP ================================================================== 158 | 159 | unset(BLACKSMITH_ENABLE_JSON CACHE) 160 | unset(BLACKSMITH_ENABLE_JITTING CACHE) 161 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 ETH Zurich 4 | 5 | Permission is hereby granted, free 
of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /LICENSE_HEADER: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2021 by ETH Zurich. 3 | * Licensed under the MIT License, see LICENSE file for more details. 
4 | */ 5 | 6 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Blacksmith Rowhammer Fuzzer 2 | 3 | [![Academic Code](https://img.shields.io/badge/Origin-Academic%20Code-C1ACA0.svg?style=flat)]() [![Language Badge](https://img.shields.io/badge/Made%20with-C/C++-blue.svg)](https://isocpp.org/std/the-standard) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![contributions welcome](https://img.shields.io/badge/Contributions-welcome-lightgray.svg?style=flat)]() 4 | 5 | 6 | [![DOI](https://img.shields.io/badge/DOI-20.500.11850/525008-yellow.svg)](https://www.research-collection.ethz.ch/handle/20.500.11850/525013) [![Preprint](https://img.shields.io/badge/Preprint-ETH%20Research%20Collection-orange.svg)](https://www.research-collection.ethz.ch/handle/20.500.11850/525008) [![Paper](https://img.shields.io/badge/To%20appear%20in-IEEE%20S&P%20'22-brightgreen.svg)](https://www.ieee-security.org/TC/SP2022/program-papers.html) [![Funding](https://img.shields.io/badge/Grant-NCCR%20Automation%20(51NF40180545)-red.svg)](https://nccr-automation.ch/) 7 | 8 | This repository provides the code accompanying the paper _[Blacksmith: Scalable Rowhammering in the Frequency Domain](https://comsec.ethz.ch/wp-content/files/blacksmith_sp22.pdf)_ that is to appear in the IEEE conference Security & Privacy (S&P) 2022. 9 | 10 | This is the implementation of our Blacksmith Rowhammer fuzzer. This fuzzer crafts novel non-uniform Rowhammer access patterns based on the concepts of frequency, phase, and amplitude. Our evaluation on 40 DIMMs showed that it is able to bypass recent Target Row Refresh (TRR) in-DRAM mitigations effectively and as such can could trigger bit flips on all 40 tested DIMMs. 11 | 12 | ## Getting Started 13 | 14 | Following, we quickly describe how to build and run Blacksmith. 
15 | 16 | ### Prerequisites 17 | 18 | Blacksmith has been tested on Ubuntu 18.04 LTS with Linux kernel 4.15.0. As the CMakeLists we ship with Blacksmith downloads all required dependencies at compile time, there is no need to install any package other than g++ (>= 8) and cmake (>= 3.14). 19 | 20 | **NOTE**: The DRAM address functions that are hard-coded in [DRAMAddr.cpp](https://github.com/comsec-group/blacksmith/blob/public/src/Memory/DRAMAddr.cpp) assume an Intel Core i7-8700K. For any other microarchitecture, you will need to first reverse-engineer these functions (e.g., using [DRAMA](https://github.com/IAIK/drama) or [TRResspass' DRAMA](https://github.com/vusec/trrespass/tree/master/drama)) and then update the matrices in this class accordingly. 21 | 22 | To facilitate the development, we also provide a Docker container (see [Dockerfile](docker/Dockerfile)) where all required tools and libraries are installed. This container can be configured, for example, as remote host in the CLion IDE, which automatically transfers the files via SSH to the Docker container (i.e., no manual mapping required). 23 | 24 | ### Building Blacksmith 25 | 26 | You can build Blacksmith with its supplied `CMakeLists.txt` in a new `build` directory: 27 | 28 | ```bash 29 | mkdir build \ 30 | && cd build \ 31 | && cmake .. \ 32 | && make -j$(nproc) 33 | ``` 34 | 35 | Now we can run Blacksmith. 
For example, we can run Blacksmith in fuzzing mode by passing a random DIMM ID (e.g., `--dimm-id 1`; only used internally for logging into `stdout.log`), we limit the fuzzing to 6 hours (`--runtime-limit 21600`), pass the number of ranks of our current DIMM (`--ranks 1`) to select the proper bank/rank functions, and tell Blacksmith to do a sweep with the best found pattern after fuzzing finished (`--sweeping`): 36 | 37 | ```bash 38 | sudo ./blacksmith --dimm-id 1 --runtime-limit 21600 --ranks 1 --sweeping 39 | ``` 40 | 41 | While Blacksmith is running, you can use `tail -f stdout.log` to keep track of the current progress (e.g., patterns, found bit flips). You will see a line like 42 | ``` 43 | [!] Flip 0x2030486dcc, row 3090, page offset: 3532, from 8f to 8b, detected after 0 hours 6 minutes 6 seconds. 44 | ``` 45 | in case that a bit flip was found. After finishing the Blacksmith run, you can find a `fuzz-summary.json` that contains the information found in the stdout.log in a machine-processable format. In case you passed the `--sweeping` flag, you can additionally find a `sweep-summary-*.json` file that contains the information of the sweeping pass. 46 | 47 | ## Supported Parameters 48 | 49 | Blacksmith supports the command-line arguments listed in the following. 50 | Except for the parameters `--dimm-id` and `--ranks` all other parameters are optional. 
51 | 52 | ``` 53 | -h, --help 54 | shows this help message 55 | 56 | ==== Mandatory Parameters ================================== 57 | 58 | -d, --dimm-id 59 | internal identifier of the currently inserted DIMM (default: 0) 60 | -r, --ranks 61 | number of ranks on the DIMM, used to determine bank/rank/row functions, assumes Intel Coffee Lake CPU (default: None) 62 | 63 | ==== Execution Modes ============================================== 64 | 65 | -f, --fuzzing 66 | perform a fuzzing run (default program mode) 67 | -g, --generate-patterns 68 | generates N patterns, but does not perform hammering; used by ARM port 69 | -y, --replay-patterns 70 | replays patterns given as comma-separated list of pattern IDs 71 | 72 | ==== Replaying-Specific Configuration ============================= 73 | 74 | -j, --load-json 75 | loads the specified JSON file generated in a previous fuzzer run, required for --replay-patterns 76 | 77 | ==== Fuzzing-Specific Configuration ============================= 78 | 79 | -s, --sync 80 | synchronize with REFRESH while hammering (default: 1) 81 | -w, --sweeping 82 | sweep the best pattern over a contig. memory area after fuzzing (default: 0) 83 | -t, --runtime-limit 84 | number of seconds to run the fuzzer before sweeping/terminating (default: 120) 85 | -a, --acts-per-ref 86 | number of activations in a tREF interval, i.e., 7.8us (default: None) 87 | -p, --probes 88 | number of different DRAM locations to try each pattern on (default: NUM_BANKS/4) 89 | 90 | ``` 91 | 92 | The default values of the parameters can be found in the [`struct ProgramArguments`](include/Blacksmith.hpp#L8). 93 | 94 | Configuration parameters of Blacksmith that we did not need to modify frequently, and thus are not runtime parameters, can be found in the [`GlobalDefines.hpp`](include/GlobalDefines.hpp) file.
95 | 96 | ## Citing our Work 97 | 98 | To cite the Blacksmith **paper** in other academic papers, please use the following BibTeX entry: 99 | 100 | ``` 101 | @inproceedings{20.500.11850/525008, 102 | title = {{{BLACKSMITH}}: {{Scalable}} {{Rowhammering}} in the {{Frequency Domain}}}, 103 | shorttitle = {Blacksmith}, 104 | booktitle = {{{IEEE S}}\&{{P}} '22}, 105 | author = {Jattke, Patrick and {van der Veen}, Victor and Frigo, Pietro and Gunter, Stijn and Razavi, Kaveh}, 106 | year = {2022-05}, 107 | note = {\url{https://comsec.ethz.ch/wp-content/files/blacksmith_sp22.pdf}}, 108 | doi = {20.500.11850/525008}, 109 | } 110 | ``` 111 | 112 | To cite the Blacksmith **software** in academic papers, please use the following BibTeX entry: 113 | 114 | ``` 115 | @MISC{20.500.11850/525013, 116 | title = {{{BLACKSMITH}}: {{Scalable}} {{Rowhammering}} in the {{Frequency Domain}}}, 117 | copyright = {MIT License}, 118 | year = {2022-05}, 119 | author = {Jattke, Patrick and van der Veen, Victor and Frigo, Pietro and Gunter, Stijn and Razavi, Kaveh}, 120 | language = {en}, 121 | note = {\url{https://github.com/comsec-group/blacksmith}}, 122 | doi = {20.500.11850/525013} 123 | } 124 | ``` 125 | -------------------------------------------------------------------------------- /add_license.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | prepend_license_info() { 4 | for file in include/**/*.hpp; do 5 | echo "Processing $file..." 6 | # check whether "MIT License" is already present in file 7 | grep -q "MIT License" "$file" 8 | if [ $? -ne 0 ]; then 9 | # if string is not present, prepend license information to file 10 | cat LICENSE_HEADER "$file" >tempfile && mv tempfile $file 11 | fi 12 | done 13 | } 14 | 15 | remove_license_info() { 16 | for file in include/**/*.hpp; do 17 | echo "Processing $file..." 18 | # check whether "MIT License" is already present in file 19 | grep -q "MIT License" "$file" 20 | if [ $?
-eq 0 ]; then 21 | sed -e '1,5d' <"$file" >tempfile && mv tempfile "$file" 22 | fi 23 | done 24 | } 25 | 26 | # enable calling the script's functions from terminal, e.g.: 27 | # ./add_license.sh prepend_license_info 28 | "$@" 29 | -------------------------------------------------------------------------------- /docker/Dockerfile: -------------------------------------------------------------------------------- 1 | # CLion remote docker environment (How to build docker container, run and stop it) 2 | # 3 | # Build and run: 4 | # docker build -t clion/remote-cpp-env:0.5 -f Dockerfile.remote-cpp-env . 5 | # docker run -d --cap-add sys_ptrace -p127.0.0.1:2222:22 --name clion_remote_env clion/remote-cpp-env:0.5 6 | # ssh-keygen -f "$HOME/.ssh/known_hosts" -R "[localhost]:2222" 7 | # 8 | # stop: 9 | # docker stop clion_remote_env 10 | # 11 | # ssh credentials (test user): 12 | # user@password 13 | 14 | FROM ubuntu:21.04 15 | 16 | RUN DEBIAN_FRONTEND="noninteractive" apt-get update && apt-get -y install tzdata 17 | 18 | RUN apt-get update \ 19 | && apt-get install -y ssh \ 20 | build-essential \ 21 | gcc-8 \ 22 | g++-8 \ 23 | gdb \ 24 | clang \ 25 | cmake \ 26 | rsync \ 27 | tar \ 28 | python \ 29 | git \ 30 | && apt-get clean 31 | 32 | RUN ( \ 33 | echo 'LogLevel DEBUG2'; \ 34 | echo 'PermitRootLogin yes'; \ 35 | echo 'PasswordAuthentication yes'; \ 36 | echo 'Subsystem sftp /usr/lib/openssh/sftp-server'; \ 37 | ) > /etc/ssh/sshd_config_test_clion \ 38 | && mkdir /run/sshd 39 | 40 | RUN useradd -m user \ 41 | && yes password | passwd user 42 | 43 | RUN usermod -s /bin/bash user 44 | 45 | CMD ["/usr/sbin/sshd", "-D", "-e", "-f", "/etc/ssh/sshd_config_test_clion"] 46 | 47 | # build with: 48 | # docker build -t clion/remotecpp . 
49 | 50 | # run with: 51 | # docker run --rm -d --cap-add sys_ptrace -p127.0.0.1:2222:22 --name cre clion/remotecpp 52 | -------------------------------------------------------------------------------- /external/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | include(FetchContent) 2 | 3 | function(message) 4 | if (NOT MESSAGE_QUIET) 5 | _message(${ARGN}) 6 | endif() 7 | endfunction() 8 | 9 | # === ARGAGG =================================================================== 10 | 11 | # this is to silence the CMake warning "Policy CMP0048 is not set: project() command manages VERSION variables." 12 | # because argagg does not set the project() infos properly 13 | set(CMAKE_PROJECT_INCLUDE_BEFORE "${CMAKE_CURRENT_SOURCE_DIR}/EnableCMP0048.cmake") 14 | 15 | message(STATUS "Fetching external dependency argagg") 16 | set(MESSAGE_QUIET ON) 17 | 18 | FetchContent_Declare( 19 | argagg 20 | GIT_REPOSITORY https://github.com/vietjtnguyen/argagg.git 21 | GIT_TAG 79e4adfa2c6e2bfbe63da05cc668eb9ad5596748 22 | ) 23 | 24 | FetchContent_MakeAvailable(argagg) 25 | 26 | # This is a header-only library, and the provided CMakeLists.txt does not 27 | # provide a suitable library target. 
28 | add_library( 29 | argagg 30 | INTERFACE 31 | ) 32 | 33 | target_include_directories( 34 | argagg 35 | INTERFACE 36 | ${argagg_SOURCE_DIR}/include 37 | ) 38 | 39 | unset(CMAKE_PROJECT_INCLUDE_BEFORE) 40 | 41 | message(STATUS "Fetching external dependency argagg -- done!") 42 | set(MESSAGE_QUIET OFF) 43 | 44 | # === ASMJIT =================================================================== 45 | 46 | if (BLACKSMITH_ENABLE_JITTING) 47 | message(STATUS "Fetching external dependency asmjit") 48 | set(MESSAGE_QUIET OFF) 49 | 50 | FetchContent_Declare( 51 | asmjit 52 | GIT_REPOSITORY https://github.com/asmjit/asmjit.git 53 | GIT_TAG 78de7d9c81a6ad1b0f732b52666960d9be1c6461 54 | ) 55 | 56 | FetchContent_MakeAvailable(asmjit) 57 | 58 | set(MESSAGE_QUIET OFF) 59 | message(STATUS "Fetching external dependency asmjit -- done!") 60 | endif() 61 | 62 | # === NLOHMANN/JSON ============================================================ 63 | 64 | if (BLACKSMITH_ENABLE_JSON) 65 | message(STATUS "Fetching external dependency nlohmann/json") 66 | set(MESSAGE_QUIET ON) 67 | 68 | FetchContent_Declare( 69 | json 70 | GIT_REPOSITORY https://github.com/nlohmann/json.git 71 | GIT_TAG v3.9.1 72 | ) 73 | 74 | FetchContent_GetProperties(json) 75 | if (NOT json_POPULATED) 76 | FetchContent_Populate(json) 77 | add_subdirectory(${json_SOURCE_DIR} ${json_BINARY_DIR} EXCLUDE_FROM_ALL) 78 | endif () 79 | 80 | set(MESSAGE_QUIET OFF) 81 | message(STATUS "Fetching external dependency nlohmann/json -- done!") 82 | endif() 83 | -------------------------------------------------------------------------------- /external/EnableCMP0048.cmake: -------------------------------------------------------------------------------- 1 | cmake_policy(SET CMP0048 NEW) 2 | -------------------------------------------------------------------------------- /include/Blacksmith.hpp: -------------------------------------------------------------------------------- 1 | #ifndef BLACKSMITH_INCLUDE_BLACKSMITH_HPP_ 2 | #define 
BLACKSMITH_INCLUDE_BLACKSMITH_HPP_ 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | // defines the program's arguments and their default values 9 | struct ProgramArguments { 10 | // the duration of the fuzzing run in second 11 | unsigned long runtime_limit = 120; 12 | // the number of ranks of the DIMM to hammer 13 | int num_ranks = 0; 14 | // no. of activations we can do within a refresh interval 15 | size_t acts_per_trefi = 0; 16 | // path to JSON file to load 17 | std::string load_json_filename; 18 | // the IDs of the patterns to be loaded from a given JSON file 19 | std::unordered_set pattern_ids{}; 20 | // total number of mappings (i.e., Aggressor ID -> DRAM rows mapping) to try for a pattern 21 | size_t num_address_mappings_per_pattern = 3; 22 | // number of DRAM locations we use to check a (pattern, address mapping)'s effectiveness 23 | size_t num_dram_locations_per_mapping = 3; 24 | // whether to sweep the 'best pattern' that was found during fuzzing afterward over a contiguous chunk of memory 25 | bool sweeping = false; 26 | // the ID of the DIMM that is currently inserted 27 | long dimm_id = -1; 28 | // these two parameters define the default program mode: do fuzzing and synchronize with REFRESH 29 | bool do_fuzzing = true; 30 | bool use_synchronization = true; 31 | bool fixed_acts_per_ref = false; 32 | }; 33 | 34 | extern ProgramArguments program_args; 35 | 36 | int main(int argc, char **argv); 37 | 38 | void handle_args(int argc, char **argv); 39 | 40 | [[ noreturn ]] void handle_arg_generate_patterns(int num_activations, size_t probes_per_pattern); 41 | 42 | #endif //BLACKSMITH_INCLUDE_BLACKSMITH_HPP_ 43 | -------------------------------------------------------------------------------- /include/Forges/FuzzyHammerer.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2021 by ETH Zurich. 3 | * Licensed under the MIT License, see LICENSE file for more details. 
4 | */ 5 | 6 | #ifndef BLACKSMITH_SRC_FORGES_FUZZYHAMMERER_HPP_ 7 | #define BLACKSMITH_SRC_FORGES_FUZZYHAMMERER_HPP_ 8 | 9 | #include "Fuzzer/HammeringPattern.hpp" 10 | #include "Memory/Memory.hpp" 11 | #include "ReplayingHammerer.hpp" 12 | 13 | class FuzzyHammerer { 14 | public: 15 | // counter for the number of generated patterns so far 16 | static size_t cnt_generated_patterns; 17 | 18 | // counter for the number of different locations where we tried the current pattern 19 | static size_t cnt_pattern_probes; 20 | 21 | // this and cnt_pattern_probes are a workaround for the generate_pattern_for_ARM as we there somehow need to keep 22 | // track of whether we need to generate new pattern or only randomize the mapping of an existing one 23 | static HammeringPattern hammering_pattern; 24 | 25 | // maps (pattern_id) -> (address_mapper_id -> number_of_detected_bit_flips) where 'number_of_detected_bit_flips' 26 | // refers to the number of bit flips we detected when hammering a pattern at a specific location 27 | // note: it does not consider the bit flips triggered during the reproducibility runs 28 | static std::unordered_map> map_pattern_mappings_bitflips; 29 | 30 | static void do_random_accesses(const std::vector& random_rows, int duration_us); 31 | 32 | static void n_sided_frequency_based_hammering(DramAnalyzer &dramAnalyzer, Memory &memory, int acts, 33 | unsigned long runtime_limit, size_t probes_per_pattern, 34 | bool sweep_best_pattern); 35 | 36 | static void test_location_dependence(ReplayingHammerer &rh, HammeringPattern &pattern); 37 | 38 | static void probe_mapping_and_scan(PatternAddressMapper &mapper, Memory &memory, 39 | FuzzingParameterSet &fuzzing_params, size_t num_dram_locations); 40 | 41 | static void log_overall_statistics(size_t cur_round, const std::string &best_mapping_id, 42 | size_t best_mapping_num_bitflips, size_t num_effective_patterns); 43 | 44 | static void generate_pattern_for_ARM(int acts, int *rows_to_access, int max_accesses, const 
size_t probes_per_pattern); 45 | }; 46 | 47 | #endif //BLACKSMITH_SRC_FORGES_FUZZYHAMMERER_HPP_ 48 | -------------------------------------------------------------------------------- /include/Forges/ReplayingHammerer.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2021 by ETH Zurich. 3 | * Licensed under the MIT License, see LICENSE file for more details. 4 | */ 5 | 6 | #ifndef BLACKSMITH_SRC_FORGES_REPLAYINGHAMMERER_HPP_ 7 | #define BLACKSMITH_SRC_FORGES_REPLAYINGHAMMERER_HPP_ 8 | 9 | #include "Fuzzer/HammeringPattern.hpp" 10 | #include "Memory/Memory.hpp" 11 | 12 | #include 13 | 14 | struct SweepSummary { 15 | // Number of observed corruptions from zero to one. 16 | size_t num_flips_z2o; 17 | 18 | // Number of observed corruptions from one to zero. 19 | size_t num_flips_o2z; 20 | 21 | std::vector observed_bitflips; 22 | }; 23 | 24 | class ReplayingHammerer { 25 | private: 26 | // the Memory instance for hammering 27 | Memory &mem; 28 | 29 | // a random number generator, required for std::shuffle 30 | std::mt19937 gen; 31 | 32 | private: 33 | 34 | // maps: (mapping ID) -> (HammeringPattern), because there's no back-reference from mapping to HammeringPattern 35 | std::unordered_map map_mapping_id_to_pattern; 36 | 37 | // the reproducibility score computed during the last invocation of hammer_pattern 38 | static double last_reproducibility_score; 39 | 40 | // the number of times in which hammering a pattern (at the same location) is repeated; this is only the initial 41 | // parameter as later we optimize this value 42 | const int initial_hammering_num_reps = 50; 43 | 44 | // the number of repetitions where we hammer the same pattern at the same location: 45 | // this is a dynamically learned parameter that is derived from the result of the reproducibility runs; optimizing 46 | // this allows to save time (and hammer more patterns) as for some DIMMs hammering longer doesn't increase the chance 47 | // 
to trigger bit flips 48 | int hammering_num_reps = initial_hammering_num_reps; 49 | 50 | size_t hammer_pattern(FuzzingParameterSet &fuzz_params, CodeJitter &code_jitter, HammeringPattern &pattern, 51 | PatternAddressMapper &mapper, FLUSHING_STRATEGY flushing_strategy, 52 | FENCING_STRATEGY fencing_strategy, unsigned long num_reps, int aggressors_for_sync, 53 | int num_activations, bool early_stopping, bool sync_each_ref, bool verbose_sync, 54 | bool verbose_memcheck, bool verbose_params, bool wait_before_hammering, 55 | bool check_flips_after_each_rep); 56 | 57 | 58 | std::vector load_patterns_from_json(const std::string& json_filename, 59 | const std::unordered_set &pattern_ids); 60 | 61 | PatternAddressMapper &determine_most_effective_mapping(HammeringPattern &patt, 62 | bool optimize_hammering_num_reps, 63 | bool offline_mode); 64 | 65 | [[maybe_unused]] void run_refresh_alignment_experiment(PatternAddressMapper &mapper); 66 | 67 | [[maybe_unused]] void run_code_jitting_probing(PatternAddressMapper &mapper); 68 | 69 | [[maybe_unused]] void find_indirect_effective_aggs(PatternAddressMapper &mapper, 70 | const std::unordered_set &direct_effective_aaps, 71 | std::unordered_set &indirect_effective_aggs); 72 | 73 | [[maybe_unused]] void run_pattern_params_probing(PatternAddressMapper &mapper, 74 | const std::unordered_set &direct_effective_aggs, 75 | std::unordered_set &indirect_effective_aggs); 76 | public: 77 | 78 | explicit ReplayingHammerer(Memory &mem); 79 | 80 | void set_params(const FuzzingParameterSet &fuzzParams); 81 | 82 | void replay_patterns(const std::string& json_filename, const std::unordered_set &pattern_ids); 83 | 84 | size_t replay_patterns_brief(const std::string& json_filename, 85 | const std::unordered_set &pattern_ids, size_t sweep_bytes, 86 | bool running_on_original_dimm); 87 | 88 | size_t replay_patterns_brief(std::vector hammering_patterns, size_t sweep_bytes, 89 | size_t num_locations, bool running_on_original_dimm); 90 | 91 | void 
find_direct_effective_aggs(PatternAddressMapper &mapper, 92 | std::unordered_set &direct_effective_aggs); 93 | 94 | void derive_FuzzingParameterSet_values(HammeringPattern &pattern, PatternAddressMapper &mapper); 95 | 96 | SweepSummary sweep_pattern(HammeringPattern &pattern, PatternAddressMapper &mapper, size_t num_reps, 97 | size_t size_bytes); 98 | 99 | SweepSummary sweep_pattern(HammeringPattern &pattern, PatternAddressMapper &mapper, size_t num_reps, 100 | size_t size_bytes, 101 | const std::unordered_set &effective_aggs); 102 | 103 | static void find_direct_effective_aggs(HammeringPattern &pattern, PatternAddressMapper &mapper, 104 | std::unordered_set &direct_effective_aggs); 105 | 106 | // the FuzzingParameterSet instance belonging to the 107 | FuzzingParameterSet params; 108 | 109 | size_t hammer_pattern(FuzzingParameterSet &fuzz_params, CodeJitter &code_jitter, HammeringPattern &pattern, 110 | PatternAddressMapper &mapper, FLUSHING_STRATEGY flushing_strategy, 111 | FENCING_STRATEGY fencing_strategy, unsigned long num_reps, int aggressors_for_sync, 112 | int num_activations, bool early_stopping, bool sync_each_ref, bool verbose_sync, 113 | bool verbose_memcheck, bool verbose_params, bool wait_before_hammering, 114 | bool check_flips_after_each_rep, std::vector &hammering_accesses_vec); 115 | }; 116 | 117 | #endif //BLACKSMITH_SRC_FORGES_REPLAYINGHAMMERER_HPP_ 118 | -------------------------------------------------------------------------------- /include/Forges/TraditionalHammerer.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2021 by ETH Zurich. 3 | * Licensed under the MIT License, see LICENSE file for more details. 
4 | */ 5 | 6 | #ifndef BLACKSMITH_SRC_FORGES_TRADITIONALHAMMERER_HPP_ 7 | #define BLACKSMITH_SRC_FORGES_TRADITIONALHAMMERER_HPP_ 8 | 9 | #include "Memory/Memory.hpp" 10 | 11 | class TraditionalHammerer { 12 | private: 13 | static void hammer(std::vector &aggressors); 14 | 15 | static void hammer_sync(std::vector &aggressors, int acts, volatile char *d1, volatile char *d2); 16 | 17 | public: 18 | // do n-sided hammering 19 | [[maybe_unused]] static void n_sided_hammer(Memory &memory, int acts, long runtime_limit); 20 | 21 | // run experiment where we systematically try out all possible offsets 22 | [[maybe_unused]] static void n_sided_hammer_experiment(Memory &memory, int acts); 23 | 24 | static void n_sided_hammer_experiment_frequencies(Memory &memory); 25 | 26 | static void hammer(std::vector &aggressors, size_t hammer_rounds); 27 | 28 | static void hammer_flush_early(std::vector &aggressors, size_t reps); 29 | }; 30 | 31 | #endif //BLACKSMITH_SRC_FORGES_TRADITIONALHAMMERER_HPP_ 32 | -------------------------------------------------------------------------------- /include/Fuzzer/Aggressor.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2021 by ETH Zurich. 3 | * Licensed under the MIT License, see LICENSE file for more details. 
4 | */ 5 | 6 | #ifndef AGGRESSOR 7 | #define AGGRESSOR 8 | 9 | #include 10 | #include 11 | 12 | #ifdef ENABLE_JSON 13 | #include 14 | #endif 15 | 16 | #include "Memory/DRAMAddr.hpp" 17 | 18 | const int ID_PLACEHOLDER_AGG = -1; 19 | 20 | typedef int AGGRESSOR_ID_TYPE; 21 | 22 | class Aggressor { 23 | public: 24 | AGGRESSOR_ID_TYPE id = ID_PLACEHOLDER_AGG; 25 | 26 | // default constructor: required to enable vector initialization 27 | Aggressor() = default; 28 | 29 | // creates a new Aggressor; the caller must ensure that the ID is valid 30 | explicit Aggressor(int id); 31 | 32 | [[nodiscard]] std::string to_string() const; 33 | 34 | static std::vector get_agg_ids(const std::vector &aggressors); 35 | 36 | static std::vector create_aggressors(const std::vector &agg_ids); 37 | 38 | ~Aggressor() = default; 39 | 40 | Aggressor(const Aggressor &other) = default; 41 | 42 | Aggressor& operator=(const Aggressor &other); 43 | 44 | }; 45 | 46 | #ifdef ENABLE_JSON 47 | 48 | void to_json(nlohmann::json &j, const Aggressor &p); 49 | 50 | void from_json(const nlohmann::json &j, Aggressor &p); 51 | 52 | #endif 53 | 54 | #endif /* AGGRESSOR */ 55 | -------------------------------------------------------------------------------- /include/Fuzzer/AggressorAccessPattern.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2021 by ETH Zurich. 3 | * Licensed under the MIT License, see LICENSE file for more details. 
4 | */ 5 | 6 | #ifndef AGGRESSORACCESSPATTERN 7 | #define AGGRESSORACCESSPATTERN 8 | 9 | #include 10 | #include 11 | 12 | #ifdef ENABLE_JSON 13 | #include 14 | #endif 15 | 16 | #include "Fuzzer/Aggressor.hpp" 17 | 18 | class AggressorAccessPattern { 19 | public: 20 | size_t frequency; 21 | 22 | int amplitude; 23 | 24 | size_t start_offset; 25 | 26 | std::vector aggressors; 27 | 28 | AggressorAccessPattern() 29 | : frequency(0), amplitude(0), start_offset(0) {}; 30 | 31 | AggressorAccessPattern(size_t frequency, 32 | int amplitude, 33 | std::vector &aggs, 34 | size_t absolute_offset) 35 | : frequency(frequency), 36 | amplitude(amplitude), 37 | start_offset(absolute_offset), 38 | aggressors(aggs) { 39 | } 40 | 41 | ~AggressorAccessPattern() = default; 42 | 43 | AggressorAccessPattern(const AggressorAccessPattern &other) = default; 44 | 45 | AggressorAccessPattern& operator=(const AggressorAccessPattern &other); 46 | 47 | [[nodiscard]] std::string to_string() const; 48 | }; 49 | 50 | bool operator==(const AggressorAccessPattern& lhs, const AggressorAccessPattern& rhs); 51 | 52 | // required to use this class with std::unordered_set or any associative container 53 | template<> struct std::hash { 54 | std::size_t operator()(AggressorAccessPattern const& s) const noexcept { 55 | std::size_t h1 = std::hash{}(s.frequency); 56 | std::size_t h2 = std::hash{}(s.amplitude); 57 | std::size_t h3 = std::hash{}(s.start_offset); 58 | std::size_t h4 = std::hash{}(s.aggressors.size()); 59 | return h1 ^ (h3 << h2) ^ (h3 << h4); 60 | } 61 | }; 62 | 63 | #ifdef ENABLE_JSON 64 | 65 | void to_json(nlohmann::json &j, const AggressorAccessPattern &p); 66 | 67 | void from_json(const nlohmann::json &j, AggressorAccessPattern &p); 68 | 69 | #endif 70 | 71 | #endif /* AGGRESSORACCESSPATTERN */ 72 | -------------------------------------------------------------------------------- /include/Fuzzer/BitFlip.hpp: -------------------------------------------------------------------------------- 1 | /* 2 
| * Copyright (c) 2021 by ETH Zurich. 3 | * Licensed under the MIT License, see LICENSE file for more details. 4 | */ 5 | 6 | #ifndef BLACKSMITH_INCLUDE_FUZZER_BITFLIP_HPP_ 7 | #define BLACKSMITH_INCLUDE_FUZZER_BITFLIP_HPP_ 8 | 9 | #include "Memory/DRAMAddr.hpp" 10 | 11 | class BitFlip { 12 | public: 13 | // the address where the bit flip was observed 14 | DRAMAddr address; 15 | 16 | // mask of the bits that flipped, i.e., positions where value == 1 -> flipped bit 17 | uint8_t bitmask = 0; 18 | 19 | // data containing the bit flips 20 | uint8_t corrupted_data = 0; 21 | 22 | BitFlip(); 23 | 24 | BitFlip(const DRAMAddr &address, uint8_t flips_bitmask, uint8_t corrupted_data); 25 | 26 | [[nodiscard]] size_t count_z2o_corruptions() const; 27 | 28 | [[nodiscard]] size_t count_o2z_corruptions() const; 29 | 30 | [[nodiscard]] size_t count_bit_corruptions() const; 31 | 32 | time_t observation_time; 33 | }; 34 | 35 | #ifdef ENABLE_JSON 36 | 37 | void to_json(nlohmann::json &j, const BitFlip &p); 38 | 39 | void from_json(const nlohmann::json &j, BitFlip &p); 40 | 41 | #endif 42 | 43 | #endif //BLACKSMITH_INCLUDE_FUZZER_BITFLIP_HPP_ 44 | -------------------------------------------------------------------------------- /include/Fuzzer/CodeJitter.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2021 by ETH Zurich. 3 | * Licensed under the MIT License, see LICENSE file for more details. 
4 | */ 5 | 6 | #ifndef CODEJITTER 7 | #define CODEJITTER 8 | 9 | #include 10 | #include 11 | 12 | #include "Utilities/Enums.hpp" 13 | #include "Fuzzer/FuzzingParameterSet.hpp" 14 | 15 | #ifdef ENABLE_JITTING 16 | #include 17 | #endif 18 | 19 | #ifdef ENABLE_JSON 20 | #include 21 | #endif 22 | 23 | class CodeJitter { 24 | private: 25 | #ifdef ENABLE_JITTING 26 | /// runtime for JIT code execution, can be reused by cleaning the function ptr (see cleanup method) 27 | asmjit::JitRuntime runtime; 28 | 29 | /// a logger that keeps track of the generated ASM instructions - useful for debugging 30 | asmjit::StringLogger *logger = nullptr; 31 | #endif 32 | 33 | /// a function pointer to a function that takes no input (void) and returns an integer 34 | int (*fn)() = nullptr; 35 | 36 | public: 37 | bool pattern_sync_each_ref; 38 | 39 | FLUSHING_STRATEGY flushing_strategy; 40 | 41 | FENCING_STRATEGY fencing_strategy; 42 | 43 | int total_activations; 44 | 45 | int num_aggs_for_sync; 46 | 47 | /// constructor 48 | CodeJitter(); 49 | 50 | /// destructor 51 | ~CodeJitter(); 52 | 53 | /// generates the jitted function and assigns the function pointer fn to it 54 | void jit_strict(int num_acts_per_trefi, 55 | FLUSHING_STRATEGY flushing, 56 | FENCING_STRATEGY fencing, 57 | const std::vector &aggressor_pairs, 58 | bool sync_each_ref, 59 | int num_aggressors_for_sync, 60 | int total_num_activations); 61 | 62 | /// does the hammering if the function was previously created successfully, otherwise does nothing 63 | int hammer_pattern(FuzzingParameterSet &fuzzing_parameters, bool verbose); 64 | 65 | /// cleans this instance associated function pointer that points to the function that was jitted at runtime; 66 | /// cleaning up is required to release memory before jit_strict can be called again 67 | void cleanup(); 68 | 69 | #ifdef ENABLE_JITTING 70 | static void sync_ref(const std::vector &aggressor_pairs, asmjit::x86::Assembler &assembler); 71 | #endif 72 | }; 73 | 74 | #ifdef ENABLE_JSON 
75 | 76 | void to_json(nlohmann::json &j, const CodeJitter &p); 77 | 78 | void from_json(const nlohmann::json &j, CodeJitter &p); 79 | 80 | #endif 81 | 82 | #endif /* CODEJITTER */ 83 | -------------------------------------------------------------------------------- /include/Fuzzer/FuzzingParameterSet.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2021 by ETH Zurich. 3 | * Licensed under the MIT License, see LICENSE file for more details. 4 | */ 5 | 6 | #ifndef BLACKSMITH_INCLUDE_FUZZER_FUZZINGPARAMETERSET_HPP_ 7 | #define BLACKSMITH_INCLUDE_FUZZER_FUZZINGPARAMETERSET_HPP_ 8 | 9 | #include 10 | #include 11 | 12 | #include "Utilities/Range.hpp" 13 | #include "Utilities/Enums.hpp" 14 | 15 | class FuzzingParameterSet { 16 | private: 17 | std::mt19937 gen; 18 | 19 | /// MC issues a REFRESH every 7.8us to ensure that all cells are refreshed within a 64ms interval. 20 | int num_refresh_intervals; 21 | 22 | /// The numbers of aggressors to be picked from during random pattern generation. 
23 | int num_aggressors; 24 | 25 | int agg_intra_distance; 26 | 27 | int agg_inter_distance; 28 | 29 | // initialized with -1 to add check for undefined/default value 30 | int num_activations_per_tREFI = -1; 31 | 32 | int hammering_total_num_activations; 33 | 34 | int base_period; 35 | 36 | int max_row_no; 37 | 38 | int total_acts_pattern; 39 | 40 | Range start_row; 41 | 42 | Range num_aggressors_for_sync; 43 | 44 | Range bank_no; 45 | 46 | Range use_sequential_aggressors; 47 | 48 | Range amplitude; 49 | 50 | Range N_sided; 51 | 52 | Range sync_each_ref; 53 | 54 | Range wait_until_start_hammering_refs; 55 | 56 | std::discrete_distribution N_sided_probabilities; 57 | 58 | [[nodiscard]] std::string get_dist_string() const; 59 | 60 | void set_distribution(Range range_N_sided, std::unordered_map probabilities); 61 | 62 | public: 63 | FuzzingParameterSet() = default; 64 | 65 | explicit FuzzingParameterSet(int measured_num_acts_per_ref); 66 | 67 | FLUSHING_STRATEGY flushing_strategy; 68 | 69 | FENCING_STRATEGY fencing_strategy; 70 | 71 | [[nodiscard]] int get_hammering_total_num_activations() const; 72 | 73 | [[nodiscard]] int get_num_aggressors() const; 74 | 75 | int get_random_amplitude(int max); 76 | 77 | int get_random_N_sided(); 78 | 79 | [[nodiscard]] int get_base_period() const; 80 | 81 | [[nodiscard]] int get_agg_intra_distance(); 82 | 83 | [[nodiscard]] int get_agg_inter_distance() const; 84 | 85 | int get_random_even_divisior(int n, int min_value); 86 | 87 | int get_random_N_sided(int upper_bound_max); 88 | 89 | int get_random_start_row(); 90 | 91 | [[nodiscard]] int get_num_activations_per_t_refi() const; 92 | 93 | [[nodiscard]] int get_total_acts_pattern() const; 94 | 95 | bool get_random_use_seq_addresses(); 96 | 97 | bool get_random_sync_each_ref(); 98 | 99 | void randomize_parameters(bool print = true); 100 | 101 | [[nodiscard]] int get_max_row_no() const; 102 | 103 | int get_random_num_aggressors_for_sync(); 104 | 105 | int 
get_random_wait_until_start_hammering_us(); 106 | 107 | [[nodiscard]] int get_num_refresh_intervals() const; 108 | 109 | [[nodiscard]] int get_num_base_periods() const; 110 | 111 | void set_total_acts_pattern(int pattern_total_acts); 112 | 113 | void set_hammering_total_num_activations(int hammering_total_acts); 114 | 115 | void set_agg_intra_distance(int agg_intra_dist); 116 | 117 | void set_agg_inter_distance(int agg_inter_dist); 118 | 119 | void set_use_sequential_aggressors(const Range &use_seq_addresses); 120 | 121 | void print_semi_dynamic_parameters() const; 122 | 123 | void print_static_parameters() const; 124 | 125 | static void print_dynamic_parameters(int bank, bool seq_addresses, int start_row); 126 | 127 | static void print_dynamic_parameters2(bool sync_at_each_ref, int wait_until_hammering_us, int num_aggs_for_sync); 128 | 129 | void set_num_activations_per_t_refi(int num_activations_per_t_refi); 130 | }; 131 | 132 | #endif //BLACKSMITH_INCLUDE_FUZZER_FUZZINGPARAMETERSET_HPP_ 133 | -------------------------------------------------------------------------------- /include/Fuzzer/HammeringPattern.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2021 by ETH Zurich. 3 | * Licensed under the MIT License, see LICENSE file for more details. 
4 | */ 5 | 6 | #ifndef HAMMERING_PATTERN 7 | #define HAMMERING_PATTERN 8 | 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | #ifdef ENABLE_JSON 15 | #include 16 | #endif 17 | 18 | #include "Fuzzer/AggressorAccessPattern.hpp" 19 | #include "Utilities/Range.hpp" 20 | #include "Utilities/Uuid.hpp" 21 | #include "PatternAddressMapper.hpp" 22 | 23 | class HammeringPattern { 24 | private: 25 | static int get_num_digits(size_t x); 26 | 27 | public: 28 | std::string instance_id; 29 | 30 | // the base period this hammering pattern was generated for 31 | int base_period; 32 | 33 | size_t max_period; 34 | 35 | int total_activations; 36 | 37 | int num_refresh_intervals; 38 | 39 | // is a pattern is location dependent, then there are some aggressors that are bypassing the mitigation because of 40 | // their absolute location in DRAM; in this case we need to move only the aggressor pair triggered the bit flips while 41 | // sweeping the pattern over memory 42 | bool is_location_dependent; 43 | 44 | // the order in which aggressor accesses happen 45 | std::vector aggressors; 46 | 47 | // additional and more structured information about the aggressors involved in this pattern such as whether they are 1-sided or 2-sided 48 | std::vector agg_access_patterns; 49 | 50 | // from an OOP perspective it would make more sense to have a reference to this HammeringPattern in each of the 51 | // PatternAddressMapper objects; however, for the JSON export having this vector of mappings for a pattern works 52 | // better because we need to foreign keys and can easily associate this HammeringPattern to N PatternAddressMappings 53 | std::vector address_mappings; 54 | 55 | HammeringPattern(); 56 | 57 | explicit HammeringPattern(int base_period); 58 | 59 | std::string get_pattern_text_repr(); 60 | 61 | std::string get_agg_access_pairs_text_repr(); 62 | 63 | AggressorAccessPattern &get_access_pattern_by_aggressor(Aggressor &agg); 64 | 65 | PatternAddressMapper 
&get_most_effective_mapping(); 66 | 67 | void remove_mappings_without_bitflips(); 68 | }; 69 | 70 | #ifdef ENABLE_JSON 71 | 72 | void to_json(nlohmann::json &j, const HammeringPattern &p); 73 | 74 | void from_json(const nlohmann::json &j, HammeringPattern &p); 75 | 76 | #endif 77 | 78 | #endif /* HAMMERING_PATTERN */ 79 | -------------------------------------------------------------------------------- /include/Fuzzer/PatternAddressMapper.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2021 by ETH Zurich. 3 | * Licensed under the MIT License, see LICENSE file for more details. 4 | */ 5 | 6 | #ifndef BLACKSMITH_INCLUDE_PATTERNADDRESSMAPPER_H_ 7 | #define BLACKSMITH_INCLUDE_PATTERNADDRESSMAPPER_H_ 8 | 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | #ifdef ENABLE_JSON 15 | #include 16 | #endif 17 | 18 | #include "Fuzzer/Aggressor.hpp" 19 | #include "Fuzzer/AggressorAccessPattern.hpp" 20 | #include "Fuzzer/BitFlip.hpp" 21 | #include "Fuzzer/FuzzingParameterSet.hpp" 22 | #include "Fuzzer/CodeJitter.hpp" 23 | 24 | class PatternAddressMapper { 25 | private: 26 | void export_pattern_internal(std::vector &aggressors, 27 | int base_period, 28 | std::vector &addresses, 29 | std::vector &rows); 30 | 31 | std::unordered_set victim_rows; 32 | 33 | // the unique identifier of this pattern-to-address mapping 34 | std::string instance_id; 35 | 36 | // a randomization engine 37 | std::mt19937 gen; 38 | 39 | public: 40 | std::unique_ptr code_jitter; 41 | 42 | PatternAddressMapper(); 43 | 44 | // copy constructor 45 | PatternAddressMapper(const PatternAddressMapper& other); 46 | 47 | // copy assignment operator 48 | PatternAddressMapper& operator=(const PatternAddressMapper& other); 49 | 50 | // information about the mapping (required for determining rows not belonging to this mapping) 51 | size_t min_row = 0; 52 | size_t max_row = 0; 53 | int bank_no = 0; 54 | 55 | // a global counter that makes sure 
that we test patterns on all banks equally often 56 | // it is incremented for each mapping and reset to 0 once we tested all banks (depending on num_probes_per_pattern 57 | // this may happen after we tested more than one pattern) 58 | static int bank_counter; 59 | 60 | // a mapping from aggressors included in this pattern to memory addresses (DRAMAddr) 61 | std::unordered_map aggressor_to_addr; 62 | 63 | // the bit flips that were detected while running the pattern with this mapping 64 | std::vector> bit_flips; 65 | 66 | // the reproducibility score of this mapping, e.g., 67 | // 1 => 100%: was reproducible in all reproducibility runs executed, 68 | // 0.4 => 40%: was reproducible in 40% of all reproducibility runs executed 69 | int reproducibility_score = -1; 70 | 71 | // chooses new addresses for the aggressors involved in its referenced HammeringPattern 72 | void randomize_addresses(FuzzingParameterSet &fuzzing_params, 73 | const std::vector &agg_access_patterns, 74 | bool verbose); 75 | 76 | void remap_aggressors(DRAMAddr &new_location); 77 | 78 | void export_pattern(std::vector &aggressors, int base_period, std::vector &addresses); 79 | 80 | [[nodiscard]] const std::string &get_instance_id() const; 81 | 82 | std::string &get_instance_id(); 83 | 84 | void export_pattern(std::vector &aggressors, size_t base_period, int *rows, size_t max_rows); 85 | 86 | [[nodiscard]] const std::unordered_set & get_victim_rows() const; 87 | 88 | std::vector get_random_nonaccessed_rows(int row_upper_bound); 89 | 90 | void determine_victims(const std::vector &agg_access_patterns); 91 | 92 | std::string get_mapping_text_repr(); 93 | 94 | [[nodiscard]] CodeJitter & get_code_jitter() const; 95 | 96 | void compute_mapping_stats(std::vector &agg_access_patterns, int &agg_intra_distance, 97 | int &agg_inter_distance, bool uses_seq_addresses); 98 | 99 | void shift_mapping(int rows, const std::unordered_set &aggs_to_move); 100 | 101 | [[nodiscard]] size_t count_bitflips() const; 102 | }; 
103 | 104 | #ifdef ENABLE_JSON 105 | 106 | void to_json(nlohmann::json &j, const PatternAddressMapper &p); 107 | 108 | void from_json(const nlohmann::json &j, PatternAddressMapper &p); 109 | 110 | #endif 111 | 112 | #endif //BLACKSMITH_INCLUDE_PATTERNADDRESSMAPPER_H_ 113 | -------------------------------------------------------------------------------- /include/Fuzzer/PatternBuilder.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2021 by ETH Zurich. 3 | * Licensed under the MIT License, see LICENSE file for more details. 4 | */ 5 | 6 | #ifndef PATTERNBUILDER 7 | #define PATTERNBUILDER 8 | 9 | #ifdef ENABLE_JITTING 10 | #include 11 | #endif 12 | 13 | #include 14 | #include 15 | #include 16 | 17 | #include "Fuzzer/HammeringPattern.hpp" 18 | #include "Utilities/Range.hpp" 19 | 20 | class PatternBuilder { 21 | private: 22 | HammeringPattern &pattern; 23 | 24 | std::mt19937 gen; 25 | 26 | int aggressor_id_counter; 27 | 28 | static int get_next_prefilled_slot(size_t cur_idx, std::vector start_indices_prefilled_slots, int base_period, 29 | int &cur_prefilled_slots_idx); 30 | 31 | public: 32 | /// default constructor that randomizes fuzzing parameters 33 | explicit PatternBuilder(HammeringPattern &hammering_pattern); 34 | 35 | void generate_frequency_based_pattern(FuzzingParameterSet ¶ms, int pattern_length, int base_period); 36 | 37 | void generate_frequency_based_pattern(FuzzingParameterSet ¶ms); 38 | 39 | size_t get_random_gaussian(std::vector &list); 40 | 41 | static void remove_smaller_than(std::vector &vec, int N); 42 | 43 | static int all_slots_full(size_t offset, size_t period, int pattern_length, std::vector &aggs); 44 | 45 | static void fill_slots(size_t start_period, 46 | size_t period_length, 47 | size_t amplitude, 48 | std::vector &aggressors, 49 | std::vector &accesses, 50 | size_t pattern_length); 51 | 52 | void get_n_aggressors(size_t N, std::vector &aggs); 53 | 54 | void prefill_pattern(int 
pattern_total_acts, std::vector &fixed_aggs); 55 | 56 | static std::vector get_available_multiplicators(FuzzingParameterSet &fuzzing_params); 57 | 58 | static std::vector get_available_multiplicators(int num_base_periods); 59 | }; 60 | 61 | #endif /* PATTERNBUILDER */ 62 | -------------------------------------------------------------------------------- /include/GlobalDefines.hpp: -------------------------------------------------------------------------------- 1 | #ifndef GLOBAL_DEFINES 2 | #define GLOBAL_DEFINES 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #include "Utilities/Logger.hpp" 10 | 11 | uint64_t static inline MB(uint64_t value) { 12 | return ((value) << 20ULL); 13 | } 14 | 15 | uint64_t static inline GB(uint64_t value) { 16 | return ((value) << 30ULL); 17 | } 18 | 19 | [[gnu::unused]] static inline uint64_t BIT_SET(uint64_t value) { 20 | return (1ULL << (value)); 21 | } 22 | 23 | // font colors 24 | #define FC_RED "\033[0;31m" // error 25 | #define FC_RED_BRIGHT "\033[0;91m" // generic failure message 26 | #define FC_GREEN "\033[0;32m" // bit flip, generic success message 27 | #define FC_YELLOW "\033[0;33m" // debugging 28 | #define FC_MAGENTA "\033[0;35m" // new (pattern,address_mapping) rond 29 | #define FC_CYAN "\033[0;36m" // status message 30 | #define FC_CYAN_BRIGHT "\033[0;96m" // stages in pattern analysis 31 | 32 | // font faces 33 | #define FF_BOLD "\033[1m" 34 | #define F_RESET "\033[0m" // reset to default font face/color 35 | 36 | // ######################################################## 37 | // ################### CONFIG PARAMETERS ################## 38 | // ######################################################## 39 | 40 | // number of rounds to measure cache hit/miss latency 41 | #define DRAMA_ROUNDS 1000 42 | 43 | // size in bytes of a cacheline 44 | #define CACHELINE_SIZE 64 45 | 46 | // number of rounds to hammer 47 | #define HAMMER_ROUNDS 1000000 48 | 49 | // threshold to distinguish between row buffer miss (t > 
THRESH) and row buffer hit (t < THRESH) 50 | #define THRESH 495 // worked best on DIMM 6 51 | //#define THRESH 430 // worked best on DIMM 18 52 | 53 | // number of conflicting addresses to be determined for each bank 54 | #define NUM_TARGETS 10 55 | 56 | // maximum number of aggressor rows 57 | #define MAX_ROWS 30 58 | 59 | // number of banks in the system 60 | #define NUM_BANKS 16 61 | 62 | // number of active DIMMs in the system 63 | #define DIMM 1 64 | 65 | // number of active channels in the system 66 | #define CHANNEL 1 67 | 68 | // number of bytes to be allocated 69 | #define MEM_SIZE (GB(1)) 70 | 71 | #endif /* GLOBAL_DEFINES */ 72 | -------------------------------------------------------------------------------- /include/Memory/DRAMAddr.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2021 by ETH Zurich. 3 | * Licensed under the MIT License, see LICENSE file for more details. 4 | */ 5 | 6 | #ifndef DRAMADDR 7 | #define DRAMADDR 8 | 9 | #include 10 | #include 11 | #include 12 | 13 | #ifdef ENABLE_JSON 14 | #include 15 | #endif 16 | 17 | #define CHANS(x) ((x) << (8UL * 3UL)) 18 | #define DIMMS(x) ((x) << (8UL * 2UL)) 19 | #define RANKS(x) ((x) << (8UL * 1UL)) 20 | #define BANKS(x) ((x) << (8UL * 0UL)) 21 | 22 | #define MTX_SIZE (30) 23 | 24 | typedef size_t mem_config_t; 25 | 26 | struct MemConfiguration { 27 | size_t IDENTIFIER; 28 | size_t BK_SHIFT; 29 | size_t BK_MASK; 30 | size_t ROW_SHIFT; 31 | size_t ROW_MASK; 32 | size_t COL_SHIFT; 33 | size_t COL_MASK; 34 | size_t DRAM_MTX[MTX_SIZE]; 35 | size_t ADDR_MTX[MTX_SIZE]; 36 | }; 37 | 38 | class DRAMAddr { 39 | private: 40 | // Class attributes 41 | static std::map Configs; 42 | static MemConfiguration MemConfig; 43 | static size_t base_msb; 44 | 45 | [[nodiscard]] size_t linearize() const; 46 | 47 | public: 48 | size_t bank{}; 49 | size_t row{}; 50 | size_t col{}; 51 | 52 | // class methods 53 | static void set_base_msb(void *buff); 54 | 55 | 
static void load_mem_config(mem_config_t cfg); 56 | 57 | // instance methods 58 | DRAMAddr(size_t bk, size_t r, size_t c); 59 | 60 | explicit DRAMAddr(void *addr); 61 | 62 | // must be DefaultConstructible for JSON (de-)serialization 63 | DRAMAddr(); 64 | 65 | void *to_virt(); 66 | 67 | [[gnu::unused]] std::string to_string(); 68 | 69 | static void initialize(uint64_t num_bank_rank_functions, volatile char *start_address); 70 | 71 | [[nodiscard]] std::string to_string_compact() const; 72 | 73 | [[nodiscard]] void *to_virt() const; 74 | 75 | [[nodiscard]] DRAMAddr add(size_t bank_increment, size_t row_increment, size_t column_increment) const; 76 | 77 | void add_inplace(size_t bank_increment, size_t row_increment, size_t column_increment); 78 | 79 | #ifdef ENABLE_JSON 80 | static nlohmann::json get_memcfg_json(); 81 | #endif 82 | 83 | static void initialize_configs(); 84 | }; 85 | 86 | #ifdef ENABLE_JSON 87 | 88 | void to_json(nlohmann::json &j, const DRAMAddr &p); 89 | 90 | void from_json(const nlohmann::json &j, DRAMAddr &p); 91 | 92 | #endif 93 | 94 | #endif /* DRAMADDR */ 95 | -------------------------------------------------------------------------------- /include/Memory/DramAnalyzer.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2021 by ETH Zurich. 3 | * Licensed under the MIT License, see LICENSE file for more details. 
4 | */ 5 | 6 | #ifndef DRAMANALYZER 7 | #define DRAMANALYZER 8 | 9 | #include 10 | #include 11 | #include 12 | 13 | #include "Utilities/AsmPrimitives.hpp" 14 | 15 | class DramAnalyzer { 16 | private: 17 | std::vector> banks; 18 | 19 | std::vector bank_rank_functions; 20 | 21 | uint64_t row_function; 22 | 23 | volatile char *start_address; 24 | 25 | void find_targets(std::vector &target_bank); 26 | 27 | std::mt19937 gen; 28 | 29 | std::uniform_int_distribution dist; 30 | 31 | public: 32 | explicit DramAnalyzer(volatile char *target); 33 | 34 | /// Finds addresses of the same bank causing bank conflicts when accessed sequentially 35 | void find_bank_conflicts(); 36 | 37 | /// Measures the time between accessing two addresses. 38 | static int inline measure_time(volatile char *a1, volatile char *a2) { 39 | uint64_t before, after; 40 | before = rdtscp(); 41 | lfence(); 42 | for (size_t i = 0; i < DRAMA_ROUNDS; i++) { 43 | (void)*a1; 44 | (void)*a2; 45 | clflushopt(a1); 46 | clflushopt(a2); 47 | mfence(); 48 | } 49 | after = rdtscp(); 50 | return (int) ((after - before)/DRAMA_ROUNDS); 51 | } 52 | 53 | std::vector get_bank_rank_functions(); 54 | 55 | void load_known_functions(int num_ranks); 56 | 57 | /// Determine the number of possible activations within a refresh interval. 58 | size_t count_acts_per_trefi(); 59 | }; 60 | 61 | #endif /* DRAMANALYZER */ 62 | -------------------------------------------------------------------------------- /include/Memory/Memory.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2021 by ETH Zurich. 3 | * Licensed under the MIT License, see LICENSE file for more details. 
4 | */ 5 | 6 | #ifndef BLACKSMITH_SRC_MEMORY_H_ 7 | #define BLACKSMITH_SRC_MEMORY_H_ 8 | 9 | #include 10 | #include 11 | #include 12 | 13 | #include "Memory/DramAnalyzer.hpp" 14 | #include "Fuzzer/PatternAddressMapper.hpp" 15 | 16 | enum class DATA_PATTERN : char { 17 | ZEROES, ONES, RANDOM 18 | }; 19 | 20 | class Memory { 21 | private: 22 | /// the starting address of the allocated memory area 23 | /// this is a fixed value as the assumption is that all memory cells are equally vulnerable 24 | volatile char *start_address = (volatile char *) 0x2000000000; 25 | 26 | // the mount point of the huge pages filesystem 27 | const std::string hugetlbfs_mountpoint = "/mnt/huge/buff"; 28 | 29 | // the size of the allocated memory area in bytes 30 | uint64_t size; 31 | 32 | // whether this memory allocation is backed up by a superage 33 | const bool superpage; 34 | 35 | size_t check_memory_internal(PatternAddressMapper &mapping, const volatile char *start, 36 | const volatile char *end, bool reproducibility_mode, bool verbose); 37 | 38 | public: 39 | 40 | // the flipped bits detected during the last call to check_memory 41 | std::vector flipped_bits; 42 | 43 | explicit Memory(bool use_superpage); 44 | 45 | ~Memory(); 46 | 47 | void allocate_memory(size_t mem_size); 48 | 49 | void initialize(DATA_PATTERN data_pattern); 50 | 51 | size_t check_memory(const volatile char *start, const volatile char *end); 52 | 53 | size_t check_memory(PatternAddressMapper &mapping, bool reproducibility_mode, bool verbose); 54 | 55 | [[nodiscard]] volatile char *get_starting_address() const; 56 | 57 | std::string get_flipped_rows_text_repr(); 58 | }; 59 | 60 | #endif //BLACKSMITH_SRC_MEMORY_H_ 61 | -------------------------------------------------------------------------------- /include/Utilities/AsmPrimitives.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2021 by ETH Zurich. 
3 | * Licensed under the MIT License, see LICENSE file for more details. 4 | */ 5 | 6 | #ifndef UTILS 7 | #define UTILS 8 | 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | #include "GlobalDefines.hpp" 16 | 17 | [[gnu::unused]] static inline __attribute__((always_inline)) void clflush(volatile void *p) { 18 | asm volatile("clflush (%0)\n"::"r"(p) 19 | : "memory"); 20 | } 21 | 22 | [[gnu::unused]] static inline __attribute__((always_inline)) void clflushopt(volatile void *p) { 23 | #ifdef DDR3 24 | asm volatile("clflush (%0)\n" ::"r"(p) 25 | : "memory"); 26 | #else 27 | asm volatile("clflushopt (%0)\n"::"r"(p) 28 | : "memory"); 29 | # 30 | 31 | #endif 32 | } 33 | 34 | [[gnu::unused]] static inline __attribute__((always_inline)) void cpuid() { 35 | asm volatile("cpuid":: 36 | : "rax", "rbx", "rcx", "rdx"); 37 | } 38 | 39 | [[gnu::unused]] static inline __attribute__((always_inline)) void mfence() { 40 | asm volatile("mfence":: 41 | : "memory"); 42 | } 43 | 44 | [[gnu::unused]] static inline __attribute__((always_inline)) void sfence() { 45 | asm volatile("sfence":: 46 | : "memory"); 47 | } 48 | 49 | [[gnu::unused]] static inline __attribute__((always_inline)) void lfence() { 50 | asm volatile("lfence":: 51 | : "memory"); 52 | } 53 | 54 | [[gnu::unused]] static inline __attribute__((always_inline)) uint64_t rdtscp() { 55 | uint64_t lo, hi; 56 | asm volatile("rdtscp\n" 57 | : "=a"(lo), "=d"(hi)::"%rcx"); 58 | return (hi << 32UL) | lo; 59 | } 60 | 61 | [[gnu::unused]] static inline __attribute__((always_inline)) uint64_t rdtsc() { 62 | uint64_t lo, hi; 63 | asm volatile("rdtsc\n" 64 | : "=a"(lo), "=d"(hi)::"%rcx"); 65 | return (hi << 32UL) | lo; 66 | } 67 | 68 | [[gnu::unused]] static inline __attribute__((always_inline)) uint64_t realtime_now() { 69 | struct timespec now_ts{}; 70 | clock_gettime(CLOCK_MONOTONIC, &now_ts); 71 | return static_cast( 72 | (static_cast(now_ts.tv_sec)*1e9 + static_cast(now_ts.tv_nsec))); 73 | } 74 | 75 | #endif 
/* UTILS */ 76 | -------------------------------------------------------------------------------- /include/Utilities/Enums.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2021 by ETH Zurich. 3 | * Licensed under the MIT License, see LICENSE file for more details. 4 | */ 5 | 6 | #ifndef BLACKSMITH_INCLUDE_UTILITIES_ENUMS_HPP_ 7 | #define BLACKSMITH_INCLUDE_UTILITIES_ENUMS_HPP_ 8 | 9 | #include 10 | #include 11 | 12 | enum class FLUSHING_STRATEGY : int { 13 | // flush an accessed aggressor as soon as it has been accessed (i.e., pairs are flushed in-between) 14 | EARLIEST_POSSIBLE = 1, 15 | // add the flush right before the next access of the aggressor 16 | LATEST_POSSIBLE = 2 17 | }; 18 | 19 | std::string to_string(FLUSHING_STRATEGY strategy); 20 | 21 | void from_string(const std::string &strategy, FLUSHING_STRATEGY &dest); 22 | 23 | enum class FENCING_STRATEGY : int { 24 | // do not fence before accessing an aggressor even if it has been accessed before 25 | OMIT_FENCING = 0, 26 | // add the fence right after the access 27 | EARLIEST_POSSIBLE = 1, 28 | // add the fence right before the next access of the aggressor if it has been flushed before 29 | LATEST_POSSIBLE = 2, 30 | }; 31 | 32 | std::string to_string(FENCING_STRATEGY strategy); 33 | 34 | void from_string(const std::string &strategy, FENCING_STRATEGY &dest); 35 | 36 | std::vector> get_valid_strategies(); 37 | 38 | [[maybe_unused]] std::pair get_valid_strategy_pair(); 39 | 40 | #endif //BLACKSMITH_INCLUDE_UTILITIES_ENUMS_HPP_ 41 | -------------------------------------------------------------------------------- /include/Utilities/Logger.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2021 by ETH Zurich. 3 | * Licensed under the MIT License, see LICENSE file for more details. 
4 | */ 5 | 6 | #ifndef BLACKSMITH_INCLUDE_LOGGER_HPP_ 7 | #define BLACKSMITH_INCLUDE_LOGGER_HPP_ 8 | 9 | #include 10 | #include 11 | #include 12 | 13 | template 14 | std::string format_string(const std::string &format, Args ... args) { 15 | int size = snprintf(nullptr, 0, format.c_str(), args ...) + 1; // Extra space for '\0' 16 | if (size <= 0) { throw std::runtime_error("Error during formatting."); } 17 | std::unique_ptr buf(new char[size]); 18 | snprintf(buf.get(), static_cast(size), format.c_str(), args ...); 19 | return std::string(buf.get(), buf.get() + size - 1); // We don't want the '\0' inside 20 | } 21 | 22 | class Logger { 23 | private: 24 | Logger(); 25 | 26 | // a reference to the file output stream associated to the logfile 27 | std::ofstream logfile; 28 | 29 | // the logger instance (a singleton) 30 | static Logger instance; 31 | 32 | static std::string format_timestamp(unsigned long ts); 33 | 34 | unsigned long timestamp_start{}; 35 | 36 | public: 37 | 38 | static void initialize(); 39 | 40 | static void close(); 41 | 42 | static void log_info(const std::string &message, bool newline = true); 43 | 44 | static void log_highlight(const std::string &message, bool newline = true); 45 | 46 | static void log_error(const std::string &message, bool newline = true); 47 | 48 | static void log_data(const std::string &message, bool newline = true); 49 | 50 | static void log_bitflip(volatile char *flipped_address, uint64_t row_no, unsigned char actual_value, 51 | unsigned char expected_value, unsigned long timestamp, bool newline); 52 | 53 | static void log_debug(const std::string &message, bool newline = true); 54 | 55 | static void log_timestamp(); 56 | 57 | static void log_global_defines(); 58 | 59 | static void log_metadata(const char *commit_hash, unsigned long run_time_limit_seconds); 60 | 61 | static void log_analysis_stage(const std::string &message, bool newline = true); 62 | 63 | static void log_success(const std::string &message, bool newline = true); 
64 | 65 | static void log_failure(const std::string &message, bool newline = true); 66 | }; 67 | 68 | #endif //BLACKSMITH_INCLUDE_LOGGER_HPP_ 69 | -------------------------------------------------------------------------------- /include/Utilities/Range.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2021 by ETH Zurich. 3 | * Licensed under the MIT License, see LICENSE file for more details. 4 | */ 5 | 6 | #ifndef RANGE 7 | #define RANGE 8 | 9 | #include 10 | #include "Logger.hpp" 11 | 12 | template 13 | struct Range { 14 | T min{0}; 15 | 16 | T max{0}; 17 | 18 | T step{1}; 19 | 20 | std::uniform_int_distribution dist; 21 | 22 | Range() = default; 23 | 24 | Range(T min, T max) : min(min), max(max), dist(std::uniform_int_distribution(min, max)) {} 25 | 26 | Range(T min, T max, T step) : min(min), max(max), step(step) { 27 | if (min%step!=0 || max%step!=0) { 28 | Logger::log_error( 29 | format_string("Range(%d,%d,%d) failed: min and max must both be divisible by step.", min, max, step)); 30 | exit(1); 31 | } 32 | dist = std::uniform_int_distribution(min/step, max/step); 33 | } 34 | 35 | T get_random_number(std::mt19937 &gen) { 36 | if (min==max) { 37 | return min; 38 | } else if (max < min) { 39 | std::swap(max, min); 40 | } 41 | auto number = dist(gen); 42 | return (step!=1) ? number*step : number; 43 | } 44 | 45 | T get_random_number(int upper_bound, std::mt19937 &gen) { 46 | T number; 47 | if (max > upper_bound) { 48 | number = Range(min, upper_bound).get_random_number(gen); 49 | } else { 50 | number = dist(gen); 51 | } 52 | return (step!=1) ? number*step : number; 53 | } 54 | }; 55 | #endif /* RANGE */ 56 | -------------------------------------------------------------------------------- /include/Utilities/TimeHelper.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2021 by ETH Zurich. 
3 | * Licensed under the MIT License, see LICENSE file for more details. 4 | */ 5 | 6 | #ifndef BLACKSMITH_INCLUDE_UTILITIES_TIMEHELPER_HPP_ 7 | #define BLACKSMITH_INCLUDE_UTILITIES_TIMEHELPER_HPP_ 8 | 9 | #include 10 | 11 | inline int64_t get_timestamp_sec() { 12 | return std::chrono::duration_cast( 13 | std::chrono::system_clock::now().time_since_epoch()).count(); 14 | } 15 | 16 | inline int64_t get_timestamp_us() { 17 | return std::chrono::duration_cast( 18 | std::chrono::system_clock::now().time_since_epoch()).count(); 19 | } 20 | 21 | #endif //BLACKSMITH_INCLUDE_UTILITIES_TIMEHELPER_HPP_ 22 | -------------------------------------------------------------------------------- /include/Utilities/Uuid.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2021 by ETH Zurich. 3 | * Licensed under the MIT License, see LICENSE file for more details. 4 | */ 5 | 6 | #ifndef UUID 7 | #define UUID 8 | 9 | #include 10 | #include 11 | 12 | namespace uuid { 13 | static std::random_device rd; /* NOLINT */ 14 | static std::mt19937 gen(rd()); /* NOLINT */ 15 | static std::uniform_int_distribution<> dis(0, 15); /* NOLINT */ 16 | static std::uniform_int_distribution<> dis2(8, 11); /* NOLINT */ 17 | 18 | static std::string gen_uuid() { 19 | std::stringstream ss; 20 | int i; 21 | ss << std::hex; 22 | for (i = 0; i < 8; i++) { 23 | ss << dis(gen); 24 | } 25 | ss << "-"; 26 | for (i = 0; i < 4; i++) { 27 | ss << dis(gen); 28 | } 29 | ss << "-4"; 30 | for (i = 0; i < 3; i++) { 31 | ss << dis(gen); 32 | } 33 | ss << "-"; 34 | ss << dis2(gen); 35 | for (i = 0; i < 3; i++) { 36 | ss << dis(gen); 37 | } 38 | ss << "-"; 39 | for (i = 0; i < 12; i++) { 40 | ss << dis(gen); 41 | } 42 | return ss.str(); 43 | } 44 | } // namespace uuid 45 | 46 | #endif 47 | -------------------------------------------------------------------------------- /src/Blacksmith.cpp: -------------------------------------------------------------------------------- 
1 | #include "Blacksmith.hpp" 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | #include "Forges/TraditionalHammerer.hpp" 12 | #include "Forges/FuzzyHammerer.hpp" 13 | 14 | #include 15 | #include 16 | 17 | ProgramArguments program_args; 18 | 19 | int check_cpu() { 20 | std::array buffer{}; 21 | std::string cpu_model; 22 | std::unique_ptr pipe(popen("cat /proc/cpuinfo | grep \"model name\" | cut -d':' -f2 | awk '{$1=$1;print}' | head -1", "r"), pclose); 23 | if (!pipe) { 24 | throw std::runtime_error("popen() failed!"); 25 | } 26 | while (fgets(buffer.data(), buffer.size(), pipe.get()) != nullptr) { 27 | cpu_model += buffer.data(); 28 | } 29 | 30 | Logger::log_info("Detecting CPU model:"); 31 | Logger::log_data(format_string("%s", cpu_model.c_str())); 32 | 33 | std::vector supported_cpus = { 34 | // Coffee Lake 35 | "i5-8400", 36 | "i5-8500", 37 | "i5-8600", 38 | "i5-9400", 39 | "i5-9500", 40 | "i5-9600", 41 | "i7-8086", 42 | "i7-8700", 43 | "i7-9700", 44 | "i7-9900" 45 | }; 46 | 47 | bool cpu_supported = false; 48 | for (const auto &model : supported_cpus) { 49 | cpu_supported |= (cpu_model.find(model) != std::string::npos); 50 | } 51 | 52 | if (!cpu_supported) { 53 | Logger::log_error("CPU model is not supported. You need to run DRAMA to update the DRAM address matrices. 
See the README.md for details."); 54 | exit(EXIT_FAILURE); 55 | } 56 | 57 | return cpu_supported; 58 | } 59 | 60 | int main(int argc, char **argv) { 61 | Logger::initialize(); 62 | 63 | // check if the system's CPU is supported by our hard-coded DRAM address matrices 64 | check_cpu(); 65 | 66 | handle_args(argc, argv); 67 | 68 | // prints the current git commit and some program metadata 69 | Logger::log_metadata(GIT_COMMIT_HASH, program_args.runtime_limit); 70 | 71 | // give this process the highest CPU priority so it can hammer with less interruptions 72 | int ret = setpriority(PRIO_PROCESS, 0, -20); 73 | if (ret!=0) Logger::log_error("Instruction setpriority failed."); 74 | 75 | // allocate a large bulk of contiguous memory 76 | Memory memory(true); 77 | memory.allocate_memory(MEM_SIZE); 78 | 79 | // find address sets that create bank conflicts 80 | DramAnalyzer dram_analyzer(memory.get_starting_address()); 81 | dram_analyzer.find_bank_conflicts(); 82 | if (program_args.num_ranks != 0) { 83 | dram_analyzer.load_known_functions(program_args.num_ranks); 84 | } else { 85 | Logger::log_error("Program argument '--ranks ' was probably not passed. 
Cannot continue."); 86 | exit(EXIT_FAILURE); 87 | } 88 | // initialize the DRAMAddr class to load the proper memory configuration 89 | DRAMAddr::initialize(dram_analyzer.get_bank_rank_functions().size(), memory.get_starting_address()); 90 | 91 | // count the number of possible activations per refresh interval, if not given as program argument 92 | if (program_args.acts_per_trefi==0) 93 | program_args.acts_per_trefi = dram_analyzer.count_acts_per_trefi(); 94 | 95 | if (!program_args.load_json_filename.empty()) { 96 | ReplayingHammerer replayer(memory); 97 | if (program_args.sweeping) { 98 | replayer.replay_patterns_brief(program_args.load_json_filename, program_args.pattern_ids, 99 | MB(256), false); 100 | } else { 101 | replayer.replay_patterns(program_args.load_json_filename, program_args.pattern_ids); 102 | } 103 | } else if (program_args.do_fuzzing && program_args.use_synchronization) { 104 | FuzzyHammerer::n_sided_frequency_based_hammering(dram_analyzer, memory, static_cast(program_args.acts_per_trefi), program_args.runtime_limit, 105 | program_args.num_address_mappings_per_pattern, program_args.sweeping); 106 | } else if (!program_args.do_fuzzing) { 107 | // TraditionalHammerer::n_sided_hammer(memory, program_args.acts_per_trefi, program_args.runtime_limit); 108 | // TraditionalHammerer::n_sided_hammer_experiment(memory, program_args.acts_per_trefi); 109 | TraditionalHammerer::n_sided_hammer_experiment_frequencies(memory); 110 | } else { 111 | Logger::log_error("Invalid combination of program control-flow arguments given. 
" 112 | "Note: Fuzzing is only supported with synchronized hammering."); 113 | } 114 | 115 | Logger::close(); 116 | return EXIT_SUCCESS; 117 | } 118 | 119 | void handle_arg_generate_patterns(int num_activations, const size_t probes_per_pattern) { 120 | // this parameter is defined in FuzzingParameterSet 121 | const size_t MAX_NUM_REFRESH_INTERVALS = 32; 122 | const size_t MAX_ACCESSES = num_activations*MAX_NUM_REFRESH_INTERVALS; 123 | void *rows_to_access = calloc(MAX_ACCESSES, sizeof(int)); 124 | if (rows_to_access==nullptr) { 125 | Logger::log_error("Allocation of rows_to_access failed!"); 126 | exit(EXIT_FAILURE); 127 | } 128 | FuzzyHammerer::generate_pattern_for_ARM(num_activations, static_cast(rows_to_access), static_cast(MAX_ACCESSES), probes_per_pattern); 129 | exit(EXIT_SUCCESS); 130 | } 131 | 132 | void handle_args(int argc, char **argv) { 133 | // An option is specified by four things: 134 | // (1) the name of the option, 135 | // (2) the strings that activate the option (flags), 136 | // (3) the option's help message, 137 | // (4) and the number of arguments the option expects. 
138 | argagg::parser argparser{{ 139 | {"help", {"-h", "--help"}, "shows this help message", 0}, 140 | {"dimm-id", {"-d", "--dimm-id"}, "internal identifier of the currently inserted DIMM (default: 0)", 1}, 141 | {"ranks", {"-r", "--ranks"}, "number of ranks on the DIMM, used to determine bank/rank/row functions, assumes Intel Coffe Lake CPU (default: None)", 1}, 142 | 143 | {"fuzzing", {"-f", "--fuzzing"}, "perform a fuzzing run (default program mode)", 0}, 144 | {"generate-patterns", {"-g", "--generate-patterns"}, "generates N patterns, but does not perform hammering; used by ARM port", 1}, 145 | {"replay-patterns", {"-y", "--replay-patterns"}, "replays patterns given as comma-separated list of pattern IDs", 1}, 146 | 147 | {"load-json", {"-j", "--load-json"}, "loads the specified JSON file generated in a previous fuzzer run, loads patterns given by --replay-patterns or determines the best ones", 1}, 148 | 149 | // note that these two parameters don't require a value, their presence already equals a "true" 150 | {"sync", {"-s", "--sync"}, "synchronize with REFRESH while hammering (default: present)", 0}, 151 | {"sweeping", {"-w", "--sweeping"}, "sweep the best pattern over a contig. 
memory area after fuzzing (default: absent)", 0}, 152 | 153 | {"runtime-limit", {"-t", "--runtime-limit"}, "number of seconds to run the fuzzer before sweeping/terminating (default: 120)", 1}, 154 | {"acts-per-ref", {"-a", "--acts-per-ref"}, "number of activations in a tREF interval, i.e., 7.8us (default: None)", 1}, 155 | {"probes", {"-p", "--probes"}, "number of different DRAM locations to try each pattern on (default: NUM_BANKS/4)", 1}, 156 | }}; 157 | 158 | argagg::parser_results parsed_args; 159 | try { 160 | parsed_args = argparser.parse(argc, argv); 161 | } catch (const std::exception &e) { 162 | std::cerr << e.what() << '\n'; 163 | exit(EXIT_FAILURE); 164 | } 165 | 166 | if (parsed_args["help"]) { 167 | std::cerr << argparser; 168 | exit(EXIT_SUCCESS); 169 | } 170 | 171 | /** 172 | * mandatory parameters 173 | */ 174 | if (parsed_args.has_option("dimm-id")) { 175 | program_args.dimm_id = parsed_args["dimm-id"].as(0); 176 | Logger::log_debug(format_string("Set --dimm-id: %ld", program_args.dimm_id)); 177 | } else { 178 | Logger::log_error("Program argument '--dimm-id ' is mandatory! Cannot continue."); 179 | exit(EXIT_FAILURE); 180 | } 181 | 182 | if (parsed_args.has_option("ranks")) { 183 | program_args.num_ranks = parsed_args["ranks"].as(0); 184 | Logger::log_debug(format_string("Set --ranks=%d", program_args.num_ranks)); 185 | } else { 186 | Logger::log_error("Program argument '--ranks ' is mandatory! Cannot continue."); 187 | exit(EXIT_FAILURE); 188 | } 189 | 190 | /** 191 | * optional parameters 192 | */ 193 | program_args.sweeping = parsed_args.has_option("sweeping") || program_args.sweeping; 194 | Logger::log_debug(format_string("Set --sweeping=%s", (program_args.sweeping ? 
"true" : "false"))); 195 | 196 | program_args.runtime_limit = parsed_args["runtime-limit"].as(program_args.runtime_limit); 197 | Logger::log_debug(format_string("Set --runtime_limit=%ld", program_args.runtime_limit)); 198 | 199 | program_args.acts_per_trefi = parsed_args["acts-per-ref"].as(program_args.acts_per_trefi); 200 | Logger::log_info(format_string("+++ %d", program_args.acts_per_trefi)); 201 | program_args.fixed_acts_per_ref = (program_args.acts_per_trefi != 0); 202 | Logger::log_debug(format_string("Set --acts-per-ref=%d", program_args.acts_per_trefi)); 203 | 204 | program_args.num_address_mappings_per_pattern = parsed_args["probes"].as(program_args.num_address_mappings_per_pattern); 205 | Logger::log_debug(format_string("Set --probes=%d", program_args.num_address_mappings_per_pattern)); 206 | 207 | /** 208 | * program modes 209 | */ 210 | if (parsed_args.has_option("generate-patterns")) { 211 | auto num_activations = parsed_args["generate-patterns"].as(84); 212 | // this must happen AFTER probes-per-pattern has been parsed 213 | // note: the following method call does not return anymore 214 | handle_arg_generate_patterns(num_activations, program_args.num_address_mappings_per_pattern); 215 | } else if (parsed_args.has_option("load-json")) { 216 | program_args.load_json_filename = parsed_args["load-json"].as(""); 217 | if (parsed_args.has_option("replay-patterns")) { 218 | auto vec_pattern_ids = parsed_args["replay-patterns"].as>(); 219 | program_args.pattern_ids = std::unordered_set( 220 | vec_pattern_ids.values.begin(), 221 | vec_pattern_ids.values.end()); 222 | } else { 223 | program_args.pattern_ids = std::unordered_set(); 224 | } 225 | } else { 226 | program_args.do_fuzzing = parsed_args["fuzzing"].as(true); 227 | const bool default_sync = true; 228 | program_args.use_synchronization = parsed_args.has_option("sync") || default_sync; 229 | } 230 | } 231 | -------------------------------------------------------------------------------- 
/src/Forges/FuzzyHammerer.cpp: -------------------------------------------------------------------------------- 1 | #include "Forges/FuzzyHammerer.hpp" 2 | 3 | #include 4 | 5 | #include "Utilities/TimeHelper.hpp" 6 | #include "Fuzzer/PatternBuilder.hpp" 7 | #include "Forges/ReplayingHammerer.hpp" 8 | 9 | // initialize the static variables 10 | size_t FuzzyHammerer::cnt_pattern_probes = 0UL; 11 | size_t FuzzyHammerer::cnt_generated_patterns = 0UL; 12 | std::unordered_map> FuzzyHammerer::map_pattern_mappings_bitflips; 13 | HammeringPattern FuzzyHammerer::hammering_pattern = HammeringPattern(); /* NOLINT */ 14 | 15 | void FuzzyHammerer::n_sided_frequency_based_hammering(DramAnalyzer &dramAnalyzer, Memory &memory, int acts, 16 | unsigned long runtime_limit, const size_t probes_per_pattern, 17 | bool sweep_best_pattern) { 18 | std::mt19937 gen = std::mt19937(std::random_device()()); 19 | 20 | Logger::log_info( 21 | format_string("Starting frequency-based fuzzer run with time limit of %l minutes.", runtime_limit/60)); 22 | 23 | // make sure that this is empty (e.g., from previous call to this function) 24 | map_pattern_mappings_bitflips.clear(); 25 | 26 | FuzzingParameterSet fuzzing_params(acts); 27 | fuzzing_params.print_static_parameters(); 28 | 29 | ReplayingHammerer replaying_hammerer(memory); 30 | 31 | #ifdef ENABLE_JSON 32 | nlohmann::json arr = nlohmann::json::array(); 33 | #endif 34 | 35 | // all patterns that triggered bit flips 36 | std::vector effective_patterns; 37 | 38 | HammeringPattern best_hammering_pattern; 39 | PatternAddressMapper best_mapping; 40 | 41 | size_t best_mapping_bitflips = 0; 42 | size_t best_hammering_pattern_bitflips = 0; 43 | 44 | const auto start_ts = get_timestamp_sec(); 45 | const auto execution_time_limit = static_cast(start_ts + runtime_limit); 46 | 47 | for (; get_timestamp_sec() < execution_time_limit; ++cnt_generated_patterns) { 48 | Logger::log_timestamp(); 49 | Logger::log_highlight(format_string("Generating hammering pattern 
#%lu.", cnt_generated_patterns)); 50 | fuzzing_params.randomize_parameters(true); 51 | 52 | // generate a hammering pattern: this is like a general access pattern template without concrete addresses 53 | FuzzyHammerer::hammering_pattern = HammeringPattern(fuzzing_params.get_base_period()); 54 | PatternBuilder pattern_builder(hammering_pattern); 55 | pattern_builder.generate_frequency_based_pattern(fuzzing_params); 56 | 57 | Logger::log_info("Abstract pattern based on aggressor IDs:"); 58 | Logger::log_data(hammering_pattern.get_pattern_text_repr()); 59 | Logger::log_info("Aggressor pairs, given as \"(id ...) : freq, amp, start_offset\":"); 60 | Logger::log_data(hammering_pattern.get_agg_access_pairs_text_repr()); 61 | 62 | // randomize the order of AggressorAccessPatterns to avoid biasing the PatternAddressMapper as it always assigns 63 | // rows in order of the AggressorAccessPatterns map (e.g., first element is assigned to the lowest DRAM row).] 64 | std::shuffle(hammering_pattern.agg_access_patterns.begin(), hammering_pattern.agg_access_patterns.end(), gen); 65 | 66 | // then test this pattern with N different mappings (i.e., address sets) 67 | size_t sum_flips_one_pattern_all_mappings = 0; 68 | for (cnt_pattern_probes = 0; cnt_pattern_probes < probes_per_pattern; ++cnt_pattern_probes) { 69 | PatternAddressMapper mapper; 70 | // Logger::log_info(format_string("Running pattern #%lu (%s) for address set %d (%s).", 71 | // current_round, hammering_pattern.instance_id.c_str(), cnt_pattern_probes, mapper.get_instance_id().c_str())); 72 | // 73 | // we test this combination of (pattern, mapping) at three different DRAM locations 74 | probe_mapping_and_scan(mapper, memory, fuzzing_params, program_args.num_dram_locations_per_mapping); 75 | sum_flips_one_pattern_all_mappings += mapper.count_bitflips(); 76 | 77 | if (sum_flips_one_pattern_all_mappings > 0) { 78 | // it is important that we store this mapper only after we did memory.check_memory to include the found 
BitFlip 79 | hammering_pattern.address_mappings.push_back(mapper); 80 | } 81 | } 82 | 83 | if (sum_flips_one_pattern_all_mappings > 0) { 84 | effective_patterns.push_back(hammering_pattern); 85 | arr.push_back(hammering_pattern); 86 | } 87 | 88 | // TODO additionally consider the number of locations where this pattern triggers bit flips besides the total 89 | // number of bit flips only because we want to find a pattern that generalizes well 90 | // if this pattern is better than every other pattern tried out before, mark this as 'new best pattern' 91 | if (sum_flips_one_pattern_all_mappings > best_hammering_pattern_bitflips) { 92 | best_hammering_pattern = hammering_pattern; 93 | best_hammering_pattern_bitflips = sum_flips_one_pattern_all_mappings; 94 | 95 | // find the best mapping of this pattern (generally it doesn't matter as we're sweeping anyway over a chunk of 96 | // memory but the mapper also contains a reference to the CodeJitter, which in turn uses some parameters that we 97 | // want to reuse during sweeping; other mappings could differ in these parameters) 98 | for (const auto &m : hammering_pattern.address_mappings) { 99 | size_t num_bitflips = m.count_bitflips(); 100 | if (num_bitflips > best_mapping_bitflips) { 101 | best_mapping = m; 102 | best_mapping_bitflips = num_bitflips; 103 | } 104 | } 105 | } 106 | 107 | // dynamically change num acts per tREF after every 100 patterns; this is to avoid that we made a bad choice at the 108 | // beginning and then get stuck with that value 109 | // if the user provided a fixed num acts per tREF value via the program arguments, then we will not change it 110 | if (cnt_generated_patterns%100==0 && !program_args.fixed_acts_per_ref) { 111 | auto old_nacts = fuzzing_params.get_num_activations_per_t_refi(); 112 | // repeat measuring the number of possible activations per tREF as it might be that the current value is not optimal 113 | 
fuzzing_params.set_num_activations_per_t_refi(static_cast(dramAnalyzer.count_acts_per_trefi())); 114 | Logger::log_info( 115 | format_string("Recomputed number of ACTs per tREF (old: %d, new: %d).", 116 | old_nacts, 117 | fuzzing_params.get_num_activations_per_t_refi())); 118 | } 119 | 120 | } // end of fuzzing 121 | 122 | log_overall_statistics( 123 | cnt_generated_patterns, 124 | best_mapping.get_instance_id(), 125 | best_mapping_bitflips, 126 | effective_patterns.size()); 127 | 128 | // start the post-analysis stage ============================ 129 | if (arr.empty()) { 130 | Logger::log_info("Skipping post-analysis stage as no effective patterns were found."); 131 | } else { 132 | Logger::log_info("Starting post-analysis stage."); 133 | } 134 | 135 | #ifdef ENABLE_JSON 136 | // export everything to JSON, this includes the HammeringPattern, AggressorAccessPattern, and BitFlips 137 | std::ofstream json_export("fuzz-summary.json"); 138 | 139 | nlohmann::json meta; 140 | meta["start"] = start_ts; 141 | meta["end"] = get_timestamp_sec(); 142 | meta["num_patterns"] = arr.size(); 143 | meta["memory_config"] = DRAMAddr::get_memcfg_json(); 144 | meta["dimm_id"] = program_args.dimm_id; 145 | 146 | nlohmann::json root; 147 | root["metadata"] = meta; 148 | root["hammering_patterns"] = arr; 149 | 150 | json_export << root << std::endl; 151 | json_export.close(); 152 | #endif 153 | 154 | // define the location where we are going to do the large sweep 155 | DRAMAddr sweep_start = DRAMAddr( 156 | Range(0, NUM_BANKS-1).get_random_number(gen), 157 | Range(0, 4095).get_random_number(gen), 158 | 0); 159 | 160 | size_t SWEEP_MEM_SIZE_BEST_PATTERN = 2*1024*1024; // in bytes 161 | Logger::log_info(format_string("Doing a sweep of %d MB to determine most effective pattern.", 162 | SWEEP_MEM_SIZE_BEST_PATTERN)); 163 | 164 | // store for each (pattern, mapping) the number of observed bit flips during sweep 165 | struct PatternMappingStat { 166 | std::string pattern_id; 167 | std::string 
mapping_id; 168 | }; 169 | std::map> patterns_stat; 170 | 171 | for (auto &patt : effective_patterns) { 172 | for (auto &mapping : patt.address_mappings) { 173 | // move the pattern to the target DRAM location 174 | mapping.remap_aggressors(sweep_start); 175 | 176 | // do the minisweep 177 | SweepSummary summary = replaying_hammerer.sweep_pattern(patt, mapping, 10, SWEEP_MEM_SIZE_BEST_PATTERN, {}); 178 | 179 | PatternMappingStat pms; 180 | pms.pattern_id = patt.instance_id; 181 | pms.mapping_id = mapping.get_instance_id(); 182 | patterns_stat.emplace(summary.observed_bitflips.size(), pms); 183 | } 184 | } 185 | 186 | // printout - just for debugging 187 | Logger::log_info("Summary of minisweep:"); 188 | Logger::log_data( 189 | format_string("%4s\t%-6s\t%-8s\t%-8s", "Rank", "#Flips", "Pattern ID", "Mapping ID\n")); 190 | int rank = 1; 191 | for (const auto &[k,v] : patterns_stat) { 192 | Logger::log_data(format_string("%4d\t%6d\t%-8s\t%-8s", 193 | rank, k, v.pattern_id.substr(0,8).c_str(), v.mapping_id.substr(0,8).c_str())); 194 | rank++; 195 | } 196 | 197 | // do sweep with the pattern that performed best in the minisweep 198 | const std::string best_pattern_id = patterns_stat.begin()->second.pattern_id; 199 | Logger::log_info(format_string("best_pattern_id = %s", best_pattern_id.c_str())); 200 | 201 | const std::string best_pattern_mapping_id = patterns_stat.begin()->second.mapping_id; 202 | Logger::log_info(format_string("best_pattern_mapping_id = %s", best_pattern_mapping_id.c_str())); 203 | 204 | if (!sweep_best_pattern){ 205 | return; 206 | } 207 | 208 | size_t num_bitflips_sweep = 0; 209 | for (const auto &[k,v] : patterns_stat) { 210 | // find the pattern in the effective_patterns list as patterns_stat just contains the pattern/mapping ID 211 | auto target_pattern_id = v.pattern_id; 212 | auto best_pattern_for_sweep = std::find_if(effective_patterns.begin(), effective_patterns.end(), [&](auto &pattern) { 213 | return pattern.instance_id == target_pattern_id; 
214 | }); 215 | if (best_pattern_for_sweep == effective_patterns.end()) { 216 | Logger::log_error(format_string("Could not find pattern %s in effective patterns list.", 217 | target_pattern_id.c_str())); 218 | continue; 219 | } 220 | 221 | // remove all mappings from best pattern except the 'best mapping' because the sweep function does otherwise not 222 | // know which the best mapping is 223 | for (auto it = best_pattern_for_sweep->address_mappings.begin(); it != best_pattern_for_sweep->address_mappings.end(); ) { 224 | if (it->get_instance_id() != best_pattern_mapping_id) { 225 | it = best_pattern_for_sweep->address_mappings.erase(it); 226 | } else { 227 | ++it; 228 | } 229 | } 230 | 231 | // do sweep with 1x256MB of memory 232 | replaying_hammerer.set_params(fuzzing_params); 233 | num_bitflips_sweep = replaying_hammerer.replay_patterns_brief({*best_pattern_for_sweep}, 234 | MB(256), 1, true); 235 | 236 | // if the sweep was not successful, we take the next best pattern and repeat 237 | if (num_bitflips_sweep > 0) 238 | break; 239 | } 240 | } 241 | 242 | void FuzzyHammerer::test_location_dependence(ReplayingHammerer &rh, HammeringPattern &pattern) { 243 | // find the most effective mapping of the given pattern by looking into data collected before 244 | Logger::log_info(format_string("[test_location_dependence] Finding best mapping for given pattern (%s).", 245 | pattern.instance_id.c_str())); 246 | PatternAddressMapper &best_mapping = pattern.get_most_effective_mapping(); 247 | Logger::log_info(format_string("[test_location_dependence] Best mapping (%s) triggered %d bit flips.", 248 | best_mapping.get_instance_id().c_str(), best_mapping.count_bitflips())); 249 | 250 | // determine the aggressor pairs that triggered the bit flip 251 | Logger::log_info("[test_location_dependence] Finding the direct effective aggressors."); 252 | std::unordered_set direct_effective_aggs; 253 | ReplayingHammerer::find_direct_effective_aggs(pattern, best_mapping, 
direct_effective_aggs); 254 | Logger::log_info(format_string("[test_location_dependence] Found %zu direct effective aggressors.", 255 | direct_effective_aggs.size())); 256 | 257 | // copy the mapping 258 | Logger::log_info("[test_location_dependence] Copying the original pattern."); 259 | 260 | // do a sweep over N rows where we move all aggressor pairs each time by 1 row 261 | Logger::log_info("[test_location_dependence] Doing sweep 1/2: moving all aggressor pairs."); 262 | SweepSummary ss_move_all = rh.sweep_pattern(pattern, best_mapping, 1, MB(8)); 263 | 264 | // restore the copied mapping to have the same start position (this should help in avoiding wrong results due to 265 | // memory regions that are differently vulnerable) 266 | Logger::log_info("[test_location_dependence] Restoring original mapping to get same start row."); 267 | 268 | // do a sweep over N rows where we only move the aggressor pair that triggered the bit flip each time by 1 row 269 | Logger::log_info("[test_location_dependence] Doing sweep 2/2: moving only effective agg pairs."); 270 | SweepSummary ss_move_selected = rh.sweep_pattern(pattern, best_mapping, 1, MB(8), direct_effective_aggs); 271 | 272 | // compare number of bit flips 273 | bool is_location_dependent = (ss_move_selected.observed_bitflips.size() > ss_move_all.observed_bitflips.size()); 274 | Logger::log_info(format_string( 275 | "[test_location_dependence] Comparing #bit flips: all %zu vs selected %zu => location-dependent: %s", 276 | ss_move_all.observed_bitflips.size(), 277 | ss_move_selected.observed_bitflips.size(), 278 | is_location_dependent ? 
"YES" : "NO")); 279 | 280 | // write True in is_location_dependent in HammeringPattern in case that fixing the 'random' aggressors leads to better 281 | // results than moving everything 282 | Logger::log_info("[test_location_dependence] Writing is_location_dependent into HammeringPattern."); 283 | pattern.is_location_dependent = is_location_dependent; 284 | } 285 | 286 | void FuzzyHammerer::probe_mapping_and_scan(PatternAddressMapper &mapper, Memory &memory, 287 | FuzzingParameterSet &fuzzing_params, size_t num_dram_locations) { 288 | 289 | // ATTENTION: This method uses the global variable hammering_pattern to refer to the pattern that is to be hammered 290 | 291 | CodeJitter &code_jitter = mapper.get_code_jitter(); 292 | 293 | // randomize the aggressor ID -> DRAM row mapping 294 | mapper.randomize_addresses(fuzzing_params, hammering_pattern.agg_access_patterns, true); 295 | 296 | // now fill the pattern with these random addresses 297 | std::vector hammering_accesses_vec; 298 | mapper.export_pattern(hammering_pattern.aggressors, hammering_pattern.base_period, hammering_accesses_vec); 299 | Logger::log_info("Aggressor ID to DRAM address mapping (bank, row, column):"); 300 | Logger::log_data(mapper.get_mapping_text_repr()); 301 | 302 | // now create instructions that follow this pattern (i.e., do jitting of code) 303 | bool sync_at_each_ref = fuzzing_params.get_random_sync_each_ref(); 304 | int num_aggs_for_sync = fuzzing_params.get_random_num_aggressors_for_sync(); 305 | Logger::log_info("Creating ASM code for hammering."); 306 | code_jitter.jit_strict(fuzzing_params.get_num_activations_per_t_refi(), 307 | fuzzing_params.flushing_strategy, fuzzing_params.fencing_strategy, 308 | hammering_accesses_vec, sync_at_each_ref, num_aggs_for_sync, 309 | fuzzing_params.get_hammering_total_num_activations()); 310 | 311 | size_t flipped_bits = 0; 312 | for (size_t dram_location = 0; dram_location < num_dram_locations; ++dram_location) { 313 | mapper.bit_flips.emplace_back(); 
314 | 315 | Logger::log_info(format_string("Running pattern #%lu (%s) for address set %d (%s) at DRAM location #%ld.", 316 | cnt_generated_patterns, 317 | hammering_pattern.instance_id.c_str(), 318 | cnt_pattern_probes, 319 | mapper.get_instance_id().c_str(), 320 | dram_location)); 321 | 322 | // wait for a random time before starting to hammer, while waiting access random rows that are not part of the 323 | // currently hammering pattern; this wait interval serves for two purposes: to reset the sampler and start from a 324 | // clean state before hammering, and also to fuzz a possible dependence at which REF we start hammering 325 | auto wait_until_hammering_us = fuzzing_params.get_random_wait_until_start_hammering_us(); 326 | FuzzingParameterSet::print_dynamic_parameters2(sync_at_each_ref, wait_until_hammering_us, num_aggs_for_sync); 327 | 328 | std::vector random_rows; 329 | if (wait_until_hammering_us > 0) { 330 | random_rows = mapper.get_random_nonaccessed_rows(fuzzing_params.get_max_row_no()); 331 | do_random_accesses(random_rows, wait_until_hammering_us); 332 | } 333 | 334 | // do hammering 335 | code_jitter.hammer_pattern(fuzzing_params, true); 336 | 337 | // check if any bit flips happened 338 | flipped_bits += memory.check_memory(mapper, false, true); 339 | 340 | // now shift the mapping to another location 341 | std::mt19937 gen = std::mt19937(std::random_device()()); 342 | mapper.shift_mapping(Range(1,32).get_random_number(gen), {}); 343 | 344 | if (dram_location + 1 < num_dram_locations) { 345 | // wait a bit and do some random accesses before checking reproducibility of the pattern 346 | if (random_rows.empty()) random_rows = mapper.get_random_nonaccessed_rows(fuzzing_params.get_max_row_no()); 347 | do_random_accesses(random_rows, 64000); // 64ms (retention time) 348 | } 349 | } 350 | 351 | // store info about this bit flip (pattern ID, mapping ID, no. 
of bit flips) 352 | map_pattern_mappings_bitflips[hammering_pattern.instance_id].emplace(mapper.get_instance_id(), flipped_bits); 353 | 354 | // cleanup the jitter for its next use 355 | code_jitter.cleanup(); 356 | } 357 | 358 | void FuzzyHammerer::log_overall_statistics(size_t cur_round, const std::string &best_mapping_id, 359 | size_t best_mapping_num_bitflips, size_t num_effective_patterns) { 360 | Logger::log_info("Fuzzing run finished successfully."); 361 | Logger::log_data(format_string("Number of generated patterns: %lu", cur_round)); 362 | Logger::log_data(format_string("Number of generated mappings per pattern: %lu", 363 | program_args.num_address_mappings_per_pattern)); 364 | Logger::log_data(format_string("Number of tested locations per pattern: %lu", 365 | program_args.num_dram_locations_per_mapping)); 366 | Logger::log_data(format_string("Number of effective patterns: %lu", num_effective_patterns)); 367 | Logger::log_data(format_string("Best pattern ID: %s", best_mapping_id.c_str())); 368 | Logger::log_data(format_string("Best pattern #bitflips: %ld", best_mapping_num_bitflips)); 369 | } 370 | 371 | void FuzzyHammerer::generate_pattern_for_ARM(int acts, 372 | int *rows_to_access, 373 | int max_accesses, 374 | const size_t probes_per_pattern) { 375 | FuzzingParameterSet fuzzing_params(acts); 376 | fuzzing_params.print_static_parameters(); 377 | fuzzing_params.randomize_parameters(true); 378 | 379 | hammering_pattern.aggressors.clear(); 380 | if (cnt_pattern_probes > 1 && cnt_pattern_probes < probes_per_pattern) { 381 | cnt_pattern_probes++; 382 | } else { 383 | cnt_pattern_probes = 0; 384 | hammering_pattern = HammeringPattern(fuzzing_params.get_base_period()); 385 | } 386 | 387 | PatternBuilder pattern_builder(hammering_pattern); 388 | pattern_builder.generate_frequency_based_pattern(fuzzing_params); 389 | 390 | Logger::log_info("Aggressor pairs, given as \"(id ...) 
: freq, amp, start_offset\":"); 391 | Logger::log_data(hammering_pattern.get_agg_access_pairs_text_repr()); 392 | 393 | // choose random addresses for pattern 394 | PatternAddressMapper mapper; 395 | mapper.randomize_addresses(fuzzing_params, hammering_pattern.agg_access_patterns, true); 396 | mapper.export_pattern(hammering_pattern.aggressors, hammering_pattern.base_period, rows_to_access, max_accesses); 397 | Logger::log_info("Aggressor ID to DRAM address mapping (bank, rank, column):"); 398 | Logger::log_data(mapper.get_mapping_text_repr()); 399 | } 400 | 401 | void FuzzyHammerer::do_random_accesses(const std::vector &random_rows, const int duration_us) { 402 | const auto random_access_limit = get_timestamp_us() + static_cast(duration_us); 403 | while (get_timestamp_us() < random_access_limit) { 404 | for (volatile char *e : random_rows) { 405 | (void)*e; // this should be fine as random_rows are volatile 406 | } 407 | } 408 | } 409 | -------------------------------------------------------------------------------- /src/Forges/TraditionalHammerer.cpp: -------------------------------------------------------------------------------- 1 | #include "Forges/TraditionalHammerer.hpp" 2 | 3 | #include "Utilities/TimeHelper.hpp" 4 | #include "Blacksmith.hpp" 5 | 6 | /// Performs hammering on given aggressor rows for HAMMER_ROUNDS times. 
7 | void TraditionalHammerer::hammer(std::vector &aggressors) { 8 | hammer(aggressors, HAMMER_ROUNDS); 9 | } 10 | 11 | void TraditionalHammerer::hammer(std::vector &aggressors, size_t reps) { 12 | for (size_t i = 0; i < reps; i++) { 13 | for (auto &a : aggressors) { 14 | (void)*a; 15 | } 16 | for (auto &a : aggressors) { 17 | clflushopt(a); 18 | } 19 | mfence(); 20 | } 21 | } 22 | 23 | void TraditionalHammerer::hammer_flush_early(std::vector &aggressors, size_t reps) { 24 | for (size_t i = 0; i < reps; i++) { 25 | for (auto &a : aggressors) { 26 | (void)*a; 27 | clflushopt(a); 28 | } 29 | mfence(); 30 | } 31 | } 32 | 33 | /// Performs synchronized hammering on the given aggressor rows. 34 | void TraditionalHammerer::hammer_sync(std::vector &aggressors, int acts, 35 | volatile char *d1, volatile char *d2) { 36 | size_t ref_rounds = std::max(1UL, acts/aggressors.size()); 37 | 38 | // determines how often we are repeating 39 | size_t agg_rounds = ref_rounds; 40 | uint64_t before, after; 41 | 42 | (void)*d1; 43 | (void)*d2; 44 | 45 | // synchronize with the beginning of an interval 46 | while (true) { 47 | clflushopt(d1); 48 | clflushopt(d2); 49 | mfence(); 50 | before = rdtscp(); 51 | lfence(); 52 | (void)*d1; 53 | (void)*d2; 54 | after = rdtscp(); 55 | // check if an ACTIVATE was issued 56 | if ((after - before) > 1000) { 57 | break; 58 | } 59 | } 60 | 61 | // perform hammering for HAMMER_ROUNDS/ref_rounds times 62 | for (size_t i = 0; i < HAMMER_ROUNDS/ref_rounds; i++) { 63 | for (size_t j = 0; j < agg_rounds; j++) { 64 | for (size_t k = 0; k < aggressors.size() - 2; k++) { 65 | (void)(*aggressors[k]); 66 | clflushopt(aggressors[k]); 67 | } 68 | mfence(); 69 | } 70 | 71 | // after HAMMER_ROUNDS/ref_rounds times hammering, check for next ACTIVATE 72 | while (true) { 73 | mfence(); 74 | lfence(); 75 | before = rdtscp(); 76 | lfence(); 77 | clflushopt(d1); 78 | (void)*d1; 79 | clflushopt(d2); 80 | (void)*d2; 81 | after = rdtscp(); 82 | lfence(); 83 | // stop if an 
ACTIVATE was issued 84 | if ((after - before) > 1000) break; 85 | } 86 | } 87 | } 88 | 89 | [[maybe_unused]] void TraditionalHammerer::n_sided_hammer_experiment(Memory &memory, int acts) { 90 | std::random_device rd; 91 | std::mt19937 gen(rd()); 92 | std::uniform_int_distribution dist(0, std::numeric_limits::max()); 93 | 94 | // This implement the experiment showing the offset is an important factor when crafting patterns. 95 | // Randomly chooses a double-sided pair 96 | // Create a pattern of N ACTIVATEs (determine based on number of ACTs per tREF) 97 | // Loop over the offset (position of double-sided pair within pattern) 98 | // Place aggressors at current offset and randomize all other accesses 99 | // Hammer pattern for acts activations 100 | // Scan for flipped rows 101 | 102 | #ifdef ENABLE_JSON 103 | nlohmann::json all_results = nlohmann::json::array(); 104 | nlohmann::json current; 105 | #endif 106 | 107 | const auto start_ts = time(nullptr); 108 | const auto num_aggs = 2; 109 | const auto pattern_length = (size_t) acts; 110 | 111 | size_t v = 2; // distance between aggressors (within a pair) 112 | 113 | size_t low_row_no; 114 | void *low_row_vaddr; 115 | size_t high_row_no; 116 | void *high_row_vaddr; 117 | 118 | auto update_low_high = [&](DRAMAddr &dramAddr) { 119 | if (dramAddr.row < low_row_no) { 120 | low_row_no = dramAddr.row; 121 | low_row_vaddr = dramAddr.to_virt(); 122 | } 123 | if (dramAddr.row > high_row_no) { 124 | high_row_no = dramAddr.row; 125 | high_row_vaddr = dramAddr.to_virt(); 126 | } 127 | }; 128 | 129 | const auto TARGET_BANK = 0; 130 | const auto NUM_LOCATIONS = 10; 131 | const size_t MAX_AMPLITUDE = 6; 132 | 133 | for (size_t cur_location = 1; cur_location <= NUM_LOCATIONS; ++cur_location) { 134 | // start address/row 135 | DRAMAddr cur_next_addr(TARGET_BANK, dist(gen)%2048, 0); 136 | 137 | for (size_t cur_amplitude = 1; cur_amplitude <= MAX_AMPLITUDE; ++cur_amplitude) { 138 | 139 | for (size_t cur_offset = 75; cur_offset < 
pattern_length - (num_aggs - 1); ++cur_offset) { 140 | 141 | Logger::log_debug(format_string("Running: cur_offset = %lu, cur_amplitude = %lu, cur_location = %lu/%lu", 142 | cur_offset, cur_amplitude, cur_location, NUM_LOCATIONS)); 143 | 144 | low_row_no = std::numeric_limits::max(); 145 | low_row_vaddr = nullptr; 146 | high_row_no = std::numeric_limits::min(); 147 | high_row_vaddr = nullptr; 148 | 149 | std::vector aggressors; 150 | std::stringstream ss; 151 | 152 | // fill up the pattern with accesses 153 | ss << "agg row: "; 154 | for (size_t pos = 0; pos < pattern_length;) { 155 | if (pos==cur_offset) { 156 | // add the aggressor pair 157 | DRAMAddr agg1 = cur_next_addr; 158 | DRAMAddr agg2 = agg1.add(0, v, 0); 159 | ss << agg1.row << " "; 160 | ss << agg2.row << " "; 161 | update_low_high(agg1); 162 | update_low_high(agg2); 163 | for (size_t cnt = cur_amplitude; cnt > 0; --cnt) { 164 | aggressors.push_back((volatile char *) agg1.to_virt()); 165 | aggressors.push_back((volatile char *) agg2.to_virt()); 166 | pos += 2; 167 | } 168 | } else { 169 | // fill up the remaining accesses with random rows 170 | DRAMAddr agg(TARGET_BANK, dist(gen)%1024, 0); 171 | // update_low_high(agg); 172 | ss << agg.row << " "; 173 | aggressors.push_back((volatile char *) agg.to_virt()); 174 | pos++; 175 | } 176 | } 177 | Logger::log_data(ss.str()); 178 | Logger::log_debug(format_string("#aggs in pattern = %lu", aggressors.size())); 179 | 180 | // do the hammering 181 | if (!program_args.use_synchronization) { 182 | // CONVENTIONAL HAMMERING 183 | Logger::log_info(format_string("Hammering %d aggressors on bank %d", num_aggs, TARGET_BANK)); 184 | hammer(aggressors); 185 | } else if (program_args.use_synchronization) { 186 | // SYNCHRONIZED HAMMERING 187 | // uses one dummy that are hammered repeatedly until the refresh is detected 188 | cur_next_addr.add_inplace(0, 100, 0); 189 | auto d1 = cur_next_addr; 190 | cur_next_addr.add_inplace(0, 2, 0); 191 | auto d2 = cur_next_addr; 192 | 
Logger::log_info( 193 | format_string("d1 row %" PRIu64 " (%p) d2 row %" PRIu64 " (%p)", 194 | d1.row, d1.to_virt(), 195 | d2.row, d2.to_virt())); 196 | 197 | Logger::log_info(format_string("Hammering sync %d aggressors on bank %d", num_aggs, TARGET_BANK)); 198 | hammer_sync(aggressors, acts, (volatile char *) d1.to_virt(), (volatile char *) d2.to_virt()); 199 | } 200 | 201 | // check 20 rows before and after the placed aggressors for flipped bits 202 | Logger::log_debug("Checking for flipped bits..."); 203 | const auto check_rows_around = 20; 204 | auto num_bitflips = memory.check_memory((volatile char *) low_row_vaddr, (volatile char *) high_row_vaddr); 205 | #ifdef ENABLE_JSON 206 | current["cur_offset"] = cur_offset; 207 | current["cur_amplitude"] = cur_amplitude; 208 | current["location"] = cur_location; 209 | current["num_bitflips"] = num_bitflips; 210 | current["pattern_length"] = pattern_length; 211 | current["check_rows_around"] = check_rows_around; 212 | 213 | current["aggressors"] = nlohmann::json::array(); 214 | nlohmann::json agg_1; 215 | DRAMAddr d((void *) aggressors[cur_offset]); 216 | agg_1["bank"] = d.bank; 217 | agg_1["row"] = d.row; 218 | agg_1["col"] = d.col; 219 | current["aggressors"].push_back(agg_1); 220 | nlohmann::json agg_2; 221 | DRAMAddr d2((void *) aggressors[cur_offset + 1]); 222 | agg_2["bank"] = d2.bank; 223 | agg_2["row"] = d2.row; 224 | agg_2["col"] = d2.col; 225 | current["aggressors"].push_back(agg_2); 226 | 227 | all_results.push_back(current); 228 | #endif 229 | } 230 | } 231 | } 232 | 233 | #ifdef ENABLE_JSON 234 | // export result into JSON 235 | std::ofstream json_export("experiment-summary.json"); 236 | 237 | nlohmann::json meta; 238 | meta["start"] = start_ts; 239 | meta["end"] = get_timestamp_sec(); 240 | meta["memory_config"] = DRAMAddr::get_memcfg_json(); 241 | meta["dimm_id"] = program_args.dimm_id; 242 | meta["acts_per_tref"] = acts; 243 | meta["seed"] = start_ts; 244 | 245 | nlohmann::json root; 246 | 
root["metadata"] = meta; 247 | root["results"] = all_results; 248 | 249 | json_export << root << std::endl; 250 | json_export.close(); 251 | #endif 252 | } 253 | 254 | [[maybe_unused]] void TraditionalHammerer::n_sided_hammer(Memory &memory, int acts, long runtime_limit) { 255 | std::random_device rd; 256 | std::mt19937 gen(rd()); 257 | std::uniform_int_distribution dist(0, std::numeric_limits::max()); 258 | 259 | const auto execution_limit = get_timestamp_sec() + runtime_limit; 260 | while (get_timestamp_sec() < execution_limit) { 261 | size_t aggressor_rows_size = (dist(gen)%(MAX_ROWS - 3)) + 3; // number of aggressor rows 262 | size_t v = 2; // distance between aggressors (within a pair) 263 | size_t d = dist(gen)%16; // distance of each double-sided aggressor pair 264 | 265 | for (size_t ba = 0; ba < 4; ba++) { 266 | DRAMAddr cur_next_addr(ba, dist(gen)%4096, 0); 267 | 268 | std::vector aggressors; 269 | std::stringstream ss; 270 | 271 | ss << "agg row: "; 272 | for (size_t i = 1; i < aggressor_rows_size; i += 2) { 273 | cur_next_addr.add_inplace(0, d, 0); 274 | ss << cur_next_addr.row << " "; 275 | aggressors.push_back((volatile char *) cur_next_addr.to_virt()); 276 | 277 | cur_next_addr.add_inplace(0, v, 0); 278 | ss << cur_next_addr.row << " "; 279 | aggressors.push_back((volatile char *) cur_next_addr.to_virt()); 280 | } 281 | 282 | if ((aggressor_rows_size%2)!=0) { 283 | ss << cur_next_addr.row << " "; 284 | aggressors.push_back((volatile char *) cur_next_addr.to_virt()); 285 | } 286 | Logger::log_data(ss.str()); 287 | 288 | if (!program_args.use_synchronization) { 289 | // CONVENTIONAL HAMMERING 290 | Logger::log_info(format_string("Hammering %d aggressors with v=%d d=%d on bank %d", 291 | aggressor_rows_size, v, d, ba)); 292 | hammer(aggressors); 293 | } else if (program_args.use_synchronization) { 294 | // SYNCHRONIZED HAMMERING 295 | // uses two dummies that are hammered repeatedly until the refresh is detected 296 | cur_next_addr.add_inplace(0, 100, 
0); 297 | auto d1 = cur_next_addr; 298 | cur_next_addr.add_inplace(0, v, 0); 299 | auto d2 = cur_next_addr; 300 | Logger::log_info(format_string("d1 row %" PRIu64 " (%p) d2 row %" PRIu64 " (%p)", 301 | d1.row, d1.to_virt(), d2.row, d2.to_virt())); 302 | if (ba==0) { 303 | Logger::log_info(format_string("sync: ref_rounds %lu, remainder %lu.", acts/aggressors.size(), 304 | acts - ((acts/aggressors.size())*aggressors.size()))); 305 | } 306 | Logger::log_info(format_string("Hammering sync %d aggressors on bank %d", aggressor_rows_size, ba)); 307 | hammer_sync(aggressors, acts, (volatile char *) d1.to_virt(), (volatile char *) d2.to_virt()); 308 | } 309 | 310 | // check 100 rows before and after for flipped bits 311 | memory.check_memory(aggressors[0], aggressors[aggressors.size() - 1]); 312 | } 313 | } 314 | } 315 | 316 | void TraditionalHammerer::n_sided_hammer_experiment_frequencies(Memory &memory) { 317 | #ifdef ENABLE_JSON 318 | nlohmann::json root; 319 | nlohmann::json all_results = nlohmann::json::array(); 320 | nlohmann::json current; 321 | #endif 322 | const auto start_ts = get_timestamp_sec(); 323 | 324 | std::random_device rd; 325 | std::mt19937 gen(rd()); 326 | 327 | const auto MAX_AGG_ROUNDS = 48; //16; // 1...MAX_AGG_ROUNDS 328 | const auto MIN_AGG_ROUNDS = 32; //16; // 1...MAX_AGG_ROUNDS 329 | 330 | const auto MAX_DMY_ROUNDS = 256; // 64; // 0...MAX_DMY_ROUNDS 331 | const auto MIN_DMY_ROUNDS = 110; // 64; // 0...MAX_DMY_ROUNDS 332 | 333 | const auto MAX_ROW = 4096; 334 | 335 | // auto agg1 = DRAMAddr(11, 5307, 0); 336 | // auto agg2 = DRAMAddr(11, 5309, 0); 337 | 338 | // auto agg1 = DRAMAddr(3, 3835, 0); 339 | // auto agg2 = DRAMAddr(3, 3837, 0); 340 | // 341 | // auto agg1 = DRAMAddr(15, 3778, 0); 342 | // auto agg2 = DRAMAddr(15, 3780, 0); 343 | 344 | // auto agg1 = DRAMAddr(14, 5729, 0); 345 | // auto agg2 = DRAMAddr(14, 5731, 0); 346 | 347 | 348 | #ifdef ENABLE_JSON 349 | 350 | #endif 351 | 352 | // randomly choose two dummies 353 | // auto dmy1 = 
DRAMAddr(agg1.bank, agg1.row + rand()%(MAX_ROW - agg1.row), 0); 354 | // auto dmy2 = DRAMAddr(agg1.bank, dmy1.row + 2, 0); 355 | 356 | // auto dmy1 = DRAMAddr(10, 408, 0); 357 | // auto dmy2 = DRAMAddr(10, 410, 0); 358 | 359 | //#ifdef ENABLE_JSON 360 | // root["dummies"] = nlohmann::json::array(); 361 | // for (const auto dmy: {dmy1, dmy2}) { 362 | // root["dummies"].push_back({{"bank", dmy.bank}, {"row", dmy.row}, {"col", dmy.col}}); 363 | // } 364 | //#endif 365 | 366 | // Logger::log_debug(format_string("agg rows: r%lu, r%lu", agg1.row, agg2.row)); 367 | // Logger::log_debug(format_string("dmy rows: r%lu, r%lu", dmy1.row, dmy2.row)); 368 | 369 | 370 | 371 | 372 | 373 | // std::shuffle(untested_vals.begin(), untested_vals.end(), gen); 374 | for (size_t r = 0; r < 10; ++ r) { 375 | 376 | // randomly choose two aggressors 377 | auto agg1 = DRAMAddr( 378 | Range(0, NUM_BANKS-1).get_random_number(gen), 379 | Range(0, MAX_ROW-1).get_random_number(gen), 380 | 0); 381 | auto agg2 = DRAMAddr(agg1.bank, agg1.row + 2, 0); 382 | 383 | std::vector> untested_vals; 384 | for (size_t agg_rounds = MIN_AGG_ROUNDS; agg_rounds < MAX_AGG_ROUNDS; ++agg_rounds) { 385 | for (size_t dummy_rounds = MIN_DMY_ROUNDS; dummy_rounds < MAX_DMY_ROUNDS; ++dummy_rounds) { 386 | untested_vals.emplace_back(agg_rounds, dummy_rounds); 387 | } 388 | } 389 | 390 | for (size_t i = 0; i < untested_vals.size(); ++i) { 391 | std::vector aggressors; 392 | 393 | // auto agg1 = DRAMAddr(Range(0, NUM_BANKS).get_random_number(gen), 394 | // Range(0, MAX_ROW).get_random_number(gen), 395 | // 0); 396 | // auto agg2 = agg1.add(0, 2, 0); 397 | auto dmy1 = DRAMAddr(agg1.bank, 398 | Range(0, MAX_ROW).get_random_number(gen), 399 | 0); 400 | auto dmy2 = dmy1.add(0, 2, 0); 401 | Logger::log_debug(format_string("aggs [%s, %s], dmys [%s, %s]", 402 | agg1.to_string_compact().c_str(), agg2.to_string_compact().c_str(), 403 | dmy1.to_string_compact().c_str(), dmy2.to_string_compact().c_str())); 404 | 405 | const auto 
tuple_vals = untested_vals.at(i); 406 | size_t agg_rounds = std::get<0>(tuple_vals); 407 | size_t dummy_rounds = std::get<1>(tuple_vals); 408 | 409 | Logger::log_debug(format_string("Running: location = %lu/10, agg_rounds = %lu, dummy_rounds = %lu. Remaining: %lu.", 410 | r+1, 411 | agg_rounds, 412 | dummy_rounds, 413 | untested_vals.size() - i)); 414 | 415 | for (size_t ard = 0; ard < agg_rounds; ++ard) { 416 | aggressors.push_back((volatile char *) agg1.to_virt()); 417 | aggressors.push_back((volatile char *) agg2.to_virt()); 418 | } 419 | 420 | for (size_t drd = 0; drd < dummy_rounds; ++drd) { 421 | // aggressors.push_back((volatile char *) dmy1.to_virt()); 422 | // aggressors.push_back((volatile char *) dmy2.to_virt()); 423 | auto dmy = DRAMAddr(Range(0, NUM_BANKS - 1).get_random_number(gen), 424 | Range(0, MAX_ROW - 1).get_random_number(gen), 425 | 0); 426 | aggressors.push_back((volatile char *) dmy.to_virt()); 427 | } 428 | 429 | // hammer the pattern 430 | Logger::log_info("Hammering..."); 431 | hammer_flush_early(aggressors, 8192*32); 432 | // hammer(aggressors, 5000000/aggressors.size()); 433 | // hammer(aggressors, 8192*32); 434 | // hammer_sync(aggressors, program_args.acts_per_trefi, 435 | // (volatile char *) dmy2.add(0, 111, 0).to_virt(), 436 | // (volatile char *) dmy2.add(0, 113, 0).to_virt()); 437 | 438 | // check rows before and after for flipped bits 439 | const auto check_rows_around = 15; 440 | Logger::log_info("Checking for flipped bits..."); 441 | auto sum_bitflips = memory.check_memory((volatile char *) agg1.to_virt(), 442 | (volatile char *) agg1.add(0, 1, 0).to_virt()); 443 | // sum_bitflips += memory.check_memory((volatile char *) agg2.to_virt(), 444 | // (volatile char *) agg2.add(0, 1, 0).to_virt(), 445 | // check_rows_around); 446 | // sum_bitflips += memory.check_memory((volatile char *) dmy1.to_virt(), 447 | // (volatile char *) dmy1.add(0, 1, 0).to_virt(), 448 | // check_rows_around); 449 | // sum_bitflips += 
memory.check_memory((volatile char *) dmy2.to_virt(), 450 | // (volatile char *) dmy2.add(0, 1, 0).to_virt(), 451 | // check_rows_around); 452 | 453 | // log results into JSON 454 | #ifdef ENABLE_JSON 455 | current["aggressors"] = nlohmann::json::array(); 456 | for (const auto agg: {agg1, agg2}) { 457 | current["aggressors"].push_back({{"bank", agg.bank}, {"row", agg.row}, {"col", agg.col}}); 458 | } 459 | current["agg_rounds"] = agg_rounds; 460 | current["dummy_rounds"] = dummy_rounds; 461 | current["num_bitflips"] = sum_bitflips; 462 | current["pattern_length"] = aggressors.size(); 463 | current["check_rows_around"] = check_rows_around; 464 | 465 | all_results.push_back(current); 466 | #endif 467 | // } 468 | // } 469 | } 470 | } 471 | // write JSON to disk 472 | #ifdef ENABLE_JSON 473 | // export result into JSON 474 | std::ofstream json_export("experiment-vendorC-summary.json"); 475 | 476 | nlohmann::json meta; 477 | meta["start"] = start_ts; 478 | meta["end"] = get_timestamp_sec(); 479 | meta["memory_config"] = DRAMAddr::get_memcfg_json(); 480 | meta["dimm_id"] = program_args.dimm_id; 481 | meta["seed"] = start_ts; 482 | 483 | root["metadata"] = meta; 484 | root["results"] = all_results; 485 | 486 | json_export << root << std::endl; 487 | json_export.close(); 488 | #endif 489 | } 490 | -------------------------------------------------------------------------------- /src/Fuzzer/Aggressor.cpp: -------------------------------------------------------------------------------- 1 | #include "Fuzzer/Aggressor.hpp" 2 | 3 | #include 4 | 5 | std::string Aggressor::to_string() const { 6 | if (id==ID_PLACEHOLDER_AGG) return "EMPTY"; 7 | std::stringstream ss; 8 | ss << "agg" << std::setfill('0') << std::setw(2) << id; 9 | return ss.str(); 10 | } 11 | 12 | #ifdef ENABLE_JSON 13 | 14 | void to_json(nlohmann::json &j, const Aggressor &p) { 15 | j = nlohmann::json{{"id", p.id}}; 16 | } 17 | 18 | void from_json(const nlohmann::json &j, Aggressor &p) { 19 | 
j.at("id").get_to(p.id); 20 | } 21 | 22 | #endif 23 | 24 | std::vector Aggressor::get_agg_ids(const std::vector &aggressors) { 25 | std::vector agg_ids; 26 | agg_ids.reserve(aggressors.size()); 27 | for (const auto &agg : aggressors) agg_ids.push_back(agg.id); 28 | return agg_ids; 29 | } 30 | 31 | Aggressor::Aggressor(int id) : id(id) {} 32 | 33 | std::vector Aggressor::create_aggressors(const std::vector &agg_ids) { 34 | std::vector result_list; 35 | std::unordered_map aggId_to_aggressor_map; 36 | 37 | for (const auto &id : agg_ids) { 38 | if (aggId_to_aggressor_map.count(id)==0) { 39 | aggId_to_aggressor_map[id] = Aggressor(id); 40 | } 41 | result_list.push_back(aggId_to_aggressor_map.at(id)); 42 | } 43 | 44 | return result_list; 45 | } 46 | 47 | Aggressor &Aggressor::operator=(const Aggressor &other) { 48 | if (this == &other) return *this; 49 | this->id = other.id; 50 | return *this; 51 | } 52 | -------------------------------------------------------------------------------- /src/Fuzzer/AggressorAccessPattern.cpp: -------------------------------------------------------------------------------- 1 | #include "Fuzzer/AggressorAccessPattern.hpp" 2 | 3 | #ifdef ENABLE_JSON 4 | 5 | void to_json(nlohmann::json &j, const AggressorAccessPattern &p) { 6 | j = nlohmann::json{{"frequency", p.frequency}, 7 | {"amplitude", p.amplitude}, 8 | {"start_offset", p.start_offset}, 9 | {"aggressors", Aggressor::get_agg_ids(p.aggressors)} 10 | }; 11 | } 12 | 13 | void from_json(const nlohmann::json &j, AggressorAccessPattern &p) { 14 | j.at("frequency").get_to(p.frequency); 15 | j.at("amplitude").get_to(p.amplitude); 16 | j.at("start_offset").get_to(p.start_offset); 17 | std::vector agg_ids; 18 | j.at("aggressors").get_to(agg_ids); 19 | p.aggressors = Aggressor::create_aggressors(agg_ids); 20 | } 21 | 22 | #endif 23 | 24 | bool operator==(const AggressorAccessPattern &lhs, const AggressorAccessPattern &rhs) { 25 | return 26 | lhs.frequency==rhs.frequency && 27 | 
lhs.amplitude==rhs.amplitude && 28 | lhs.start_offset==rhs.start_offset && 29 | // actually we should compare the aggressors here but we skip that because it would require us to implement a 30 | // comparison function for Aggressor too 31 | lhs.aggressors.size()==rhs.aggressors.size(); 32 | } 33 | 34 | std::string AggressorAccessPattern::to_string() const { 35 | // creates a string of aggressor IDs like (id1, id2, ...) 36 | std::stringstream aggs; 37 | aggs << "("; 38 | for (const auto &agg : aggressors) { 39 | aggs << agg.id; 40 | if (agg.id!=(*aggressors.rbegin()).id) aggs << ","; 41 | } 42 | aggs << "): "; 43 | 44 | std::stringstream ss; 45 | ss << aggs.str() << frequency << ", " << amplitude << "⨉, " << start_offset; 46 | return ss.str(); 47 | } 48 | 49 | AggressorAccessPattern &AggressorAccessPattern::operator=(const AggressorAccessPattern &other) { 50 | if (this == &other) return *this; 51 | this->frequency = other.frequency; 52 | this->amplitude = other.amplitude; 53 | this->start_offset = other.start_offset; 54 | this->aggressors = other.aggressors; 55 | return *this; 56 | } 57 | -------------------------------------------------------------------------------- /src/Fuzzer/BitFlip.cpp: -------------------------------------------------------------------------------- 1 | #include "Fuzzer/BitFlip.hpp" 2 | 3 | #include 4 | 5 | #ifdef ENABLE_JSON 6 | 7 | #include 8 | #include 9 | 10 | void to_json(nlohmann::json &j, const BitFlip &p) { 11 | std::stringstream addr; 12 | addr << "0x" << std::hex << (uint64_t)p.address.to_virt(); 13 | j = nlohmann::json{{"dram_addr", p.address}, 14 | {"bitmask", p.bitmask}, 15 | {"data", p.corrupted_data}, 16 | {"observed_at", p.observation_time}, 17 | {"addr", addr.str()}, 18 | {"page_offset", (uint64_t)p.address.to_virt()%getpagesize()} 19 | }; 20 | } 21 | 22 | void from_json(const nlohmann::json &j, BitFlip &p) { 23 | j.at("dram_addr").get_to(p.address); 24 | j.at("bitmask").get_to(p.bitmask); 25 | 
j.at("data").get_to(p.corrupted_data); 26 | // to preserve backward-compatibility 27 | if (j.contains("observed_at")) { 28 | j.at("observed_at").get_to(p.observation_time); 29 | } else { 30 | p.observation_time = 0; 31 | } 32 | } 33 | 34 | #endif 35 | 36 | BitFlip::BitFlip(const DRAMAddr &address, uint8_t flips_bitmask, uint8_t corrupted_data) 37 | : address(address), bitmask(flips_bitmask), corrupted_data(corrupted_data) { 38 | observation_time = time(nullptr); 39 | } 40 | 41 | BitFlip::BitFlip() { 42 | observation_time = time(nullptr); 43 | } 44 | 45 | size_t BitFlip::count_z2o_corruptions() const { 46 | const auto bitmask_nbits = sizeof(bitmask)*8; 47 | std::bitset mask_bits(bitmask); 48 | const auto data_nbits = sizeof(corrupted_data)*8; 49 | std::bitset data_bits(corrupted_data); 50 | // we assume that both (corrupted_data, bitmask) have the same no. of bits 51 | size_t z2o_corruptions = 0; 52 | for (size_t i = 0; i < mask_bits.size(); ++i) { 53 | if (mask_bits[i]==1 && data_bits[i]==1) 54 | z2o_corruptions++; 55 | } 56 | return z2o_corruptions; 57 | } 58 | 59 | size_t BitFlip::count_o2z_corruptions() const { 60 | const auto bitmask_nbits = sizeof(bitmask)*8; 61 | std::bitset mask_bits(bitmask); 62 | const auto data_nbits = sizeof(corrupted_data)*8; 63 | std::bitset data_bits(corrupted_data); 64 | // we assume that both (corrupted_data, bitmask) have the same no. 
of bits 65 | size_t o2z_corruptions = 0; 66 | for (size_t i = 0; i < mask_bits.size(); ++i) { 67 | if (mask_bits[i]==1 && data_bits[i]==0) 68 | o2z_corruptions++; 69 | } 70 | return o2z_corruptions; 71 | } 72 | 73 | size_t BitFlip::count_bit_corruptions() const { 74 | auto n = bitmask; 75 | unsigned int count = 0; 76 | // based on Brian Kernighan's algorithm (https://www.geeksforgeeks.org/count-set-bits-in-an-integer/) that counts the 77 | // number of set bits of an integer in O(log n) 78 | while (n > 0) { 79 | n &= (n - 1); 80 | count++; 81 | } 82 | return count; 83 | } 84 | -------------------------------------------------------------------------------- /src/Fuzzer/CodeJitter.cpp: -------------------------------------------------------------------------------- 1 | #include "Fuzzer/CodeJitter.hpp" 2 | 3 | CodeJitter::CodeJitter() 4 | : pattern_sync_each_ref(false), 5 | flushing_strategy(FLUSHING_STRATEGY::EARLIEST_POSSIBLE), 6 | fencing_strategy(FENCING_STRATEGY::LATEST_POSSIBLE), 7 | total_activations(5000000), 8 | num_aggs_for_sync(2) { 9 | #ifdef ENABLE_JITTING 10 | logger = new asmjit::StringLogger; 11 | #endif 12 | } 13 | 14 | CodeJitter::~CodeJitter() { 15 | cleanup(); 16 | } 17 | 18 | void CodeJitter::cleanup() { 19 | #ifdef ENABLE_JITTING 20 | if (fn!=nullptr) { 21 | runtime.release(fn); 22 | fn = nullptr; 23 | } 24 | if (logger!=nullptr) { 25 | delete logger; 26 | logger = nullptr; 27 | } 28 | #endif 29 | } 30 | 31 | int CodeJitter::hammer_pattern(FuzzingParameterSet &fuzzing_parameters, bool verbose) { 32 | if (fn==nullptr) { 33 | Logger::log_error("Skipping hammering pattern as pattern could not be created successfully."); 34 | return -1; 35 | } 36 | if (verbose) Logger::log_info("Hammering the last generated pattern."); 37 | int total_sync_acts = fn(); 38 | 39 | if (verbose) { 40 | Logger::log_info("Synchronization stats:"); 41 | Logger::log_data(format_string("Total sync acts: %d", total_sync_acts)); 42 | 43 | const auto total_acts_pattern = 
fuzzing_parameters.get_total_acts_pattern(); 44 | auto pattern_rounds = fuzzing_parameters.get_hammering_total_num_activations()/total_acts_pattern; 45 | auto acts_per_pattern_round = pattern_sync_each_ref 46 | // sync after each num_acts_per_tREFI: computes how many activations are necessary 47 | // by taking our pattern's length into account 48 | ? (total_acts_pattern/fuzzing_parameters.get_num_activations_per_t_refi()) 49 | // beginning and end of pattern; for simplicity we only consider the end of the 50 | // pattern here (=1) as this is the sync that is repeated after each hammering run 51 | : 1; 52 | auto num_synced_refs = pattern_rounds*acts_per_pattern_round; 53 | Logger::log_data(format_string("Number of pattern reps while hammering: %d", pattern_rounds)); 54 | Logger::log_data(format_string("Number of total synced REFs (est.): %d", num_synced_refs)); 55 | Logger::log_data(format_string("Avg. number of acts per sync: %d", total_sync_acts/num_synced_refs)); 56 | } 57 | 58 | return total_sync_acts; 59 | } 60 | 61 | void CodeJitter::jit_strict(int num_acts_per_trefi, 62 | FLUSHING_STRATEGY flushing, 63 | FENCING_STRATEGY fencing, 64 | const std::vector &aggressor_pairs, 65 | bool sync_each_ref, 66 | int num_aggressors_for_sync, 67 | int total_num_activations) { 68 | 69 | // this is used by hammer_pattern but only for some stats calculations 70 | this->pattern_sync_each_ref = sync_each_ref; 71 | this->flushing_strategy = flushing; 72 | this->fencing_strategy = fencing; 73 | this->total_activations = total_num_activations; 74 | this->num_aggs_for_sync = num_aggressors_for_sync; 75 | 76 | // decides the number of aggressors of the beginning/end to be used for detecting the refresh interval 77 | // e.g., 10 means use the first 10 aggs in aggressor_pairs (repeatedly, if necessary) to detect the start refresh 78 | // (i.e., at the beginning) and the last 10 aggs in aggressor_pairs to detect the last refresh (at the end); 79 | const auto NUM_TIMED_ACCESSES = 
num_aggressors_for_sync; 80 | 81 | // check whether the NUM_TIMED_ACCESSES value works at all - otherwise just return from this function 82 | // this is safe as hammer_pattern checks whether there's a valid jitted function 83 | if (static_cast(NUM_TIMED_ACCESSES) > aggressor_pairs.size()) { 84 | Logger::log_error(format_string("NUM_TIMED_ACCESSES (%d) is larger than #aggressor_pairs (%zu).", 85 | NUM_TIMED_ACCESSES, 86 | aggressor_pairs.size())); 87 | return; 88 | } 89 | 90 | // some sanity checks 91 | if (fn!=nullptr) { 92 | Logger::log_error( 93 | "Function pointer is not NULL, cannot continue jitting code without leaking memory. Did you forget to call cleanup() before?"); 94 | exit(1); 95 | } 96 | 97 | #ifdef ENABLE_JITTING 98 | asmjit::CodeHolder code; 99 | code.init(runtime.environment()); 100 | code.setLogger(logger); 101 | asmjit::x86::Assembler a(&code); 102 | 103 | asmjit::Label while1_begin = a.newLabel(); 104 | asmjit::Label while1_end = a.newLabel(); 105 | asmjit::Label for_begin = a.newLabel(); 106 | asmjit::Label for_end = a.newLabel(); 107 | 108 | // ==== here start's the actual program ==================================================== 109 | // The following JIT instructions are based on hammer_sync in blacksmith.cpp, git commit 624a6492. 
110 | 111 | // ------- part 1: synchronize with the beginning of an interval --------------------------- 112 | 113 | // warmup 114 | for (int idx = 0; idx < NUM_TIMED_ACCESSES; idx++) { 115 | a.mov(asmjit::x86::rax, (uint64_t) aggressor_pairs[idx]); 116 | a.mov(asmjit::x86::rbx, asmjit::x86::ptr(asmjit::x86::rax)); 117 | } 118 | 119 | a.bind(while1_begin); 120 | // clflushopt addresses involved in sync 121 | for (int idx = 0; idx < NUM_TIMED_ACCESSES; idx++) { 122 | a.mov(asmjit::x86::rax, (uint64_t) aggressor_pairs[idx]); 123 | a.clflushopt(asmjit::x86::ptr(asmjit::x86::rax)); 124 | } 125 | a.mfence(); 126 | 127 | // retrieve timestamp 128 | a.rdtscp(); // result of rdtscp is in [edx:eax] 129 | a.lfence(); 130 | a.mov(asmjit::x86::ebx, asmjit::x86::eax); // discard upper 32 bits, store lower 32b in ebx for later 131 | 132 | // use first NUM_TIMED_ACCESSES addresses for sync 133 | for (int idx = 0; idx < NUM_TIMED_ACCESSES; idx++) { 134 | a.mov(asmjit::x86::rax, (uint64_t) aggressor_pairs[idx]); 135 | a.mov(asmjit::x86::rcx, asmjit::x86::ptr(asmjit::x86::rax)); 136 | } 137 | 138 | // if ((after - before) > 1000) break; 139 | a.rdtscp(); // result: edx:eax 140 | a.sub(asmjit::x86::eax, asmjit::x86::ebx); 141 | a.cmp(asmjit::x86::eax, (uint64_t) 1000); 142 | 143 | // depending on the cmp's outcome, jump out of loop or to the loop's beginning 144 | a.jg(while1_end); 145 | a.jmp(while1_begin); 146 | a.bind(while1_end); 147 | 148 | // ------- part 2: perform hammering --------------------------------------------------------------------------------- 149 | 150 | // initialize variables 151 | a.mov(asmjit::x86::rsi, total_num_activations); 152 | a.mov(asmjit::x86::edx, 0); // num activations counter 153 | 154 | a.bind(for_begin); 155 | a.cmp(asmjit::x86::rsi, 0); 156 | a.jle(for_end); 157 | 158 | // a map to keep track of aggressors that have been accessed before and need a fence before their next access 159 | std::unordered_map accessed_before; 160 | 161 | size_t 
cnt_total_activations = 0; 162 | 163 | // hammer each aggressor once 164 | for (int i = NUM_TIMED_ACCESSES; i < static_cast(aggressor_pairs.size()) - NUM_TIMED_ACCESSES; i++) { 165 | auto cur_addr = (uint64_t) aggressor_pairs[i]; 166 | 167 | if (accessed_before[cur_addr]) { 168 | // flush 169 | if (flushing==FLUSHING_STRATEGY::LATEST_POSSIBLE) { 170 | a.mov(asmjit::x86::rax, cur_addr); 171 | a.clflushopt(asmjit::x86::ptr(asmjit::x86::rax)); 172 | accessed_before[cur_addr] = false; 173 | } 174 | // fence to ensure flushing finished and defined order of aggressors is guaranteed 175 | if (fencing==FENCING_STRATEGY::LATEST_POSSIBLE) { 176 | a.mfence(); 177 | accessed_before[cur_addr] = false; 178 | } 179 | } 180 | 181 | // hammer 182 | a.mov(asmjit::x86::rax, cur_addr); 183 | a.mov(asmjit::x86::rcx, asmjit::x86::ptr(asmjit::x86::rax)); 184 | accessed_before[cur_addr] = true; 185 | a.dec(asmjit::x86::rsi); 186 | cnt_total_activations++; 187 | 188 | // flush 189 | if (flushing==FLUSHING_STRATEGY::EARLIEST_POSSIBLE) { 190 | a.mov(asmjit::x86::rax, cur_addr); 191 | a.clflushopt(asmjit::x86::ptr(asmjit::x86::rax)); 192 | } 193 | if (sync_each_ref 194 | && ((cnt_total_activations%num_acts_per_trefi)==0)) { 195 | std::vector aggs(aggressor_pairs.begin() + i, 196 | std::min(aggressor_pairs.begin() + i + NUM_TIMED_ACCESSES, aggressor_pairs.end())); 197 | sync_ref(aggs, a); 198 | } 199 | } 200 | 201 | // fences -> ensure that aggressors are not interleaved, i.e., we access aggressors always in same order 202 | a.mfence(); 203 | 204 | // ------- part 3: synchronize with the end ----------------------------------------------------------------------- 205 | std::vector last_aggs(aggressor_pairs.end() - NUM_TIMED_ACCESSES, aggressor_pairs.end()); 206 | sync_ref(last_aggs, a); 207 | 208 | a.jmp(for_begin); 209 | a.bind(for_end); 210 | 211 | // now move our counter for no. of activations in the end of interval sync. 
#ifdef ENABLE_JITTING
// Emits a spin loop that synchronizes execution with the memory controller's REF
// commands: it repeatedly flushes and re-accesses the given aggressors while timing
// one round via rdtscp; a round taking more than 1000 cycles signals that something
// (presumably a REFRESH) delayed us, and the loop is exited.
// The caller keeps a running count of synchronization activations in edx; this
// function increments it once per access and preserves it across rdtscp (which
// clobbers edx) using push/pop.
// NOTE(review): push/pop of a 32-bit GPR (edx) is not encodable in 64-bit mode;
// verify that asmjit promotes this to push/pop rdx rather than failing silently.
void CodeJitter::sync_ref(const std::vector &aggressor_pairs, asmjit::x86::Assembler &assembler) {
  asmjit::Label wbegin = assembler.newLabel();
  asmjit::Label wend = assembler.newLabel();

  assembler.bind(wbegin);

  assembler.mfence();
  assembler.lfence();

  // save the caller's activation counter (edx) before rdtscp overwrites it
  assembler.push(asmjit::x86::edx);
  assembler.rdtscp(); // result of rdtscp is in [edx:eax]
  // discard upper 32 bits and store lower 32 bits in ebx to compare later
  assembler.mov(asmjit::x86::ebx, asmjit::x86::eax);
  assembler.lfence();
  assembler.pop(asmjit::x86::edx);

  for (auto agg : aggressor_pairs) {
    // flush first so the following load must go to DRAM (i.e., causes an activation)
    assembler.mov(asmjit::x86::rax, (uint64_t) agg);
    assembler.clflushopt(asmjit::x86::ptr(asmjit::x86::rax));

    // access
    assembler.mov(asmjit::x86::rax, (uint64_t) agg);
    assembler.mov(asmjit::x86::rcx, asmjit::x86::ptr(asmjit::x86::rax));

    // we do not deduct the sync aggressors from the total number of activations because the number of sync activations
    // varies for different patterns; if we deduct it from the total number of activations, we cannot ensure anymore
    // that we are hammering long enough/as many times as needed to trigger bit flips
    // assembler.dec(asmjit::x86::rsi);

    // update counter that counts the number of activation in the trailing synchronization
    assembler.inc(asmjit::x86::edx);
  }

  // take the second timestamp; again, protect edx (activation counter) from rdtscp
  assembler.push(asmjit::x86::edx);
  assembler.rdtscp(); // result: edx:eax
  assembler.lfence();
  assembler.pop(asmjit::x86::edx);

  // if ((after - before) > 1000) break;
  assembler.sub(asmjit::x86::eax, asmjit::x86::ebx);
  assembler.cmp(asmjit::x86::eax, (uint64_t) 1000);

  // depending on the cmp's outcome...
  assembler.jg(wend);     // ... jump out of the loop
  assembler.jmp(wbegin);  // ... or jump back to the loop's beginning
  assembler.bind(wend);
}
#endif
measured_num_acts_per_ref) : /* NOLINT */ 12 | flushing_strategy(FLUSHING_STRATEGY::EARLIEST_POSSIBLE), 13 | fencing_strategy(FENCING_STRATEGY::LATEST_POSSIBLE) { 14 | std::random_device rd; 15 | gen = std::mt19937(rd()); // standard mersenne_twister_engine seeded with some random data 16 | 17 | set_num_activations_per_t_refi(measured_num_acts_per_ref); 18 | 19 | // call randomize_parameters once to initialize static values 20 | randomize_parameters(false); 21 | } 22 | 23 | void FuzzingParameterSet::print_static_parameters() const { 24 | Logger::log_info("Printing static hammering parameters:"); 25 | Logger::log_data(format_string("agg_intra_distance: %d", agg_intra_distance)); 26 | Logger::log_data(format_string("N_sided dist.: %s", get_dist_string().c_str())); 27 | Logger::log_data(format_string("hammering_total_num_activations: %d", hammering_total_num_activations)); 28 | Logger::log_data(format_string("max_row_no: %d", max_row_no)); 29 | } 30 | 31 | void FuzzingParameterSet::print_semi_dynamic_parameters() const { 32 | Logger::log_info("Printing pattern-specific fuzzing parameters:"); 33 | Logger::log_data(format_string("num_aggressors: %d", num_aggressors)); 34 | Logger::log_data(format_string("num_refresh_intervals: %d", num_refresh_intervals)); 35 | Logger::log_data(format_string("total_acts_pattern: %zu", total_acts_pattern)); 36 | Logger::log_data(format_string("base_period: %d", base_period)); 37 | Logger::log_data(format_string("agg_inter_distance: %d", agg_inter_distance)); 38 | Logger::log_data(format_string("flushing_strategy: %s", to_string(flushing_strategy).c_str())); 39 | Logger::log_data(format_string("fencing_strategy: %s", to_string(fencing_strategy).c_str())); 40 | } 41 | 42 | void FuzzingParameterSet::print_dynamic_parameters(const int bank, bool seq_addresses, int start_row) { 43 | Logger::log_info("Printing DRAM address-related fuzzing parameters:"); 44 | Logger::log_data(format_string("bank_no: %d", bank)); 45 | 
Logger::log_data(format_string("use_seq_addresses: %s", (seq_addresses ? "true" : "false"))); 46 | Logger::log_data(format_string("start_row: %d", start_row)); 47 | } 48 | 49 | void FuzzingParameterSet::print_dynamic_parameters2(bool sync_at_each_ref, 50 | int wait_until_hammering_us, 51 | int num_aggs_for_sync) { 52 | Logger::log_info("Printing code jitting-related fuzzing parameters:"); 53 | Logger::log_data(format_string("sync_each_ref: %s", (sync_at_each_ref ? "true" : "false"))); 54 | Logger::log_data(format_string("wait_until_start_hammering_refs: %d", wait_until_hammering_us)); 55 | Logger::log_data(format_string("num_aggressors_for_sync: %d", num_aggs_for_sync)); 56 | } 57 | 58 | void FuzzingParameterSet::set_distribution(Range range_N_sided, std::unordered_map probabilities) { 59 | std::vector dd; 60 | for (int i = 0; i <= range_N_sided.max; i += 1) { 61 | dd.push_back((probabilities.count(i) > 0) ? probabilities.at(i) : (int) 0); 62 | } 63 | N_sided_probabilities = std::discrete_distribution(dd.begin(), dd.end()); 64 | } 65 | 66 | int FuzzingParameterSet::get_random_even_divisior(int n, int min_value) { 67 | std::vector divisors; 68 | for (auto i = 1; i <= sqrt(n); i++) { 69 | if (n%i==0) { 70 | if ((n/i)==1 && (i%2)==0) { 71 | divisors.push_back(i); 72 | } else { 73 | if (i%2==0) divisors.push_back(i); 74 | if ((n/i)%2==0) divisors.push_back(n/i); 75 | } 76 | } 77 | } 78 | 79 | std::shuffle(divisors.begin(), divisors.end(), gen); 80 | for (const auto &e : divisors) { 81 | if (e >= min_value) return e; 82 | } 83 | 84 | Logger::log_error(format_string("Could not determine a random even divisor of n=%d. 
// (Re)draws all fuzzing parameters from their configured ranges/distributions.
// Requires set_num_activations_per_t_refi to have been called with a positive value
// beforehand; otherwise logs an error and leaves all parameters untouched.
// Parameters fall into three groups (see banners below): dynamic (re-randomized per
// aggressor elsewhere; only their *ranges* are set here), static (fixed values), and
// semi-dynamic (drawn exactly once per call to this function).
// @param print if true, logs a notice and the chosen semi-dynamic parameters.
void FuzzingParameterSet::randomize_parameters(bool print) {
  // guard: without a valid activation count per tREFI, none of the derived values below make sense
  if (num_activations_per_tREFI <= 0) {
    Logger::log_error(
        "Called FuzzingParameterSet::randomize_parameters without valid num_activations_per_tREFI.");
    return;
  }

  if (print)
    Logger::log_info("Randomizing fuzzing parameters.");

  // █████████ DYNAMIC FUZZING PARAMETERS ████████████████████████████████████████████████████
  // are randomized for each added aggressor

  // [derivable from aggressors in AggressorAccessPattern]
  // note that in PatternBuilder::generate also uses 1-sided aggressors in case that the end of a base period needs to
  // be filled up
  N_sided = Range(1, 2);

  // [exported as part of AggressorAccessPattern]
  // choosing as max 'num_activations_per_tREFI/N_sided.min' allows hammering an agg pair for a whole REF interval;
  // we set the upper bound in dependent of N_sided.min but need to (manually) exclude 1 because an amplitude>1 does
  // not make sense for a single aggressor
  amplitude = Range(1, num_activations_per_tREFI/2);

  // == are randomized for each different set of addresses a pattern is probed with ======

  // [derivable from aggressor_to_addr (DRAMAddr) in PatternAddressMapper]
  use_sequential_aggressors = Range(0, 1);

  // sync_each_ref = 1 means that we sync after every refresh interval, otherwise we only sync after hammering
  // the whole pattern (which may consists of more than one REF interval)
  sync_each_ref = Range(0, 0);

  // [CANNOT be derived from anywhere else - but does not fit anywhere: will print to stdout only, not include in json]
  wait_until_start_hammering_refs = Range(10, 128);

  // [CANNOT be derived from anywhere else - but does not fit anywhere: will print to stdout only, not include in json]
  num_aggressors_for_sync = Range(2, 2);

  // [derivable from aggressor_to_addr (DRAMAddr) in PatternAddressMapper]
  start_row = Range(0, 2048);

  // █████████ STATIC FUZZING PARAMETERS ████████████████████████████████████████████████████
  // fix values/formulas that must be configured before running this program

  // [derivable from aggressor_to_addr (DRAMAddr) in PatternAddressMapper]
  agg_intra_distance = Range(2, 2).get_random_number(gen);

  // TODO: make this a dynamic fuzzing parameter that is randomized for each probed address set
  // [CANNOT be derived from anywhere else - but does not fit anywhere: will print to stdout only, not include in json]
  // auto strategy = get_valid_strategy_pair();
  flushing_strategy = FLUSHING_STRATEGY::EARLIEST_POSSIBLE;
  fencing_strategy = FENCING_STRATEGY::LATEST_POSSIBLE;

  // [CANNOT be derived from anywhere else - must explicitly be exported]
  // if N_sided = (1,2) and this is {{1,2},{2,8}}, then this translates to:
  // pick a 1-sided pair with 20% probability and a 2-sided pair with 80% probability
  // Note if using N_sided = Range(min, max, step), then the X values provided here as (X, Y) correspond to
  // the multiplier (e.g., multiplier's minimum is min/step and multiplier's maximum is max/step)
  set_distribution(N_sided, {{1, 20}, {2, 80}});

  // [CANNOT be derived from anywhere else - must explicitly be exported]
  // hammering_total_num_activations is derived as follow:
  //    REF interval: 7.8 μs (tREFI), retention time: 64 ms => about 8k REFs per refresh window
  //    num_activations_per_tREFI ≈100 => 8k * 100 ≈ 8M activations and we hammer for 5M acts.
  hammering_total_num_activations = 5000000;

  max_row_no = 8192;

  // █████████ SEMI-DYNAMIC FUZZING PARAMETERS ████████████████████████████████████████████████████
  // are only randomized once when calling this function

  // [derivable from aggressors in AggressorAccessPattern, also not very expressive because different agg IDs can be
  // mapped to the same DRAM address]
  num_aggressors = Range(8, 96).get_random_number(gen);

  // [included in HammeringPattern]
  // it is important that this is a power of two, otherwise the aggressors in the pattern will not respect frequencies
  num_refresh_intervals = static_cast(std::pow(2, Range(0, 4).get_random_number(gen)));

  // [included in HammeringPattern]
  total_acts_pattern = num_activations_per_tREFI*num_refresh_intervals;

  // [included in HammeringPattern]
  // base_period must evenly divide total_acts_pattern and be even for proper pattern generation
  base_period = get_random_even_divisior(total_acts_pattern, 4);

  // [derivable from aggressor_to_addr (DRAMAddr) in PatternAddressMapper]
  agg_inter_distance = Range(1, 24).get_random_number(gen);

  if (print) print_semi_dynamic_parameters();
}
FuzzingParameterSet::get_random_N_sided() { 210 | return N_sided_probabilities(gen); 211 | } 212 | 213 | int FuzzingParameterSet::get_random_N_sided(int upper_bound_max) { 214 | if (N_sided.max > upper_bound_max) { 215 | return Range(N_sided.min, upper_bound_max).get_random_number(gen); 216 | } 217 | return get_random_N_sided(); 218 | } 219 | 220 | bool FuzzingParameterSet::get_random_use_seq_addresses() { 221 | return (bool) (use_sequential_aggressors.get_random_number(gen)); 222 | } 223 | 224 | int FuzzingParameterSet::get_total_acts_pattern() const { 225 | return total_acts_pattern; 226 | } 227 | 228 | int FuzzingParameterSet::get_base_period() const { 229 | return base_period; 230 | } 231 | 232 | int FuzzingParameterSet::get_num_base_periods() const { 233 | return (int)(get_total_acts_pattern()/(size_t)get_base_period()); 234 | } 235 | 236 | int FuzzingParameterSet::get_agg_intra_distance() { 237 | return agg_intra_distance; 238 | } 239 | 240 | int FuzzingParameterSet::get_agg_inter_distance() const { 241 | return agg_inter_distance; 242 | } 243 | 244 | int FuzzingParameterSet::get_random_amplitude(int max) { 245 | return Range<>(amplitude.min, std::min(amplitude.max, max)).get_random_number(gen); 246 | } 247 | 248 | int FuzzingParameterSet::get_random_wait_until_start_hammering_us() { 249 | // each REF interval has a length of 7.8 us 250 | return static_cast(static_cast(wait_until_start_hammering_refs.get_random_number(gen)) * 7.8); 251 | } 252 | 253 | bool FuzzingParameterSet::get_random_sync_each_ref() { 254 | return (bool) (sync_each_ref.get_random_number(gen)); 255 | } 256 | 257 | int FuzzingParameterSet::get_num_activations_per_t_refi() const { 258 | return num_activations_per_tREFI; 259 | } 260 | 261 | int FuzzingParameterSet::get_random_num_aggressors_for_sync() { 262 | return num_aggressors_for_sync.get_random_number(gen); 263 | } 264 | 265 | int FuzzingParameterSet::get_random_start_row() { 266 | return start_row.get_random_number(gen); 267 | } 268 | 
// Deserializes a HammeringPattern from JSON; exact inverse of to_json above.
// Scalars are read field by field; the aggressor list is serialized as a flat list
// of IDs under "access_ids" and re-expanded into Aggressor objects here.
// Throws nlohmann::json::out_of_range if any expected key is missing, and
// nlohmann::json::type_error on type mismatches.
void from_json(const nlohmann::json &j, HammeringPattern &p) {
  j.at("id").get_to(p.instance_id);
  j.at("base_period").get_to(p.base_period);
  j.at("max_period").get_to(p.max_period);
  j.at("total_activations").get_to(p.total_activations);
  j.at("num_refresh_intervals").get_to(p.num_refresh_intervals);
  j.at("is_location_dependent").get_to(p.is_location_dependent);

  // rebuild full Aggressor instances from the stored plain IDs
  std::vector agg_ids;
  j.at("access_ids").get_to>(agg_ids);
  p.aggressors = Aggressor::create_aggressors(agg_ids);

  // these rely on the from_json overloads of the respective element types
  j.at("agg_access_patterns").get_to>(p.agg_access_patterns);
  j.at("address_mappings").get_to>(p.address_mappings);
}
get_num_digits(aggressors.size()) : 2; 72 | for (size_t i = 0; i < aggressors.size(); ++i) { 73 | // add a new line after each base period to make it easier to check a pattern's correctness 74 | if ((i%base_period)==0 && i > 0) ss << std::endl; 75 | ss << std::setfill('0') << std::setw(dwidth) << aggressors.at(i).id << " "; 76 | } 77 | return ss.str(); 78 | } 79 | 80 | std::string HammeringPattern::get_agg_access_pairs_text_repr() { 81 | std::stringstream ss; 82 | auto cnt = 0; 83 | for (const auto &agg_acc_pair : agg_access_patterns) { 84 | // add a new line after each three aggressor access patterns to avoid unintended text wrapping in terminal 85 | if (cnt > 0 && cnt%3==0) ss << std::endl; 86 | ss << std::setw(30) << std::setfill(' ') << std::left << agg_acc_pair.to_string(); 87 | cnt++; 88 | } 89 | return ss.str(); 90 | } 91 | 92 | AggressorAccessPattern &HammeringPattern::get_access_pattern_by_aggressor(Aggressor &agg) { 93 | // iterate over the AggressorAccessPatterns and return the *first* AggressorAccessPattern that has the given Aggressor 94 | // agg as its *first* Aggressor 95 | for (auto &aap : agg_access_patterns) { 96 | if (aap.aggressors[0].id==agg.id) return aap; 97 | } 98 | Logger::log_error(format_string("Could not find AggressorAccessPattern whose first aggressor has id %s.", agg.id)); 99 | exit(1); 100 | } 101 | 102 | PatternAddressMapper &HammeringPattern::get_most_effective_mapping() { 103 | if (address_mappings.empty()) { 104 | Logger::log_error("get_most_effective_mapping() failed: No mappings existing!"); 105 | exit(EXIT_FAILURE); 106 | } 107 | PatternAddressMapper &best_mapping = address_mappings.front(); 108 | for (const auto& mapping : address_mappings) { 109 | if (mapping.count_bitflips() > best_mapping.count_bitflips()) { 110 | best_mapping = mapping; 111 | } 112 | } 113 | return best_mapping; 114 | } 115 | 116 | void HammeringPattern::remove_mappings_without_bitflips() { 117 | for (auto it = address_mappings.begin(); it != 
address_mappings.end(); ) { 118 | if (it->count_bitflips() == 0) { 119 | it = address_mappings.erase(it); 120 | } else { 121 | it++; 122 | } 123 | } 124 | } 125 | -------------------------------------------------------------------------------- /src/Fuzzer/PatternAddressMapper.cpp: -------------------------------------------------------------------------------- 1 | #include "Fuzzer/PatternAddressMapper.hpp" 2 | 3 | #include 4 | 5 | #include "GlobalDefines.hpp" 6 | #include "Utilities/Uuid.hpp" 7 | 8 | // initialize the bank_counter (static var) 9 | int PatternAddressMapper::bank_counter = 0; 10 | 11 | PatternAddressMapper::PatternAddressMapper() 12 | : instance_id(uuid::gen_uuid()) { /* NOLINT */ 13 | code_jitter = std::make_unique(); 14 | 15 | // standard mersenne_twister_engine seeded with rd() 16 | std::random_device rd; 17 | gen = std::mt19937(rd()); 18 | } 19 | 20 | void PatternAddressMapper::randomize_addresses(FuzzingParameterSet &fuzzing_params, 21 | const std::vector &agg_access_patterns, 22 | bool verbose) { 23 | // clear any already existing mapping 24 | aggressor_to_addr.clear(); 25 | 26 | // retrieve and then store randomized values as they should be the same for all added addresses 27 | // (store bank_no as field for get_random_nonaccessed_rows) 28 | bank_no = PatternAddressMapper::bank_counter; 29 | PatternAddressMapper::bank_counter = (PatternAddressMapper::bank_counter + 1) % NUM_BANKS; 30 | const bool use_seq_addresses = fuzzing_params.get_random_use_seq_addresses(); 31 | const int start_row = fuzzing_params.get_random_start_row(); 32 | if (verbose) FuzzingParameterSet::print_dynamic_parameters(bank_no, use_seq_addresses, start_row); 33 | 34 | auto cur_row = static_cast(start_row); 35 | 36 | // a set of DRAM rows that are already assigned to aggressors 37 | std::set occupied_rows; 38 | 39 | // we can make use here of the fact that each aggressor (identified by its ID) has a fixed N, that means, is 40 | // either accessed individually (N=1) or in a 
group of multiple aggressors (N>1; e.g., N=2 for double sided) 41 | // => if we already know the address of any aggressor in an aggressor access pattern, we already must know 42 | // addresses for all of them as we must have accessed all of them together before 43 | size_t row; 44 | int assignment_trial_cnt = 0; 45 | 46 | size_t total_abstract_aggs = 0; 47 | for (auto &acc_pattern : agg_access_patterns) total_abstract_aggs += acc_pattern.aggressors.size(); 48 | Logger::log_info(format_string("[PatternAddressMapper] Target no. of DRAM rows = %d", 49 | fuzzing_params.get_num_aggressors())); 50 | Logger::log_info(format_string("[PatternAddressMapper] Aggressors in AggressorAccessPattern = %d", 51 | total_abstract_aggs)); 52 | 53 | // probability to map aggressor to same row as another aggressor is already mapped to 54 | const int prob2 = 100 - ( 55 | static_cast( 56 | std::min(static_cast(fuzzing_params.get_num_aggressors())/static_cast(total_abstract_aggs),1.0)*100)); 57 | Logger::log_info(format_string("[PatternAddressMapper] Probability to map multiple AAPs to same DRAM row = %d", prob2)); 58 | 59 | std::random_device device; 60 | std::mt19937 engine(device()); // Seed the random number engine 61 | std::vector weights = std::vector({100-prob2, prob2}); 62 | std::discrete_distribution<> dist(weights.begin(), weights.end()); // Create the distribution 63 | 64 | Logger::log_info("[PatternAddressMapper] weights ="); 65 | for (const auto &w : weights) { 66 | Logger::log_data(format_string("%d", w)); 67 | } 68 | 69 | // Logger::log_info("Generating 1k random numbers to see how well distribution works "); 70 | // size_t cnt_0 = 0; 71 | // size_t cnt_1 = 0; 72 | // for (size_t i = 0; i < 1000; ++i) { 73 | // if (dist(engine) == 0) 74 | // cnt_0++; 75 | // else 76 | // cnt_1++; 77 | // } 78 | // Logger::log_info(format_string("cnt_0 = %lu", cnt_0)); 79 | // Logger::log_info(format_string("cnt_1 = %lu", cnt_1)); 80 | 81 | for (auto &acc_pattern : agg_access_patterns) { 82 | 
for (size_t i = 0; i < acc_pattern.aggressors.size(); i++) { 83 | const Aggressor ¤t_agg = acc_pattern.aggressors.at(i); 84 | 85 | // aggressor has existing row mapping OR 86 | if (aggressor_to_addr.count(current_agg.id) > 0) { 87 | row = aggressor_to_addr.at(current_agg.id).row; 88 | } else if (i > 0) { // aggressor is part of a n>1 aggressor tuple 89 | // we need to add the appropriate distance and cannot choose randomly 90 | auto last_addr = aggressor_to_addr.at(acc_pattern.aggressors.at(i - 1).id); 91 | // update cur_row for its next use (note that here it is: cur_row = last_addr.row) 92 | cur_row = (last_addr.row + (size_t) fuzzing_params.get_agg_intra_distance())%fuzzing_params.get_max_row_no(); 93 | row = cur_row; 94 | } else { 95 | // this is a new aggressor pair - we can choose where to place it 96 | // if use_seq_addresses is true, we use the last address and add the agg_inter_distance on top -> this is the 97 | // row of the next aggressor 98 | // if use_seq_addresses is false, we just pick any random row no. between [0, 8192] 99 | cur_row = (cur_row + (size_t) fuzzing_params.get_agg_inter_distance())%fuzzing_params.get_max_row_no(); 100 | 101 | bool map_to_existing_agg = dist(engine); 102 | if (map_to_existing_agg && !occupied_rows.empty()) { 103 | auto idx = Range(1, occupied_rows.size()).get_random_number(gen)-1; 104 | auto it = occupied_rows.begin(); 105 | while (idx--) it++; 106 | row = *it; 107 | } else { 108 | retry: 109 | row = use_seq_addresses ? 
110 | cur_row : 111 | (Range(cur_row, cur_row + fuzzing_params.get_max_row_no()).get_random_number(gen) 112 | %fuzzing_params.get_max_row_no()); 113 | 114 | // check that we haven't assigned this address yet to another aggressor ID 115 | // if use_seq_addresses is True, the only way that the address is already assigned is that we already flipped 116 | // around the address range once (because of the modulo operator) so that retrying doesn't make sense 117 | if (!use_seq_addresses && occupied_rows.count(row) > 0) { 118 | assignment_trial_cnt++; 119 | if (assignment_trial_cnt < 7) goto retry; 120 | Logger::log_info(format_string( 121 | "Assigning unique addresses for Aggressor ID %d didn't succeed. Giving up after 3 trials.", 122 | current_agg.id)); 123 | } 124 | } 125 | } 126 | 127 | assignment_trial_cnt = 0; 128 | occupied_rows.insert(row); 129 | aggressor_to_addr.insert(std::make_pair(current_agg.id, DRAMAddr(static_cast(bank_no), row, 0))); 130 | } 131 | } 132 | 133 | // determine victim rows 134 | determine_victims(agg_access_patterns); 135 | 136 | // this works as sets are always ordered 137 | min_row = *occupied_rows.begin(); 138 | max_row = *occupied_rows.rbegin(); 139 | 140 | if (verbose) 141 | Logger::log_info(format_string("Found %d different aggressors (IDs) in pattern.", aggressor_to_addr.size())); 142 | } 143 | 144 | void PatternAddressMapper::determine_victims(const std::vector &agg_access_patterns) { 145 | // check ROW_THRESHOLD rows around the aggressors for flipped bits 146 | const int ROW_THRESHOLD = 5; 147 | // a set to make sure we add victims only once 148 | victim_rows.clear(); 149 | for (auto &acc_pattern : agg_access_patterns) { 150 | for (auto &agg : acc_pattern.aggressors) { 151 | 152 | if (aggressor_to_addr.count(agg.id)==0) { 153 | Logger::log_error(format_string("Could not find DRAMAddr mapping for Aggressor %d", agg.id)); 154 | exit(EXIT_FAILURE); 155 | } 156 | 157 | const auto dram_addr = aggressor_to_addr.at(agg.id); 158 | 159 | for 
// Translates the abstract aggressor sequence into concrete addresses using
// aggressor_to_addr. For every valid, mapped aggressor, appends its virtual address
// to `addresses` and its row number to `rows` (callers pass a dummy for the output
// they do not need). Placeholder aggressors (ID_PLACEHOLDER_AGG) and unmapped IDs are
// skipped; placeholders additionally trigger an error dump of the pattern layout.
// NOTE(review): `i%base_period` divides by the caller-supplied base_period — assumes
// base_period > 0; confirm callers guarantee this.
void PatternAddressMapper::export_pattern_internal(
    std::vector &aggressors, int base_period,
    std::vector &addresses,
    std::vector &rows) {

  bool invalid_aggs = false;
  std::stringstream pattern_str;
  for (size_t i = 0; i < aggressors.size(); ++i) {
    // for better visualization: add linebreak after each base period
    if (i!=0 && (i%base_period)==0) {
      pattern_str << std::endl;
    }

    // check whether this is a valid aggressor, i.e., the aggressor's ID != -1
    auto agg = aggressors[i];
    if (agg.id==ID_PLACEHOLDER_AGG) {
      // render placeholders in red so they stand out in the dumped pattern
      pattern_str << FC_RED << "-1" << F_RESET;
      invalid_aggs = true;
      continue;
    }

    // check whether there exists a aggressor ID -> address mapping before trying to access it
    if (aggressor_to_addr.count(agg.id)==0) {
      Logger::log_error(format_string("Could not find a valid address mapping for aggressor with ID %d.", agg.id));
      continue;
    }

    // retrieve virtual address of current aggressor in pattern and add it to output vector
    addresses.push_back((volatile char *) aggressor_to_addr.at(agg.id).to_virt());
    rows.push_back(static_cast(aggressor_to_addr.at(agg.id).row));
    pattern_str << aggressor_to_addr.at(agg.id).row << " ";
  }

  // print string representation of pattern
  // Logger::log_info("Pattern filled by random DRAM rows:");
  // Logger::log_data(pattern_str.str());

  if (invalid_aggs) {
    Logger::log_error(
        "Found at least an invalid aggressor in the pattern. "
        "These aggressors were NOT added but printed to visualize their position.");
    Logger::log_data(pattern_str.str());
  }
}
Given plain-C 'rows' array is too small to hold all aggressors."); 236 | } 237 | 238 | for (size_t i = 0; i < std::min(rows_vector.size(), max_rows); ++i) { 239 | rows[i] = rows_vector.at(i); 240 | } 241 | } 242 | 243 | std::string PatternAddressMapper::get_mapping_text_repr() { 244 | // get all keys (this is to not assume that keys always must start by 1) and sort them 245 | std::vector keys; 246 | for (auto const &map: aggressor_to_addr) keys.push_back(map.first); 247 | std::sort(keys.begin(), keys.end()); 248 | 249 | // iterate over keys and build text representation 250 | size_t cnt = 0; 251 | std::stringstream mapping_str; 252 | for (const auto &k : keys) { 253 | if (cnt > 0 && cnt%3==0) mapping_str << std::endl; 254 | mapping_str << std::setw(3) << std::left << k 255 | << " -> " 256 | << std::setw(13) << std::left << aggressor_to_addr.at(k).to_string_compact() 257 | << " "; 258 | cnt++; 259 | } 260 | 261 | return mapping_str.str(); 262 | } 263 | 264 | #ifdef ENABLE_JSON 265 | 266 | void to_json(nlohmann::json &j, const PatternAddressMapper &p) { 267 | if (p.code_jitter==nullptr) { 268 | Logger::log_error("CodeJitter is nullptr! 
Cannot serialize PatternAddressMapper without causing segfault."); 269 | return; 270 | } 271 | 272 | j = nlohmann::json{{"id", p.get_instance_id()}, 273 | {"aggressor_to_addr", p.aggressor_to_addr}, 274 | {"bit_flips", p.bit_flips}, 275 | {"min_row", p.min_row}, 276 | {"max_row", p.max_row}, 277 | {"bank_no", p.bank_no}, 278 | {"reproducibility_score", p.reproducibility_score}, 279 | {"code_jitter", *p.code_jitter} 280 | }; 281 | } 282 | 283 | void from_json(const nlohmann::json &j, PatternAddressMapper &p) { 284 | j.at("id").get_to(p.get_instance_id()); 285 | j.at("aggressor_to_addr").get_to(p.aggressor_to_addr); 286 | j.at("bit_flips").get_to(p.bit_flips); 287 | j.at("min_row").get_to(p.min_row); 288 | j.at("max_row").get_to(p.max_row); 289 | j.at("bank_no").get_to(p.bank_no); 290 | j.at("reproducibility_score").get_to(p.reproducibility_score); 291 | p.code_jitter = std::make_unique(); 292 | j.at("code_jitter").get_to(*p.code_jitter); 293 | } 294 | 295 | #endif 296 | 297 | const std::string &PatternAddressMapper::get_instance_id() const { 298 | return instance_id; 299 | } 300 | 301 | std::string &PatternAddressMapper::get_instance_id() { 302 | return instance_id; 303 | } 304 | 305 | const std::unordered_set &PatternAddressMapper::get_victim_rows() const { 306 | return victim_rows; 307 | } 308 | 309 | std::vector PatternAddressMapper::get_random_nonaccessed_rows(int row_upper_bound) { 310 | // we don't mind if addresses are added multiple times 311 | std::vector addresses; 312 | for (int i = 0; i < 1024; ++i) { 313 | auto row_no = Range(max_row, max_row + min_row).get_random_number(gen)%row_upper_bound; 314 | addresses.push_back( 315 | static_cast(DRAMAddr(static_cast(bank_no), static_cast(row_no), 0).to_virt())); 316 | } 317 | return addresses; 318 | } 319 | 320 | void PatternAddressMapper::shift_mapping(int rows, const std::unordered_set &aggs_to_move) { 321 | std::set occupied_rows; 322 | 323 | // collect the aggressor ID of the aggressors given in the 
aggs_to_move set 324 | std::unordered_set movable_ids; 325 | for (const auto &agg_pair : aggs_to_move) { 326 | for (const auto &agg : agg_pair.aggressors) { 327 | movable_ids.insert(agg.id); 328 | } 329 | } 330 | 331 | for (auto &agg_acc_patt : aggressor_to_addr) { 332 | // if aggs_to_move is empty, we consider it as 'move all aggressors'; otherwise we check whether the current 333 | // aggressor ID is in aggs_to_move prior shifting the aggressor by the given number of rows (param: rows) 334 | if (aggs_to_move.empty() || movable_ids.count(agg_acc_patt.first) > 0) { 335 | agg_acc_patt.second.row += rows; 336 | occupied_rows.insert(static_cast(agg_acc_patt.second.row)); 337 | } 338 | } 339 | 340 | // this works as sets are always ordered 341 | min_row = *occupied_rows.begin(); 342 | max_row = *occupied_rows.rbegin(); 343 | } 344 | 345 | CodeJitter &PatternAddressMapper::get_code_jitter() const { 346 | return *code_jitter; 347 | } 348 | 349 | PatternAddressMapper::PatternAddressMapper(const PatternAddressMapper &other) 350 | : victim_rows(other.victim_rows), 351 | instance_id(other.instance_id), 352 | min_row(other.min_row), 353 | max_row(other.max_row), 354 | bank_no(other.bank_no), 355 | aggressor_to_addr(other.aggressor_to_addr), 356 | bit_flips(other.bit_flips), 357 | reproducibility_score(other.reproducibility_score) { 358 | code_jitter = std::make_unique(); 359 | code_jitter->num_aggs_for_sync = other.get_code_jitter().num_aggs_for_sync; 360 | code_jitter->total_activations = other.get_code_jitter().total_activations; 361 | code_jitter->fencing_strategy = other.get_code_jitter().fencing_strategy; 362 | code_jitter->flushing_strategy = other.get_code_jitter().flushing_strategy; 363 | code_jitter->pattern_sync_each_ref = other.get_code_jitter().pattern_sync_each_ref; 364 | std::random_device rd; 365 | gen = std::mt19937(rd()); 366 | } 367 | 368 | PatternAddressMapper &PatternAddressMapper::operator=(const PatternAddressMapper &other) { 369 | if (this==&other) 
return *this; 370 | victim_rows = other.victim_rows; 371 | instance_id = other.instance_id; 372 | gen = other.gen; 373 | 374 | code_jitter = std::make_unique(); 375 | code_jitter->num_aggs_for_sync = other.get_code_jitter().num_aggs_for_sync; 376 | code_jitter->total_activations = other.get_code_jitter().total_activations; 377 | code_jitter->fencing_strategy = other.get_code_jitter().fencing_strategy; 378 | code_jitter->flushing_strategy = other.get_code_jitter().flushing_strategy; 379 | code_jitter->pattern_sync_each_ref = other.get_code_jitter().pattern_sync_each_ref; 380 | 381 | min_row = other.min_row; 382 | max_row = other.max_row; 383 | bank_no = other.bank_no; 384 | 385 | aggressor_to_addr = other.aggressor_to_addr; 386 | bit_flips = other.bit_flips; 387 | reproducibility_score = other.reproducibility_score; 388 | 389 | return *this; 390 | } 391 | 392 | void PatternAddressMapper::compute_mapping_stats(std::vector &agg_access_patterns, 393 | int &agg_intra_distance, int &agg_inter_distance, 394 | bool uses_seq_addresses) { 395 | Logger::log_info("Deriving mapping parameters from AggressorAccessPatterns."); 396 | 397 | // find first AggressorAccessPattern with more than one aggressor, then compute distance in-between aggressors 398 | agg_intra_distance = 0; 399 | for (auto &agg_access_pattern : agg_access_patterns) { 400 | if (agg_access_pattern.aggressors.size() > 1) { 401 | auto r1 = aggressor_to_addr.at(agg_access_pattern.aggressors.at(1).id).row; 402 | auto r0 = aggressor_to_addr.at(agg_access_pattern.aggressors.at(0).id).row; 403 | agg_intra_distance = static_cast(r1-r0); 404 | break; 405 | } 406 | } 407 | 408 | // if all consecutive AggressorAccessPatterns have the same inter-distance, then they use "sequential addresses" 409 | uses_seq_addresses = true; 410 | agg_inter_distance = -1; 411 | for (auto it = agg_access_patterns.begin(); it+1 != agg_access_patterns.end(); ++it) { 412 | auto this_size = it->aggressors.size(); 413 | auto this_row = 
aggressor_to_addr.at(it->aggressors.at(this_size-1).id).row; 414 | auto next_row = aggressor_to_addr.at((it+1)->aggressors.at(0).id).row; 415 | auto distance = static_cast(next_row - this_row); 416 | if (agg_inter_distance == -1) { 417 | agg_inter_distance = distance; 418 | } else if (agg_inter_distance != distance) { 419 | uses_seq_addresses = false; 420 | break; 421 | } 422 | } 423 | 424 | Logger::log_data(format_string("inter-distance v = %d", agg_inter_distance)); 425 | Logger::log_data(format_string("intra-distance d = %d", agg_intra_distance)); 426 | Logger::log_data(format_string("use_seq_addresses = %s", (uses_seq_addresses ? "true" : "false"))); 427 | } 428 | 429 | size_t PatternAddressMapper::count_bitflips() const { 430 | size_t sum = 0; 431 | for (const auto &bf : bit_flips) sum += bf.size(); 432 | return sum; 433 | } 434 | 435 | void PatternAddressMapper::remap_aggressors(DRAMAddr &new_location) { 436 | // determine the mapping with the smallest row no -- this is the start point where we apply our new location on 437 | size_t smallest_row_no = std::numeric_limits::max(); 438 | for (const auto &[id, addr]: aggressor_to_addr) { 439 | smallest_row_no = std::min(smallest_row_no, addr.row); 440 | } 441 | 442 | // compute offset between old start row and new start row 443 | size_t offset = new_location.row - smallest_row_no; 444 | 445 | // now update each mapping's address 446 | for (auto &[id, addr]: aggressor_to_addr) { 447 | // we just overwrite the bank 448 | addr.bank = new_location.bank; 449 | // for the row, we need to shift accordingly to preserve the distances between aggressors 450 | addr.row += offset; 451 | } 452 | } 453 | -------------------------------------------------------------------------------- /src/Fuzzer/PatternBuilder.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "Fuzzer/FuzzingParameterSet.hpp" 4 | #include "Fuzzer/PatternBuilder.hpp" 5 | 6 | 
// ---------------------------------------------------------------------------
// NOTE(review): Repo-dump rendering of src/Fuzzer/PatternBuilder.cpp
// (constructor through prefill_pattern) plus the header of
// src/Memory/DRAMAddr.cpp. Not compilable as-is: source lines are
// concatenated with their "NNN |" prefixes, template arguments are stripped
// ("std::vector &" lost its element type), and "&params" was mangled to
// "¶ms" by entity decoding -- confirm against the real file. Code is kept
// byte-identical; only these review comments were added.
//
// Contents: ctor seeding an mt19937 from random_device;
// get_random_gaussian() -- rejection sampling around the list midpoint (the
// code's own comment admits the retry loop is inefficient; the mean is also
// recomputed every iteration although it is loop-invariant);
// remove_smaller_than() -- erase-while-iterating filter;
// all_slots_full() -- returns first placeholder slot index at the given
// offset/period, or -1 when all are filled; fill_slots() -- writes an
// aggressor tuple 'amplitude' times per period (the "break"s matter, see its
// in-code comment); get_n_aggressors() -- monotonically increasing IDs (a
// commented-out cyclic variant is kept for reference);
// get_available_multiplicators() -- powers of two up to num_base_periods;
// get_next_prefilled_slot(); generate_frequency_based_pattern() -- the core
// frequency-based pattern generator (note: 'num_aggressors'/'cur_amplitude'
// are declared uninitialized but assigned in the loop body before the loop's
// increment expression reads them); prefill_pattern() -- resets
// aggressor_id_counter and re-ids the fixed aggressors.
// NOTE(review): the comment typo "amplitdue" is in the original source.
// ---------------------------------------------------------------------------
PatternBuilder::PatternBuilder(HammeringPattern &hammering_pattern) 7 | : pattern(hammering_pattern), aggressor_id_counter(1) { 8 | std::random_device rd; 9 | gen = std::mt19937(rd()); 10 | } 11 | 12 | size_t PatternBuilder::get_random_gaussian(std::vector &list) { 13 | // this 'repeat until we produce a valid value' approach is not very effective 14 | size_t result; 15 | do { 16 | auto mean = static_cast((list.size()%2==0) ? list.size()/2 - 1 : (list.size() - 1)/2); 17 | std::normal_distribution<> d(mean, 1); 18 | result = (size_t) d(gen); 19 | } while (result >= list.size()); 20 | return result; 21 | } 22 | 23 | void PatternBuilder::remove_smaller_than(std::vector &vec, int N) { 24 | for (auto it = vec.begin(); it != vec.end(); ) { 25 | if (*it < N) { 26 | it = vec.erase(it); 27 | } else { 28 | ++it; 29 | } 30 | } 31 | } 32 | 33 | int PatternBuilder::all_slots_full(size_t offset, size_t period, int pattern_length, std::vector &aggs) { 34 | for (size_t i = 0; i < aggs.size(); ++i) { 35 | auto idx = (offset + i*period)%pattern_length; 36 | if (aggs[idx].id==ID_PLACEHOLDER_AGG) return static_cast(idx); 37 | } 38 | return -1; 39 | } 40 | 41 | void PatternBuilder::fill_slots(const size_t start_period, 42 | const size_t period_length, 43 | const size_t amplitude, 44 | std::vector &aggressors, 45 | std::vector &accesses, 46 | size_t pattern_length) { 47 | 48 | // the "break"s are important here as the function we use to compute the next target index is not continuously 49 | // increasing, i.e., if we computed an invalid index in the innermost loop, increasing the loop in the middle may 50 | // produce an index that is still valid, therefore we need to "break" instead of returning directly 51 | 52 | // in each period_length... 53 | for (size_t period = start_period; period < pattern_length; period += period_length) { 54 | // .. for each amplitdue ... 
55 | for (size_t amp = 0; amp < amplitude; ++amp) { 56 | if (period + (aggressors.size()*amp) >= pattern_length) break; 57 | // .. fill in the aggressors 58 | for (size_t agg_idx = 0; agg_idx < aggressors.size(); ++agg_idx) { 59 | auto next_target = period + (aggressors.size()*amp) + agg_idx; 60 | if (next_target >= accesses.size()) { 61 | break; 62 | } 63 | accesses[next_target] = aggressors.at(agg_idx); 64 | } 65 | } 66 | } 67 | } 68 | 69 | void PatternBuilder::get_n_aggressors(size_t N, std::vector &aggs) { 70 | // clean any existing aggressor in the given vector 71 | aggs.clear(); 72 | 73 | // increment the aggressor ID cyclically, up to max_num_aggressors 74 | // for (size_t added_aggs = 0; added_aggs < N; aggressor_id_counter = ((aggressor_id_counter + 1)%max_num_aggressors)) { 75 | 76 | // increment the aggressor ID so that all aggressors in the abstract pattern are unique 77 | for (size_t added_aggs = 0; added_aggs < N; aggressor_id_counter = ((aggressor_id_counter + 1))) { 78 | aggs.emplace_back(aggressor_id_counter); 79 | added_aggs++; 80 | } 81 | } 82 | 83 | std::vector PatternBuilder::get_available_multiplicators(FuzzingParameterSet &fuzzing_params) { 84 | return get_available_multiplicators(fuzzing_params.get_num_base_periods()); 85 | } 86 | 87 | std::vector PatternBuilder::get_available_multiplicators(int num_base_periods) { 88 | // a multiplicator M is an integer such that 89 | // [1] (M * base_period) is a valid frequency 90 | // [2] M^2 is smaller-equal to num_base_periods 91 | std::vector allowed_multiplicators; 92 | for (size_t i = 0; static_cast(std::pow(2, i)) <= num_base_periods; ++i) { 93 | allowed_multiplicators.push_back(static_cast(std::pow(2, i))); 94 | } 95 | return allowed_multiplicators; 96 | } 97 | 98 | int PatternBuilder::get_next_prefilled_slot(size_t cur_idx, std::vector start_indices_prefilled_slots, int base_period, 99 | int &cur_prefilled_slots_idx) { 100 | // no prefilled pattern: use base_period as bound 101 | if 
(start_indices_prefilled_slots.empty()) 102 | return base_period; 103 | 104 | // prefilled pattern 105 | if ((int) cur_idx < start_indices_prefilled_slots[cur_prefilled_slots_idx]) { 106 | // keep using the current index of the next occupied slot 107 | return start_indices_prefilled_slots[cur_prefilled_slots_idx]; 108 | } else if ((size_t)cur_prefilled_slots_idx+1 < start_indices_prefilled_slots.size()) { 109 | // increment the index by one as we still didn't reach the end 110 | cur_prefilled_slots_idx++; 111 | return start_indices_prefilled_slots[cur_prefilled_slots_idx]; 112 | } else { 113 | // we already reached the end, from now on only the base period is our bound 114 | return base_period; 115 | } 116 | } 117 | 118 | void PatternBuilder::generate_frequency_based_pattern(FuzzingParameterSet ¶ms, 119 | int pattern_length, 120 | int base_period) { 121 | std::vector start_indices_prefilled_slots; 122 | auto cur_prefilled_slots_idx = 0; 123 | // this is a helper function that takes the current index (of base_period) and then returns the index of either the 124 | // next prefilled slot (if pattern was prefilled) or just returns the last index of the base period 125 | 126 | // we call this method also for filling up a prefilled pattern (during analysis stage) that already contains some 127 | // aggressor accesses, in that case we should not clear the aggressors vector 128 | if (pattern.aggressors.empty()) { 129 | pattern.aggressors = std::vector(pattern_length, Aggressor()); 130 | } else { 131 | // go through aggressors list and figure out prefilled slots but only keep the index of the start slot of a 132 | // prefilled contiguous area (e.g., "_ _ _ A1 A2 A3 _ _ A4 A5 _ _ _" would only record index of A1 and A4) 133 | bool in_prefilled_area = false; 134 | for (auto i = 0; i < base_period; ++i) { 135 | if (pattern.aggressors[i].id!=ID_PLACEHOLDER_AGG) { 136 | if (!in_prefilled_area) { 137 | in_prefilled_area = true; 138 | start_indices_prefilled_slots.push_back(i); 
139 | } 140 | } else { 141 | in_prefilled_area = false; 142 | } 143 | } 144 | } 145 | 146 | // the multiplicators are only dependent on the base period, i.e., we can precompute them once here 147 | std::vector allowed_multiplicators = get_available_multiplicators(params); 148 | pattern.max_period = allowed_multiplicators.back()*base_period; 149 | 150 | int cur_amplitude; 151 | int num_aggressors; 152 | auto cur_period = 0; 153 | 154 | // fill the "first" slot in the base period: this is the one that can have any possible frequency 155 | for (auto k = 0; k < base_period; k += (num_aggressors*cur_amplitude)) { 156 | std::vector aggressors; 157 | std::vector cur_multiplicators(allowed_multiplicators.begin(), allowed_multiplicators.end()); 158 | // if this slot is not filled yet -> we are generating a new pattern 159 | if (pattern.aggressors[k].id==ID_PLACEHOLDER_AGG) { 160 | auto cur_m = cur_multiplicators.at(get_random_gaussian(cur_multiplicators)); 161 | remove_smaller_than(cur_multiplicators, cur_m); 162 | cur_period = base_period*cur_m; 163 | 164 | if (start_indices_prefilled_slots.empty()) { 165 | // if there are no prefilled slots at any index: we are only limited by the base period 166 | num_aggressors = ((base_period - k)==1) ? 1 : params.get_random_N_sided(base_period - k); 167 | cur_amplitude = params.get_random_amplitude((base_period - k)/num_aggressors); 168 | } else { 169 | // if there are prefilled slots we need to pay attention to not overwrite them either by choosing too many 170 | // aggressors or by choosing an amplitude that is too large 171 | auto next_prefilled_idx = get_next_prefilled_slot(k, start_indices_prefilled_slots, 172 | base_period, cur_prefilled_slots_idx); 173 | num_aggressors = ((next_prefilled_idx - k)==1) ? 
1 : params.get_random_N_sided(next_prefilled_idx - k); 174 | cur_amplitude = params.get_random_amplitude((int) std::floor((next_prefilled_idx - k)/num_aggressors)); 175 | } 176 | get_n_aggressors(num_aggressors, aggressors); 177 | 178 | pattern.agg_access_patterns.emplace_back(cur_period, cur_amplitude, aggressors, k); 179 | fill_slots(k, cur_period, cur_amplitude, aggressors, pattern.aggressors, pattern_length); 180 | } else { // this slot is already filled -> this is a prefilled pattern 181 | // determine the number of aggressors (num_aggressors) and the amplitude (cur_amplitude) based on the information 182 | // in the associated AggressorAccessPattern of this Aggressor 183 | auto agg_acc_patt = pattern.get_access_pattern_by_aggressor(pattern.aggressors[k]); 184 | remove_smaller_than(cur_multiplicators, static_cast(agg_acc_patt.frequency)/base_period); 185 | num_aggressors = static_cast(agg_acc_patt.aggressors.size()); 186 | cur_amplitude = agg_acc_patt.amplitude; 187 | } 188 | 189 | // fill all the remaining slots, i.e., slots at the same offset but in those base period that were not filled up 190 | // by the previously added aggressor pair. if frequency = base period, then there's nothing to do here as everything 191 | // is already filled up. 
192 | // example where previously added aggressor pair (A1,A2) has frequency = base_period/2: 193 | // | A1 A2 _ _ _ _ | _ _ _ _ _ _ | A1 A2 _ _ _ _ | _ _ _ _ _ _ | A1 A2 _ _ _ _ | 194 | // ^ ^ ^ ^ 195 | // the slots marked by '^' are the ones that we are filling up in the following loop 196 | for (auto next_slot = all_slots_full(k, base_period, pattern_length, pattern.aggressors); 197 | next_slot!=-1; 198 | next_slot = all_slots_full(k, base_period, pattern_length, pattern.aggressors)) { 199 | auto cur_m2 = cur_multiplicators.at(get_random_gaussian(cur_multiplicators)); 200 | remove_smaller_than(cur_multiplicators, cur_m2); 201 | cur_period = base_period*cur_m2; 202 | get_n_aggressors(num_aggressors, aggressors); 203 | pattern.agg_access_patterns.emplace_back(cur_period, cur_amplitude, aggressors, next_slot); 204 | fill_slots(static_cast(next_slot), cur_period, cur_amplitude, aggressors, pattern.aggressors, pattern_length); 205 | } 206 | } 207 | 208 | // update information in HammeringPattern s.t. 
it will be included into the JSON export 209 | pattern.total_activations = static_cast(pattern.aggressors.size()); 210 | pattern.num_refresh_intervals = params.get_num_refresh_intervals(); 211 | } 212 | 213 | void PatternBuilder::generate_frequency_based_pattern(FuzzingParameterSet ¶ms) { 214 | generate_frequency_based_pattern(params, params.get_total_acts_pattern(), params.get_base_period()); 215 | } 216 | 217 | void PatternBuilder::prefill_pattern(int pattern_total_acts, 218 | std::vector &fixed_aggs) { 219 | aggressor_id_counter = 1; 220 | pattern.aggressors = std::vector(static_cast(pattern_total_acts), Aggressor()); 221 | for (auto &aap : fixed_aggs) { 222 | for (auto &agg : aap.aggressors) agg.id = aggressor_id_counter++; 223 | fill_slots(aap.start_offset, aap.frequency, aap.amplitude, aap.aggressors, pattern.aggressors, 224 | static_cast(pattern_total_acts)); 225 | pattern.agg_access_patterns.push_back(aap); 226 | } 227 | } 228 | -------------------------------------------------------------------------------- /src/Memory/DRAMAddr.cpp: -------------------------------------------------------------------------------- 1 | #include "Memory/DRAMAddr.hpp" 2 | #include "GlobalDefines.hpp" 3 | 4 | // initialize static variable 5 | std::map DRAMAddr::Configs; 6 | 7 | void DRAMAddr::initialize(uint64_t num_bank_rank_functions, volatile char *start_address) { 8 | // TODO: This is a shortcut to check if it's a single rank dimm or dual rank in order to load the right memory 9 | // configuration. We should get these infos from dmidecode to do it properly, but for now this is easier. 
// ---------------------------------------------------------------------------
// NOTE(review): Repo-dump rendering of src/Memory/DRAMAddr.cpp (body of
// initialize() through from_json, including the single-rank and dual-rank
// address-translation matrices) plus the header of
// src/Memory/DramAnalyzer.cpp. Not compilable as-is: source lines are
// concatenated with "NNN |" prefixes and template arguments were stripped
// (e.g. "std::map DRAMAddr::Configs" on the previous chunk line was
// presumably "std::map<mem_config_t, MemConfiguration>") -- confirm against
// the real file. Code is kept byte-identical; only these review comments
// were added.
//
// Contents: initialize() infers #ranks from the number of bank/rank
// functions (5 -> 2 ranks, 4 -> 1 rank, otherwise exit(1));
// set_base_msb() masks off the low 30 bits of the buffer base (bits above
// the 1 GB superpage, per its comment); DRAM<->virtual translation via
// per-bit XOR/parity of the DRAM_MTX / ADDR_MTX rows (bank/row/col fields
// packed by the *_SHIFT/*_MASK constants); sprintf-based string formatting;
// add()/add_inplace(); ENABLE_JSON-guarded memcfg/DRAMAddr (de)serialization;
// initialize_configs() builds the two hard-coded configs keyed by
// CHANS|DIMMS|RANKS|BANKS.
// NOTE(review): to_string_compact() prints with "%ld" although bank/row/col
// are printed with "%zu" in to_string(); if they are size_t this is a
// format/type mismatch on some ABIs -- presumably should be "%zu". TODO
// confirm field types in DRAMAddr.hpp.
// ---------------------------------------------------------------------------
10 | size_t num_ranks; 11 | if (num_bank_rank_functions==5) { 12 | num_ranks = RANKS(2); 13 | } else if (num_bank_rank_functions==4) { 14 | num_ranks = RANKS(1); 15 | } else { 16 | Logger::log_error("Could not initialize DRAMAddr as #ranks seems not to be 1 or 2."); 17 | exit(1); 18 | } 19 | DRAMAddr::load_mem_config((CHANS(CHANNEL) | DIMMS(DIMM) | num_ranks | BANKS(NUM_BANKS))); 20 | DRAMAddr::set_base_msb((void *) start_address); 21 | } 22 | 23 | void DRAMAddr::set_base_msb(void *buff) { 24 | base_msb = (size_t) buff & (~((size_t) (1ULL << 30UL) - 1UL)); // get higher order bits above the super page 25 | } 26 | 27 | // TODO we can create a DRAMconfig class to load the right matrix depending on 28 | // the configuration. You could also test it by checking if you can trigger bank conflcits 29 | void DRAMAddr::load_mem_config(mem_config_t cfg) { 30 | DRAMAddr::initialize_configs(); 31 | MemConfig = Configs[cfg]; 32 | } 33 | 34 | DRAMAddr::DRAMAddr() = default; 35 | 36 | DRAMAddr::DRAMAddr(size_t bk, size_t r, size_t c) { 37 | bank = bk; 38 | row = r; 39 | col = c; 40 | } 41 | 42 | DRAMAddr::DRAMAddr(void *addr) { 43 | auto p = (size_t) addr; 44 | size_t res = 0; 45 | for (unsigned long i : MemConfig.DRAM_MTX) { 46 | res <<= 1ULL; 47 | res |= (size_t) __builtin_parityl(p & i); 48 | } 49 | bank = (res >> MemConfig.BK_SHIFT) & MemConfig.BK_MASK; 50 | row = (res >> MemConfig.ROW_SHIFT) & MemConfig.ROW_MASK; 51 | col = (res >> MemConfig.COL_SHIFT) & MemConfig.COL_MASK; 52 | } 53 | 54 | size_t DRAMAddr::linearize() const { 55 | return (this->bank << MemConfig.BK_SHIFT) | (this->row << MemConfig.ROW_SHIFT) | (this->col << MemConfig.COL_SHIFT); 56 | } 57 | 58 | void *DRAMAddr::to_virt() { 59 | return const_cast(this)->to_virt(); 60 | } 61 | 62 | void *DRAMAddr::to_virt() const { 63 | size_t res = 0; 64 | size_t l = this->linearize(); 65 | for (unsigned long i : MemConfig.ADDR_MTX) { 66 | res <<= 1ULL; 67 | res |= (size_t) __builtin_parityl(l & i); 68 | } 69 | void *v_addr = 
(void *) (base_msb | res); 70 | return v_addr; 71 | } 72 | 73 | std::string DRAMAddr::to_string() { 74 | char buff[1024]; 75 | sprintf(buff, "DRAMAddr(b: %zu, r: %zu, c: %zu) = %p", 76 | this->bank, 77 | this->row, 78 | this->col, 79 | this->to_virt()); 80 | return std::string(buff); 81 | } 82 | 83 | std::string DRAMAddr::to_string_compact() const { 84 | char buff[1024]; 85 | sprintf(buff, "(%ld,%ld,%ld)", 86 | this->bank, 87 | this->row, 88 | this->col); 89 | return std::string(buff); 90 | } 91 | 92 | DRAMAddr DRAMAddr::add(size_t bank_increment, size_t row_increment, size_t column_increment) const { 93 | return {bank + bank_increment, row + row_increment, col + column_increment}; 94 | } 95 | 96 | void DRAMAddr::add_inplace(size_t bank_increment, size_t row_increment, size_t column_increment) { 97 | bank += bank_increment; 98 | row += row_increment; 99 | col += column_increment; 100 | } 101 | 102 | // Define the static DRAM configs 103 | MemConfiguration DRAMAddr::MemConfig; 104 | size_t DRAMAddr::base_msb; 105 | 106 | #ifdef ENABLE_JSON 107 | 108 | nlohmann::json DRAMAddr::get_memcfg_json() { 109 | std::map memcfg_to_json = { 110 | {(CHANS(1UL) | DIMMS(1UL) | RANKS(1UL) | BANKS(16UL)), 111 | nlohmann::json{ 112 | {"channels", 1}, 113 | {"dimms", 1}, 114 | {"ranks", 1}, 115 | {"banks", 16}}}, 116 | {(CHANS(1UL) | DIMMS(1UL) | RANKS(2UL) | BANKS(16UL)), 117 | nlohmann::json{ 118 | {"channels", 1}, 119 | {"dimms", 1}, 120 | {"ranks", 2}, 121 | {"banks", 16}}} 122 | }; 123 | return memcfg_to_json[MemConfig.IDENTIFIER]; 124 | } 125 | 126 | #endif 127 | 128 | void DRAMAddr::initialize_configs() { 129 | struct MemConfiguration single_rank = { 130 | .IDENTIFIER = (CHANS(1UL) | DIMMS(1UL) | RANKS(1UL) | BANKS(16UL)), 131 | .BK_SHIFT = 26, 132 | .BK_MASK = (0b1111), 133 | .ROW_SHIFT = 0, 134 | .ROW_MASK = (0b1111111111111), 135 | .COL_SHIFT = 13, 136 | .COL_MASK = (0b1111111111111), 137 | /* maps a virtual addr -> DRAM addr: bank (4 bits) | col (13 bits) | row (13 bits) */ 
138 | .DRAM_MTX = { 139 | 0b000000000000000010000001000000, /* 0x02040 bank b3 = addr b6 + b13 */ 140 | 0b000000000000100100000000000000, /* 0x24000 bank b2 = addr b14 + b17 */ 141 | 0b000000000001001000000000000000, /* 0x48000 bank b1 = addr b15 + b18 */ 142 | 0b000000000010010000000000000000, /* 0x90000 bank b0 = addr b16 + b19 */ 143 | 0b000000000000000010000000000000, /* col b12 = addr b13 */ 144 | 0b000000000000000001000000000000, /* col b11 = addr b12 */ 145 | 0b000000000000000000100000000000, /* col b10 = addr b11 */ 146 | 0b000000000000000000010000000000, /* col b9 = addr b10 */ 147 | 0b000000000000000000001000000000, /* col b8 = addr b9 */ 148 | 0b000000000000000000000100000000, /* col b7 = addr b8*/ 149 | 0b000000000000000000000010000000, /* col b6 = addr b7 */ 150 | 0b000000000000000000000000100000, /* col b5 = addr b5 */ 151 | 0b000000000000000000000000010000, /* col b4 = addr b4*/ 152 | 0b000000000000000000000000001000, /* col b3 = addr b3 */ 153 | 0b000000000000000000000000000100, /* col b2 = addr b2 */ 154 | 0b000000000000000000000000000010, /* col b1 = addr b1 */ 155 | 0b000000000000000000000000000001, /* col b0 = addr b0*/ 156 | 0b100000000000000000000000000000, /* row b12 = addr b29 */ 157 | 0b010000000000000000000000000000, /* row b11 = addr b28 */ 158 | 0b001000000000000000000000000000, /* row b10 = addr b27 */ 159 | 0b000100000000000000000000000000, /* row b9 = addr b26 */ 160 | 0b000010000000000000000000000000, /* row b8 = addr b25 */ 161 | 0b000001000000000000000000000000, /* row b7 = addr b24 */ 162 | 0b000000100000000000000000000000, /* row b6 = addr b23 */ 163 | 0b000000010000000000000000000000, /* row b5 = addr b22 */ 164 | 0b000000001000000000000000000000, /* row b4 = addr b21 */ 165 | 0b000000000100000000000000000000, /* row b3 = addr b20 */ 166 | 0b000000000010000000000000000000, /* row b2 = addr b19 */ 167 | 0b000000000001000000000000000000, /* row b1 = addr b18 */ 168 | 0b000000000000100000000000000000, /* row b0 = addr b17 */ 169 | 
}, 170 | /* maps a DRAM addr (bank | col | row) --> virtual addr */ 171 | .ADDR_MTX = { 172 | 0b000000000000000001000000000000, /* addr b29 = row b12 */ 173 | 0b000000000000000000100000000000, /* addr b28 = row b11 */ 174 | 0b000000000000000000010000000000, /* addr b27 = row b10 */ 175 | 0b000000000000000000001000000000, /* addr b26 = row b9 */ 176 | 0b000000000000000000000100000000, /* addr b25 = row b8 */ 177 | 0b000000000000000000000010000000, /* addr b24 = row b7 */ 178 | 0b000000000000000000000001000000, /* addr b23 = row b6 */ 179 | 0b000000000000000000000000100000, /* addr b22 = row b5 */ 180 | 0b000000000000000000000000010000, /* addr b21 = row b4 */ 181 | 0b000000000000000000000000001000, /* addr b20 = row b3 */ 182 | 0b000000000000000000000000000100, /* addr b19 = row b2 */ 183 | 0b000000000000000000000000000010, /* addr b18 = row b1 */ 184 | 0b000000000000000000000000000001, /* addr b17 = row b0 */ 185 | 0b000100000000000000000000000100, /* addr b16 = bank b0 + row b2 (addr b19) */ 186 | 0b001000000000000000000000000010, /* addr b15 = bank b1 + row b1 (addr b18) */ 187 | 0b010000000000000000000000000001, /* addr b14 = bank b2 + row b0 (addr b17) */ 188 | 0b000010000000000000000000000000, /* addr b13 = col b12 */ 189 | 0b000001000000000000000000000000, /* addr b12 = col b11 */ 190 | 0b000000100000000000000000000000, /* addr b11 = col b10 */ 191 | 0b000000010000000000000000000000, /* addr b10 = col b9 */ 192 | 0b000000001000000000000000000000, /* addr b9 = col b8 */ 193 | 0b000000000100000000000000000000, /* addr b8 = col b7 */ 194 | 0b000000000010000000000000000000, /* addr b7 = col b6 */ 195 | 0b100010000000000000000000000000, /* addr b6 = bank b3 + col b12 (addr b13)*/ 196 | 0b000000000001000000000000000000, /* addr b5 = col b5 */ 197 | 0b000000000000100000000000000000, /* addr b4 = col b4 */ 198 | 0b000000000000010000000000000000, /* addr b3 = col b3 */ 199 | 0b000000000000001000000000000000, /* addr b2 = col b2 */ 200 | 
0b000000000000000100000000000000, /* addr b1 = col b1 */ 201 | 0b000000000000000010000000000000 /* addr b0 = col b0 */ 202 | } 203 | }; 204 | struct MemConfiguration dual_rank = { 205 | .IDENTIFIER = (CHANS(1UL) | DIMMS(1UL) | RANKS(2UL) | BANKS(16UL)), 206 | .BK_SHIFT = 25, 207 | .BK_MASK = (0b11111), 208 | .ROW_SHIFT = 0, 209 | .ROW_MASK = (0b111111111111), 210 | .COL_SHIFT = 12, 211 | .COL_MASK = (0b1111111111111), 212 | .DRAM_MTX = { 213 | 0b000000000000000010000001000000, 214 | 0b000000000001000100000000000000, 215 | 0b000000000010001000000000000000, 216 | 0b000000000100010000000000000000, 217 | 0b000000001000100000000000000000, 218 | 0b000000000000000010000000000000, 219 | 0b000000000000000001000000000000, 220 | 0b000000000000000000100000000000, 221 | 0b000000000000000000010000000000, 222 | 0b000000000000000000001000000000, 223 | 0b000000000000000000000100000000, 224 | 0b000000000000000000000010000000, 225 | 0b000000000000000000000000100000, 226 | 0b000000000000000000000000010000, 227 | 0b000000000000000000000000001000, 228 | 0b000000000000000000000000000100, 229 | 0b000000000000000000000000000010, 230 | 0b000000000000000000000000000001, 231 | 0b100000000000000000000000000000, 232 | 0b010000000000000000000000000000, 233 | 0b001000000000000000000000000000, 234 | 0b000100000000000000000000000000, 235 | 0b000010000000000000000000000000, 236 | 0b000001000000000000000000000000, 237 | 0b000000100000000000000000000000, 238 | 0b000000010000000000000000000000, 239 | 0b000000001000000000000000000000, 240 | 0b000000000100000000000000000000, 241 | 0b000000000010000000000000000000, 242 | 0b000000000001000000000000000000 243 | }, 244 | .ADDR_MTX = { 245 | 0b000000000000000000100000000000, 246 | 0b000000000000000000010000000000, 247 | 0b000000000000000000001000000000, 248 | 0b000000000000000000000100000000, 249 | 0b000000000000000000000010000000, 250 | 0b000000000000000000000001000000, 251 | 0b000000000000000000000000100000, 252 | 0b000000000000000000000000010000, 253 | 
0b000000000000000000000000001000, 254 | 0b000000000000000000000000000100, 255 | 0b000000000000000000000000000010, 256 | 0b000000000000000000000000000001, 257 | 0b000010000000000000000000001000, 258 | 0b000100000000000000000000000100, 259 | 0b001000000000000000000000000010, 260 | 0b010000000000000000000000000001, 261 | 0b000001000000000000000000000000, 262 | 0b000000100000000000000000000000, 263 | 0b000000010000000000000000000000, 264 | 0b000000001000000000000000000000, 265 | 0b000000000100000000000000000000, 266 | 0b000000000010000000000000000000, 267 | 0b000000000001000000000000000000, 268 | 0b100001000000000000000000000000, 269 | 0b000000000000100000000000000000, 270 | 0b000000000000010000000000000000, 271 | 0b000000000000001000000000000000, 272 | 0b000000000000000100000000000000, 273 | 0b000000000000000010000000000000, 274 | 0b000000000000000001000000000000 275 | } 276 | }; 277 | DRAMAddr::Configs = { 278 | {(CHANS(1UL) | DIMMS(1UL) | RANKS(1UL) | BANKS(16UL)), single_rank}, 279 | {(CHANS(1UL) | DIMMS(1UL) | RANKS(2UL) | BANKS(16UL)), dual_rank} 280 | }; 281 | } 282 | 283 | #ifdef ENABLE_JSON 284 | 285 | void to_json(nlohmann::json &j, const DRAMAddr &p) { 286 | j = {{"bank", p.bank}, 287 | {"row", p.row}, 288 | {"col", p.col} 289 | }; 290 | } 291 | 292 | void from_json(const nlohmann::json &j, DRAMAddr &p) { 293 | j.at("bank").get_to(p.bank); 294 | j.at("row").get_to(p.row); 295 | j.at("col").get_to(p.col); 296 | } 297 | 298 | #endif 299 | -------------------------------------------------------------------------------- /src/Memory/DramAnalyzer.cpp: -------------------------------------------------------------------------------- 1 | #include "Memory/DramAnalyzer.hpp" 2 | 3 | #include 4 | #include 5 | 6 | void DramAnalyzer::find_bank_conflicts() { 7 | size_t nr_banks_cur = 0; 8 | int remaining_tries = NUM_BANKS*256; // experimentally determined, may be unprecise 9 | while (nr_banks_cur < NUM_BANKS && remaining_tries > 0) { 10 | reset: 11 | remaining_tries--; 12 | 
auto a1 = start_address + (dist(gen)%(MEM_SIZE/64))*64; 13 | auto a2 = start_address + (dist(gen)%(MEM_SIZE/64))*64; 14 | auto ret1 = measure_time(a1, a2); 15 | auto ret2 = measure_time(a1, a2); 16 | 17 | if ((ret1 > THRESH) && (ret2 > THRESH)) { 18 | bool all_banks_set = true; 19 | for (size_t i = 0; i < NUM_BANKS; i++) { 20 | if (banks.at(i).empty()) { 21 | all_banks_set = false; 22 | } else { 23 | auto bank = banks.at(i); 24 | ret1 = measure_time(a1, bank[0]); 25 | ret2 = measure_time(a2, bank[0]); 26 | if ((ret1 > THRESH) || (ret2 > THRESH)) { 27 | // possibly noise if only exactly one is true, 28 | // i.e., (ret1 > THRESH) or (ret2 > THRESH) 29 | goto reset; 30 | } 31 | } 32 | } 33 | 34 | // stop if we already determined addresses for each bank 35 | if (all_banks_set) return; 36 | 37 | // store addresses found for each bank 38 | assert(banks.at(nr_banks_cur).empty() && "Bank not empty"); 39 | banks.at(nr_banks_cur).push_back(a1); 40 | banks.at(nr_banks_cur).push_back(a2); 41 | nr_banks_cur++; 42 | } 43 | if (remaining_tries==0) { 44 | Logger::log_error(format_string( 45 | "Could not find conflicting address sets. 
Is the number of banks (%d) defined correctly?", 46 | (int) NUM_BANKS)); 47 | exit(1); 48 | } 49 | } 50 | 51 | Logger::log_info("Found bank conflicts."); 52 | for (auto &bank : banks) { 53 | find_targets(bank); 54 | } 55 | Logger::log_info("Populated addresses from different banks."); 56 | } 57 | 58 | void DramAnalyzer::find_targets(std::vector &target_bank) { 59 | // create an unordered set of the addresses in the target bank for a quick lookup 60 | // std::unordered_set tmp; tmp.insert(target_bank.begin(), target_bank.end()); 61 | std::unordered_set tmp(target_bank.begin(), target_bank.end()); 62 | target_bank.clear(); 63 | size_t num_repetitions = 5; 64 | while (tmp.size() < 10) { 65 | auto a1 = start_address + (dist(gen)%(MEM_SIZE/64))*64; 66 | if (tmp.count(a1) > 0) continue; 67 | uint64_t cumulative_times = 0; 68 | for (size_t i = 0; i < num_repetitions; i++) { 69 | for (const auto &addr : tmp) { 70 | cumulative_times += measure_time(a1, addr); 71 | } 72 | } 73 | cumulative_times /= num_repetitions; 74 | if ((cumulative_times/tmp.size()) > THRESH) { 75 | tmp.insert(a1); 76 | target_bank.push_back(a1); 77 | } 78 | } 79 | } 80 | 81 | DramAnalyzer::DramAnalyzer(volatile char *target) : 82 | row_function(0), start_address(target) { 83 | std::random_device rd; 84 | gen = std::mt19937(rd()); 85 | dist = std::uniform_int_distribution<>(0, std::numeric_limits::max()); 86 | banks = std::vector>(NUM_BANKS, std::vector()); 87 | } 88 | 89 | std::vector DramAnalyzer::get_bank_rank_functions() { 90 | return bank_rank_functions; 91 | } 92 | 93 | void DramAnalyzer::load_known_functions(int num_ranks) { 94 | if (num_ranks==1) { 95 | bank_rank_functions = std::vector({0x2040, 0x24000, 0x48000, 0x90000}); 96 | row_function = 0x3ffe0000; 97 | } else if (num_ranks==2) { 98 | bank_rank_functions = std::vector({0x2040, 0x44000, 0x88000, 0x110000, 0x220000}); 99 | row_function = 0x3ffc0000; 100 | } else { 101 | Logger::log_error("Cannot load bank/rank and row function if num_ranks 
is not 1 or 2."); 102 | exit(1); 103 | } 104 | 105 | Logger::log_info("Loaded bank/rank and row function:"); 106 | Logger::log_data(format_string("Row function 0x%" PRIx64, row_function)); 107 | std::stringstream ss; 108 | ss << "Bank/rank functions (" << bank_rank_functions.size() << "): "; 109 | for (auto bank_rank_function : bank_rank_functions) { 110 | ss << "0x" << std::hex << bank_rank_function << " "; 111 | } 112 | Logger::log_data(ss.str()); 113 | } 114 | 115 | size_t DramAnalyzer::count_acts_per_trefi() { 116 | size_t skip_first_N = 50; 117 | // pick two random same-bank addresses 118 | volatile char *a = banks.at(0).at(0); 119 | volatile char *b = banks.at(0).at(1); 120 | 121 | std::vector acts; 122 | uint64_t running_sum = 0; 123 | uint64_t before; 124 | uint64_t after; 125 | uint64_t count = 0; 126 | uint64_t count_old = 0; 127 | 128 | // computes the standard deviation 129 | auto compute_std = [](std::vector &values, uint64_t running_sum, size_t num_numbers) { 130 | double mean = static_cast(running_sum)/static_cast(num_numbers); 131 | double var = 0; 132 | for (const auto &num : values) { 133 | if (static_cast(num) < mean) continue; 134 | var += std::pow(static_cast(num) - mean, 2); 135 | } 136 | auto val = std::sqrt(var/static_cast(num_numbers)); 137 | return val; 138 | }; 139 | 140 | for (size_t i = 0;; i++) { 141 | // flush a and b from caches 142 | clflushopt(a); 143 | clflushopt(b); 144 | mfence(); 145 | 146 | // get start timestamp and wait until we retrieved it 147 | before = rdtscp(); 148 | lfence(); 149 | 150 | // do DRAM accesses 151 | (void)*a; 152 | (void)*b; 153 | 154 | // get end timestamp 155 | after = rdtscp(); 156 | 157 | count++; 158 | if ((after - before) > 1000) { 159 | if (i > skip_first_N && count_old!=0) { 160 | // multiply by 2 to account for both accesses we do (a, b) 161 | uint64_t value = (count - count_old)*2; 162 | acts.push_back(value); 163 | running_sum += value; 164 | // check after each 200 data points if our standard 
deviation reached 1 -> then stop collecting measurements 165 | if ((acts.size()%200)==0 && compute_std(acts, running_sum, acts.size())<3.0) break; 166 | } 167 | count_old = count; 168 | } 169 | } 170 | 171 | auto activations = (running_sum/acts.size()); 172 | Logger::log_info("Determined the number of possible ACTs per refresh interval."); 173 | Logger::log_data(format_string("num_acts_per_tREFI: %lu", activations)); 174 | 175 | return activations; 176 | } 177 | -------------------------------------------------------------------------------- /src/Memory/Memory.cpp: -------------------------------------------------------------------------------- 1 | #include "Memory/Memory.hpp" 2 | 3 | #include 4 | 5 | /// Allocates a MEM_SIZE bytes of memory by using super or huge pages. 6 | void Memory::allocate_memory(size_t mem_size) { 7 | this->size = mem_size; 8 | volatile char *target = nullptr; 9 | FILE *fp; 10 | 11 | if (superpage) { 12 | // allocate memory using super pages 13 | fp = fopen(hugetlbfs_mountpoint.c_str(), "w+"); 14 | if (fp==nullptr) { 15 | Logger::log_info(format_string("Could not mount superpage from %s. 
Error:", hugetlbfs_mountpoint.c_str())); 16 | Logger::log_data(std::strerror(errno)); 17 | exit(EXIT_FAILURE); 18 | } 19 | auto mapped_target = mmap((void *) start_address, MEM_SIZE, PROT_READ | PROT_WRITE, 20 | MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | (30UL << MAP_HUGE_SHIFT), fileno(fp), 0); 21 | if (mapped_target==MAP_FAILED) { 22 | perror("mmap"); 23 | exit(EXIT_FAILURE); 24 | } 25 | target = (volatile char*) mapped_target; 26 | } else { 27 | // allocate memory using huge pages 28 | assert(posix_memalign((void **) &target, MEM_SIZE, MEM_SIZE)==0); 29 | assert(madvise((void *) target, MEM_SIZE, MADV_HUGEPAGE)==0); 30 | memset((char *) target, 'A', MEM_SIZE); 31 | // for khugepaged 32 | Logger::log_info("Waiting for khugepaged."); 33 | sleep(10); 34 | } 35 | 36 | if (target!=start_address) { 37 | Logger::log_error(format_string("Could not create mmap area at address %p, instead using %p.", 38 | start_address, target)); 39 | start_address = target; 40 | } 41 | 42 | // initialize memory with random but reproducible sequence of numbers 43 | initialize(DATA_PATTERN::RANDOM); 44 | } 45 | 46 | void Memory::initialize(DATA_PATTERN data_pattern) { 47 | Logger::log_info("Initializing memory with pseudorandom sequence."); 48 | 49 | // for each page in the address space [start, end] 50 | for (uint64_t cur_page = 0; cur_page < size; cur_page += getpagesize()) { 51 | // reseed rand to have a sequence of reproducible numbers, using this we can compare the initialized values with 52 | // those after hammering to see whether bit flips occurred 53 | srand(static_cast(cur_page*getpagesize())); 54 | for (uint64_t cur_pageoffset = 0; cur_pageoffset < (uint64_t) getpagesize(); cur_pageoffset += sizeof(int)) { 55 | 56 | int fill_value = 0; 57 | if (data_pattern == DATA_PATTERN::RANDOM) { 58 | fill_value = rand(); 59 | } else if (data_pattern == DATA_PATTERN::ZEROES) { 60 | fill_value = 0; 61 | } else if (data_pattern == DATA_PATTERN::ONES) { 62 | fill_value = 1; 63 | } else { 64 | 
Logger::log_error("Could not initialize memory with given (unknown) DATA_PATTERN."); 65 | } 66 | 67 | // write (pseudo)random 4 bytes 68 | uint64_t offset = cur_page + cur_pageoffset; 69 | *((int *) (start_address + offset)) = fill_value; 70 | } 71 | } 72 | } 73 | 74 | size_t Memory::check_memory(PatternAddressMapper &mapping, bool reproducibility_mode, bool verbose) { 75 | flipped_bits.clear(); 76 | 77 | auto victim_rows = mapping.get_victim_rows(); 78 | if (verbose) Logger::log_info(format_string("Checking %zu victims for bit flips.", victim_rows.size())); 79 | 80 | size_t sum_found_bitflips = 0; 81 | for (const auto &victim_row : victim_rows) { 82 | auto victim_dram_addr = DRAMAddr((char*)victim_row); 83 | victim_dram_addr.add_inplace(0, 1, 0); 84 | sum_found_bitflips += check_memory_internal(mapping, victim_row, 85 | (volatile char *) victim_dram_addr.to_virt(), reproducibility_mode, verbose); 86 | } 87 | return sum_found_bitflips; 88 | } 89 | 90 | size_t Memory::check_memory(const volatile char *start, const volatile char *end) { 91 | flipped_bits.clear(); 92 | // create a "fake" pattern mapping to keep this method for backward compatibility 93 | PatternAddressMapper pattern_mapping; 94 | return check_memory_internal(pattern_mapping, start, end, false, true); 95 | } 96 | 97 | size_t Memory::check_memory_internal(PatternAddressMapper &mapping, 98 | const volatile char *start, 99 | const volatile char *end, 100 | bool reproducibility_mode, 101 | bool verbose) { 102 | // counter for the number of found bit flips in the memory region [start, end] 103 | size_t found_bitflips = 0; 104 | 105 | if (start==nullptr || end==nullptr || ((uint64_t) start >= (uint64_t) end)) { 106 | Logger::log_error("Function check_memory called with invalid arguments."); 107 | Logger::log_data(format_string("Start addr.: %s", DRAMAddr((void *) start).to_string().c_str())); 108 | Logger::log_data(format_string("End addr.: %s", DRAMAddr((void *) end).to_string().c_str())); 109 | return 
found_bitflips; 110 | } 111 | 112 | auto start_offset = (uint64_t) (start - start_address); 113 | 114 | const auto pagesize = static_cast(getpagesize()); 115 | start_offset = (start_offset/pagesize)*pagesize; 116 | 117 | auto end_offset = start_offset + (uint64_t) (end - start); 118 | end_offset = (end_offset/pagesize)*pagesize; 119 | 120 | void *page_raw = malloc(pagesize); 121 | if (page_raw == nullptr) { 122 | Logger::log_error("Could not create temporary page for memory comparison."); 123 | exit(EXIT_FAILURE); 124 | } 125 | memset(page_raw, 0, pagesize); 126 | int *page = (int*)page_raw; 127 | 128 | // for each page (4K) in the address space [start, end] 129 | for (uint64_t i = start_offset; i < end_offset; i += pagesize) { 130 | // reseed rand to have the desired sequence of reproducible numbers 131 | srand(static_cast(i*pagesize)); 132 | 133 | // fill comparison page with expected values generated by rand() 134 | for (size_t j = 0; j < (unsigned long) pagesize/sizeof(int); ++j) 135 | page[j] = rand(); 136 | 137 | uint64_t addr = ((uint64_t)start_address+i); 138 | 139 | // check if any bit flipped in the page using the fast memcmp function, if any flip occurred we need to iterate over 140 | // each byte one-by-one (much slower), otherwise we just continue with the next page 141 | if ((addr+ pagesize) < ((uint64_t)start_address+size) && memcmp((void*)addr, (void*)page, pagesize) == 0) 142 | continue; 143 | 144 | // iterate over blocks of 4 bytes (=sizeof(int)) 145 | for (uint64_t j = 0; j < (uint64_t) pagesize; j += sizeof(int)) { 146 | uint64_t offset = i + j; 147 | volatile char *cur_addr = start_address + offset; 148 | 149 | // if this address is outside the superpage we must not proceed to avoid segfault 150 | if ((uint64_t)cur_addr >= ((uint64_t)start_address+size)) 151 | continue; 152 | 153 | // clear the cache to make sure we do not access a cached value 154 | clflushopt(cur_addr); 155 | mfence(); 156 | 157 | // if the bit did not flip -> continue 
checking next block 158 | int expected_rand_value = page[j/sizeof(int)]; 159 | if (*((int *) cur_addr)==expected_rand_value) 160 | continue; 161 | 162 | // if the bit flipped -> compare byte per byte 163 | for (unsigned long c = 0; c < sizeof(int); c++) { 164 | volatile char *flipped_address = cur_addr + c; 165 | if (*flipped_address != ((char *) &expected_rand_value)[c]) { 166 | const auto flipped_addr_dram = DRAMAddr((void *) flipped_address); 167 | assert(flipped_address == (volatile char*)flipped_addr_dram.to_virt()); 168 | const auto flipped_addr_value = *(unsigned char *) flipped_address; 169 | const auto expected_value = ((unsigned char *) &expected_rand_value)[c]; 170 | if (verbose) { 171 | Logger::log_bitflip(flipped_address, flipped_addr_dram.row, 172 | expected_value, flipped_addr_value, (size_t) time(nullptr), true); 173 | } 174 | // store detailed information about the bit flip 175 | BitFlip bitflip(flipped_addr_dram, (expected_value ^ flipped_addr_value), flipped_addr_value); 176 | // ..in the mapping that triggered this bit flip 177 | if (!reproducibility_mode) { 178 | if (mapping.bit_flips.empty()) { 179 | Logger::log_error("Cannot store bit flips found in given address mapping.\n" 180 | "You need to create an empty vector in PatternAddressMapper::bit_flips before calling " 181 | "check_memory."); 182 | } 183 | mapping.bit_flips.back().push_back(bitflip); 184 | } 185 | // ..in an attribute of this class so that it can be retrived by the caller 186 | flipped_bits.push_back(bitflip); 187 | found_bitflips += bitflip.count_bit_corruptions(); 188 | } 189 | } 190 | 191 | // restore original (unflipped) value 192 | *((int *) cur_addr) = expected_rand_value; 193 | 194 | // flush this address so that value is committed before hammering again there 195 | clflushopt(cur_addr); 196 | mfence(); 197 | } 198 | } 199 | 200 | free(page); 201 | return found_bitflips; 202 | } 203 | 204 | Memory::Memory(bool use_superpage) : size(0), superpage(use_superpage) { 205 | } 
206 | 207 | Memory::~Memory() { 208 | if (munmap((void *) start_address, size)==-1) { 209 | Logger::log_error("munmap failed with error:"); 210 | Logger::log_data(std::strerror(errno)); 211 | } 212 | } 213 | 214 | volatile char *Memory::get_starting_address() const { 215 | return start_address; 216 | } 217 | 218 | std::string Memory::get_flipped_rows_text_repr() { 219 | // first extract all rows, otherwise it will not be possible to know in advance whether we we still 220 | // need to add a separator (comma) to the string as upcoming DRAMAddr instances might refer to the same row 221 | std::set flipped_rows; 222 | for (const auto &da : flipped_bits) { 223 | flipped_rows.insert(da.address.row); 224 | } 225 | 226 | std::stringstream ss; 227 | for (const auto &row : flipped_rows) { 228 | ss << row; 229 | if (row!=*flipped_rows.rbegin()) ss << ","; 230 | } 231 | return ss.str(); 232 | } 233 | 234 | 235 | -------------------------------------------------------------------------------- /src/Utilities/Enums.cpp: -------------------------------------------------------------------------------- 1 | #include "Utilities/Enums.hpp" 2 | 3 | #include 4 | #include 5 | 6 | std::string to_string(FLUSHING_STRATEGY strategy) { 7 | std::map map = 8 | { 9 | {FLUSHING_STRATEGY::EARLIEST_POSSIBLE, "EARLIEST_POSSIBLE"}, 10 | {FLUSHING_STRATEGY::LATEST_POSSIBLE, "LATEST_POSSIBLE"} 11 | }; 12 | return map.at(strategy); 13 | } 14 | 15 | void from_string(const std::string &strategy, FLUSHING_STRATEGY &dest) { 16 | std::map map = 17 | { 18 | {"EARLIEST_POSSIBLE", FLUSHING_STRATEGY::EARLIEST_POSSIBLE}, 19 | {"LATEST_POSSIBLE", FLUSHING_STRATEGY::LATEST_POSSIBLE} 20 | }; 21 | dest = map.at(strategy); 22 | } 23 | 24 | std::string to_string(FENCING_STRATEGY strategy) { 25 | std::map map = 26 | { 27 | {FENCING_STRATEGY::LATEST_POSSIBLE, "LATEST_POSSIBLE"}, 28 | {FENCING_STRATEGY::EARLIEST_POSSIBLE, "EARLIEST_POSSIBLE"}, 29 | {FENCING_STRATEGY::OMIT_FENCING, "OMIT_FENCING"} 30 | }; 31 | return 
map.at(strategy); 32 | } 33 | 34 | void from_string(const std::string &strategy, FENCING_STRATEGY &dest) { 35 | std::map map = 36 | { 37 | {"LATEST_POSSIBLE", FENCING_STRATEGY::LATEST_POSSIBLE}, 38 | {"EARLIEST_POSSIBLE", FENCING_STRATEGY::EARLIEST_POSSIBLE}, 39 | {"OMIT_FENCING", FENCING_STRATEGY::OMIT_FENCING} 40 | }; 41 | dest = map.at(strategy); 42 | } 43 | 44 | [[maybe_unused]] std::pair get_valid_strategy_pair() { 45 | auto valid_strategies = get_valid_strategies(); 46 | auto num_strategies = valid_strategies.size(); 47 | std::random_device rd; 48 | std::mt19937 gen(rd()); 49 | auto strategy_idx = Range(0, num_strategies - 1).get_random_number(gen); 50 | return valid_strategies.at(strategy_idx); 51 | } 52 | 53 | std::vector> get_valid_strategies() { 54 | return std::vector>({ 55 | std::make_pair(FLUSHING_STRATEGY::EARLIEST_POSSIBLE, FENCING_STRATEGY::OMIT_FENCING), 56 | std::make_pair(FLUSHING_STRATEGY::EARLIEST_POSSIBLE, FENCING_STRATEGY::LATEST_POSSIBLE), 57 | std::make_pair(FLUSHING_STRATEGY::LATEST_POSSIBLE, FENCING_STRATEGY::LATEST_POSSIBLE), 58 | }); 59 | } 60 | -------------------------------------------------------------------------------- /src/Utilities/Logger.cpp: -------------------------------------------------------------------------------- 1 | #include "Utilities/Logger.hpp" 2 | 3 | #include 4 | #include 5 | 6 | // initialize the singleton instance 7 | Logger Logger::instance; /* NOLINT */ 8 | 9 | Logger::Logger() = default; 10 | 11 | void Logger::initialize() { 12 | instance.logfile = std::ofstream(); 13 | 14 | std::string logfile_filename = "stdout.log"; 15 | std::cout << "Writing into logfile " FF_BOLD << logfile_filename << F_RESET << std::endl; 16 | // we need to open the log file in append mode because the run_benchmark script writes values into it 17 | instance.logfile.open(logfile_filename, std::ios::out | std::ios::app); 18 | instance.timestamp_start = (unsigned long) time(nullptr); 19 | } 20 | 21 | void Logger::close() { 22 | 
instance.logfile << std::endl; 23 | instance.logfile.close(); 24 | } 25 | 26 | void Logger::log_info(const std::string &message, bool newline) { 27 | instance.logfile << FC_CYAN "[+] " << message; 28 | instance.logfile << F_RESET; 29 | if (newline) instance.logfile << "\n"; 30 | } 31 | 32 | void Logger::log_highlight(const std::string &message, bool newline) { 33 | instance.logfile << FC_MAGENTA << FF_BOLD << "[+] " << message; 34 | instance.logfile << F_RESET; 35 | if (newline) instance.logfile << "\n"; 36 | } 37 | 38 | void Logger::log_error(const std::string &message, bool newline) { 39 | instance.logfile << FC_RED "[-] " << message; 40 | instance.logfile << F_RESET; 41 | if (newline) instance.logfile << "\n"; 42 | } 43 | 44 | void Logger::log_data(const std::string &message, bool newline) { 45 | instance.logfile << message; 46 | if (newline) instance.logfile << "\n"; 47 | } 48 | 49 | void Logger::log_analysis_stage(const std::string &message, bool newline) { 50 | std::stringstream ss; 51 | ss << FC_CYAN_BRIGHT "████ " << message << " "; 52 | // this makes sure that all log analysis stage messages have the same length 53 | auto remaining_chars = 80-message.length(); 54 | while (remaining_chars--) ss << "█"; 55 | instance.logfile << ss.str(); 56 | instance.logfile << F_RESET; 57 | if (newline) instance.logfile << "\n"; 58 | } 59 | 60 | void Logger::log_debug(const std::string &message, bool newline) { 61 | #ifdef DEBUG 62 | instance.logfile << FC_YELLOW "[DEBUG] " << message; 63 | instance.logfile << F_RESET; 64 | if (newline) instance.logfile << std::endl; 65 | #else 66 | // this is just to ignore complaints of the compiler about unused params 67 | std::ignore = message; 68 | std::ignore = newline; 69 | #endif 70 | } 71 | 72 | std::string Logger::format_timestamp(unsigned long ts) { 73 | auto minutes = ts/60; 74 | auto hours = minutes/60; 75 | std::stringstream ss; 76 | ss << int(hours) << " hours " 77 | << int(minutes%60) << " minutes " 78 | << int(ts%60) << " 
seconds"; 79 | return ss.str(); 80 | } 81 | 82 | void Logger::log_timestamp() { 83 | std::stringstream ss; 84 | auto current_time = (unsigned long) time(nullptr); 85 | ss << "Time elapsed: " 86 | << format_timestamp(current_time - instance.timestamp_start) 87 | << "."; 88 | log_info(ss.str()); 89 | } 90 | 91 | void Logger::log_bitflip(volatile char *flipped_address, uint64_t row_no, unsigned char actual_value, 92 | unsigned char expected_value, unsigned long timestamp, bool newline) { 93 | instance.logfile << FC_GREEN 94 | << "[!] Flip " << std::hex << (void *) flipped_address << ", " 95 | << std::dec << "row " << row_no << ", " 96 | << "page offset: " << (uint64_t)flipped_address%(uint64_t)getpagesize() << ", " 97 | << "byte offset: " << (uint64_t)flipped_address%(uint64_t)8 << ", " 98 | << std::hex << "from " << (int) expected_value << " to " << (int) actual_value << ", " 99 | << std::dec << "detected after " << format_timestamp(timestamp - instance.timestamp_start) << "."; 100 | instance.logfile << F_RESET; 101 | if (newline) instance.logfile << "\n"; 102 | } 103 | 104 | void Logger::log_success(const std::string &message, bool newline) { 105 | instance.logfile << FC_GREEN << "[!] 
" << message; 106 | instance.logfile << F_RESET; 107 | if (newline) instance.logfile << "\n"; 108 | } 109 | 110 | void Logger::log_failure(const std::string &message, bool newline) { 111 | instance.logfile << FC_RED_BRIGHT << "[-] " << message; 112 | instance.logfile << F_RESET; 113 | if (newline) instance.logfile << "\n"; 114 | } 115 | 116 | void Logger::log_metadata(const char *commit_hash, unsigned long run_time_limit_seconds) { 117 | Logger::log_info("General information about this fuzzing run:"); 118 | 119 | char name[1024] = ""; 120 | gethostname(name, sizeof name); 121 | 122 | std::stringstream ss; 123 | ss << "Start timestamp:: " << instance.timestamp_start << "\n" 124 | << "Hostname: " << name << "\n" 125 | << "Commit SHA: " << commit_hash << "\n" 126 | << "Run time limit: " << run_time_limit_seconds << " (" << format_timestamp(run_time_limit_seconds) << ")"; 127 | Logger::log_data(ss.str()); 128 | 129 | log_global_defines(); 130 | } 131 | 132 | void Logger::log_global_defines() { 133 | Logger::log_info("Printing run configuration (GlobalDefines.hpp):"); 134 | std::stringstream ss; 135 | ss << "DRAMA_ROUNDS: " << DRAMA_ROUNDS << "\n" 136 | << "CACHELINE_SIZE: " << CACHELINE_SIZE << "\n" 137 | << "HAMMER_ROUNDS: " << HAMMER_ROUNDS << "\n" 138 | << "THRESH: " << THRESH << "\n" 139 | << "NUM_TARGETS: " << NUM_TARGETS << "\n" 140 | << "MAX_ROWS: " << MAX_ROWS << "\n" 141 | << "NUM_BANKS: " << NUM_BANKS << "\n" 142 | << "DIMM: " << DIMM << "\n" 143 | << "CHANNEL: " << CHANNEL << "\n" 144 | << "MEM_SIZE: " << MEM_SIZE << "\n" 145 | << "PAGE_SIZE: " << getpagesize() << std::endl; 146 | Logger::log_data(ss.str()); 147 | } 148 | --------------------------------------------------------------------------------