├── LICENSE
├── Makefile
├── README.md
├── example
│   ├── Makefile
│   ├── demo.c
│   ├── fuzz.c
│   ├── input.txt
│   └── sizes.c
├── install
│   ├── common.sh
│   ├── honggfuzz.sh
│   ├── llvm.sh
│   ├── patches
│   │   ├── README.md
│   │   ├── honggfuzz
│   │   │   ├── fuzz.c
│   │   │   ├── honggfuzz.h
│   │   │   ├── instrument.c
│   │   │   └── trace.c
│   │   └── llvm
│   │       ├── CMakeLists.txt
│   │       ├── X86.h
│   │       ├── X86SpeculativeLoadHardening.cpp
│   │       ├── X86TargetMachine.cpp
│   │       ├── asan_poisoning.cc
│   │       ├── asan_rtl.cc
│   │       └── sanitizer_coverage_libcdep_new.cc
│   └── wrapper.sh
├── postprocessing
│   ├── __init__.py
│   ├── aggregate_rlbk_stats.awk
│   ├── analyzer.py
│   └── build_report.awk
├── src
│   ├── SpecFuzzPass.cpp
│   ├── specfuzz_cov.c
│   ├── specfuzz_init.c
│   ├── specfuzz_rtl.S
│   └── specfuzz_rtl.h
└── tests
    ├── Makefile
    ├── acceptance-basic.c
    ├── acceptance-mmul.c
    ├── analyzer_unit.py
    ├── common
    │   ├── header.S
    │   └── process_state.S
    ├── dummy.c
    ├── rtl_chkp.S
    ├── rtl_chkp_rlbk.S
    ├── rtl_report.S
    ├── rtl_rlbk.S
    └── run.bats
/LICENSE:
--------------------------------------------------------------------------------
1 | SpecFuzz: A tool to enable fuzzing for Spectre vulnerabilities
2 | Copyright (C) 2020 Oleksii Oleksenko
3 |
4 | This program is free software: you can redistribute it and/or modify
5 | it under the terms of the GNU General Public License as published by
6 | the Free Software Foundation, either version 3 of the License, or
7 | (at your option) any later version.
8 |
9 | This program is distributed in the hope that it will be useful,
10 | but WITHOUT ANY WARRANTY; without even the implied warranty of
11 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 | GNU General Public License for more details.
13 |
14 | You should have received a copy of the GNU General Public License
15 | along with this program. If not, see <https://www.gnu.org/licenses/>.
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | # Configuration
2 | ENABLE_PRIORITEZED_SIMULATION ?= 1
3 | ENABLE_PRINT ?= 1
4 | ENABLE_PRINT_OFFSET ?= 0
5 | ENABLE_COVERAGE ?= 1
6 | ENABLE_SANITY_CHECKS ?= 1
7 | ENABLE_STATS ?= 0
8 | ENABLE_SEQUENTIAL_SIMULATION ?= 0
9 | DUMP_COVERAGE_AT_EXIT ?= 0
10 | PRINT_ROLLABACK_STATS ?= 0
11 |
12 | RUNTIME_CONFIGURATION := -DMAX_NESTING_LEVEL=$(MAX_NESTING_LEVEL)\
13 | -DENABLE_PRIORITEZED_SIMULATION=$(ENABLE_PRIORITEZED_SIMULATION)\
14 | -DENABLE_PRINT=$(ENABLE_PRINT) -DENABLE_PRINT_OFFSET=$(ENABLE_PRINT_OFFSET)\
15 | -DENABLE_COVERAGE=$(ENABLE_COVERAGE) -DENABLE_SANITY_CHECKS=$(ENABLE_SANITY_CHECKS)\
16 | -DENABLE_STATS=$(ENABLE_STATS) -DENABLE_SEQUENTIAL_SIMULATION=$(ENABLE_SEQUENTIAL_SIMULATION)\
17 | -DDUMP_COVERAGE_AT_EXIT=$(DUMP_COVERAGE_AT_EXIT) -DPRINT_ROLLABACK_STATS=$(PRINT_ROLLABACK_STATS)
18 |
19 | # Paths
20 | LLVM_CONFIG ?= llvm-7.0.1-config
21 | LLVM_SRC := $(shell $(LLVM_CONFIG) --src-root)
22 | COMPILER_RT_SRC ?= $(LLVM_SRC)/tools/compiler-rt-7.0.1.src
23 | LLVM_BUILD := $(shell $(LLVM_CONFIG) --bindir)/..
24 | CLANG := $(shell $(LLVM_CONFIG) --bindir)/clang
25 | INSTALL_DIR := $(LLVM_SRC)/lib/Target/X86/
26 | export INSTALL_DIR
27 |
28 | # Files by categories
29 | RUNTIME := src/specfuzz_rtl.S src/specfuzz_init.c src/specfuzz_cov.c
30 | LLVM_PATCH := $(wildcard install/patches/llvm/*)
31 | HONGG_PATCH := $(wildcard install/patches/honggfuzz/*)
32 |
33 | # =============
34 | # Targets
35 | # =============
36 | all: pass runtime patch_llvm rebuild_llvm
37 | install: install_specfuzz install_tools
38 |
39 | pass: src/SpecFuzzPass.cpp
40 | cp $< $(INSTALL_DIR)/SpecFuzzPass.cpp
41 |
42 | runtime: $(RUNTIME)
43 | ${CLANG} -O3 src/specfuzz_init.c -o specfuzz_init.o -c -ggdb3 $(RUNTIME_CONFIGURATION)
44 | ${CLANG} -O3 src/specfuzz_rtl.S -o specfuzz_rtl.o -c -ggdb3 $(RUNTIME_CONFIGURATION)
45 | ${CLANG} -O3 src/specfuzz_cov.c -o specfuzz_cov.o -c -ggdb3 $(RUNTIME_CONFIGURATION)
46 | ar rc $(LLVM_BUILD)/lib/libspecfuzz.a specfuzz_init.o specfuzz_rtl.o specfuzz_cov.o
47 | rm specfuzz_rtl.o specfuzz_init.o
48 |
49 | patch_llvm: $(LLVM_PATCH)
50 | # Connect SpecFuzz
51 | cp install/patches/llvm/CMakeLists.txt install/patches/llvm/X86.h install/patches/llvm/X86TargetMachine.cpp $(LLVM_SRC)/lib/Target/X86/
52 |
53 | # ASan patch
54 | cp install/patches/llvm/asan_poisoning.cc install/patches/llvm/asan_rtl.cc $(COMPILER_RT_SRC)/lib/asan/
55 | cp install/patches/llvm/sanitizer_coverage_libcdep_new.cc $(COMPILER_RT_SRC)/lib/sanitizer_common/
56 |
57 | # SLH patch
58 | cp install/patches/llvm/X86SpeculativeLoadHardening.cpp $(LLVM_SRC)/lib/Target/X86/
59 |
60 | rebuild_llvm:
61 | make -j -C $(LLVM_BUILD)
62 |
63 | install_specfuzz:
64 | cp -u install/wrapper.sh /usr/bin/clang-sf
65 | cp -u install/wrapper.sh /usr/bin/clang-sf++
66 | sed -i -e 's:/clang$$:/clang++:g' /usr/bin/clang-sf++
67 |
68 | install_tools: analyzer hongg
69 |
70 | analyzer: postprocessing/analyzer.py
71 | cp $< /usr/bin/analyzer
72 |
73 | hongg: check_hongg_path patch_hongg rebuild_hongg
74 |
75 | check_hongg_path:
76 | ifndef HONGG_SRC
77 | $(error HONGG_SRC is not set)
78 | else
79 | @echo ""
80 | endif
81 |
82 | patch_hongg: $(HONGG_PATCH)
83 | cp install/patches/honggfuzz/instrument.c $(HONGG_SRC)/libhfuzz/instrument.c
84 | cp install/patches/honggfuzz/fuzz.c $(HONGG_SRC)/fuzz.c
85 | cp install/patches/honggfuzz/honggfuzz.h $(HONGG_SRC)/honggfuzz.h
86 | cp install/patches/honggfuzz/trace.c $(HONGG_SRC)/linux/trace.c
87 | sed -i -e 's:_HF_PERSISTENT_SIG:"":g' $(HONGG_SRC)/libhfuzz/fetch.c
88 |
89 | rebuild_hongg:
90 | CC=${CLANG} CFLAGS=-ggdb make -C $(HONGG_SRC) -j4
91 | make -C $(HONGG_SRC) install
92 |
93 | test:
94 | cd tests && ./run.bats
95 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # SpecFuzz
2 | A tool to enable fuzzing for Spectre vulnerabilities. See our [Technical Report](https://arxiv.org/abs/1905.10311) for details.
3 |
4 | # Have trouble using the tool? Open an issue!
5 | The tool is relatively new and you might have trouble when installing or using it. If so, do not hesitate to open an issue.
6 |
7 | # Getting started
8 |
9 | ## Dependencies
10 | * Python 3.6+: [Install Python](https://www.python.org/downloads/)
11 | * CMake: [Install CMake](https://cmake.org/install/)
12 | * LLVM 7.0.1, built from sources:
13 | ```bash
14 | $ INSTALL_DIR=/llvm/installation/directory/ ./install/llvm.sh
15 | $ /llvm/installation/directory/clang -v
16 | clang version 7.0.1 (tags/RELEASE_701/final)
17 | ...
18 | ```
19 | * HonggFuzz, built from sources:
20 | ```bash
21 | $ apt-get install -y libbfd-dev libunwind8-dev binutils-dev libblocksruntime-dev
22 | $ INSTALL_DIR=/honggfuzz/installation/directory/ ./install/honggfuzz.sh
23 | $ honggfuzz
24 | Usage: honggfuzz [options] -- path_to_command [args]
25 | Options:
26 | ...
27 | ```
28 | ## Build it
29 | ```bash
30 | $ make
31 | $ export HONGG_SRC=/honggfuzz/installation/directory/src/
32 | $ make install
33 | $ make install_tools
34 | ```
35 | ## Try it
36 | Build a sample vulnerable program:
37 | ```bash
38 | $ cd example
39 | $ make sf
40 | clang-sf -fsanitize=address -O1 demo.c -c -o demo.sf.o
41 | clang-sf -fsanitize=address -O1 sizes.c -c -o sizes.sf.o
42 | clang-sf -fsanitize=address -O1 demo.sf.o sizes.sf.o -o demo-sf
43 | ```
44 | Try running it:
45 | ```bash
46 | $ ./demo-sf 11
47 | [SF] Starting
48 | [SF], 1, 0x123, 0x456, 0, 0x789
49 | r = 0
50 | ```
51 | Here, the line `[SF], 1, 0x123, 0x456, 0, 0x789` means that SpecFuzz detected that the instruction
52 | at address `0x123` tried to access an invalid address `0x456` during speculation, and that the speculation
53 | was triggered by a misprediction of the branch at address `0x789`.
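If you just want to skim these reports by hand, a simple filter over the `[SF]` lines is enough. This is only an illustration of the report format above (the `analyzer` used in the next section is the proper way to collect reports), and it assumes the comma-separated layout shown in the example:
```bash
# Print the unique addresses of mispredicting branches that led to a violation
# (the last field of each [SF] report line).
$ ./demo-sf 11 2>&1 | awk -F', ' '/^\[SF\],/ { print $NF }' | sort -u
```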
54 | ## Fuzz it
55 | Build a fuzzing driver:
56 | ```bash
57 | $ cd example
58 | $ export HONGG_SRC=/honggfuzz/installation/directory/src/
59 | $ make fuzz
60 | ```
61 | Fuzzing:
62 | ```bash
63 | $ honggfuzz --run_time 10 -Q -n 1 -f ./ -l fuzzing.log -- ./fuzz ___FILE___ 2>&1 | analyzer collect -r fuzzing.log -o results.json -b ./fuzz
64 | $ cat results.json # raw results of fuzzing
65 | {
66 | "errors": [],
67 | "statistics": {
68 | "coverage": [
69 | 75.0,
70 | 6
71 | ],
72 | "branches": 6,
73 | "faults": 1
74 | },
75 | "branches": {
76 | "5443896": {
77 | "address": "0x531138",
78 | "faults": [
79 | "0x530a48"
80 | ```
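Each entry under `branches` is keyed by the branch address in decimal (`5443896` is `0x531138`) and lists the speculative faults observed when that branch is mispredicted. For a quick look at which branches were flagged, a small `jq` query works (a sketch that assumes `jq` is installed and the field names shown in the excerpt above):
```bash
# Addresses of all branches recorded in the raw results
$ jq -r '.branches[].address' results.json
```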
81 | Process the results:
82 | ```bash
83 | $ analyzer aggregate results.json -s $(llvm-7.0.1-config --bindir)/llvm-symbolizer -b ./fuzz -o aggregated.json
84 | ```
85 | The final, aggregated results are in `aggregated.json`.
86 |
87 | # Development
88 |
89 | ## Testing
90 | Tests depend on bats ([Install bats](https://github.com/sstephenson/bats/wiki/Install-Bats-Using-a-Package)).
91 | ```bash
92 | $ cd tests
93 | $ ./run.bats
94 | ```
95 |
--------------------------------------------------------------------------------
/example/Makefile:
--------------------------------------------------------------------------------
1 | NAME := demo
2 | SRC := demo sizes
3 |
4 | CC = clang
5 | CXX = clang++
6 | CFLAGS := -fsanitize=address -O3 -ggdb
7 |
8 | CCSF = clang-sf
9 | CXXSF = clang-sf++
10 | SF_CFLAGS := -fsanitize=address -O3 -ggdb --enable-coverage
11 | HONGGFUZZ_LDFLAGS := -L${HONGG_SRC}/libhfuzz/ -L${HONGG_SRC}/libhfcommon -lhfuzz -lhfcommon
12 |
13 | all: sf
14 |
15 | native: $(addsuffix .o, $(SRC))
16 | $(CC) $(CFLAGS) $? -o $(NAME)-native
17 |
18 | %.o: %.c
19 | $(CC) $(CFLAGS) $< -c -o $@
20 |
21 | sf: $(addsuffix .sf.o, $(SRC))
22 | $(CCSF) $(SF_CFLAGS) $? -o $(NAME)-sf
23 |
24 | %.sf.o: %.c
25 | $(CCSF) $(SF_CFLAGS) $< -c -o $@
26 |
27 | fuzz: fuzz.sf.o sizes.sf.o
28 | $(CCSF) $(HONGGFUZZ_LDFLAGS) $(SF_CFLAGS) $? -o fuzz
29 |
30 | clean:
31 | rm -f *.o $(NAME)-native $(NAME)-sf fuzz
--------------------------------------------------------------------------------
/example/demo.c:
--------------------------------------------------------------------------------
1 | #include <stdint.h>
2 | #include <stdio.h>
3 | #include <stdlib.h>
4 |
5 | extern size_t array1_size, array2_size;
6 | extern uint8_t temp, array2[], array1[];
7 |
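/* Spectre V1 gadget: if the bounds check below is mispredicted, array1[x] is read
 * out of bounds and its value leaks through the dependent access into array2. */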
8 | void victim_function(size_t x) {
9 | if (x < array1_size) {
10 | temp &= array2[array1[x] * 512];
11 | }
12 | }
13 |
14 | int main(int argc, char **argv) {
15 | if (argc != 2) {
16 | printf("USAGE: %s \n", argv[0]);
17 | exit(1);
18 | }
19 |
20 | int index = atoi(argv[1]);
21 | victim_function(index);
22 | printf("r = %d\n", temp);
23 | return 0;
24 | }
25 |
--------------------------------------------------------------------------------
/example/fuzz.c:
--------------------------------------------------------------------------------
1 | #include <stdint.h>
2 | #include <stdio.h>
3 | #include <stdlib.h>
4 |
5 | extern size_t array1_size, array2_size;
6 | extern uint8_t temp, array2[], array1[];
7 |
8 | void victim_function(size_t x) {
9 | if (x < array1_size) {
10 | temp &= array2[array1[x] * 512];
11 | }
12 | }
13 |
14 | int main(int argc, char **argv) {
15 | if (argc != 2) {
16 | printf("USAGE: %s \n", argv[0]);
17 | exit(1);
18 | }
19 |
20 | FILE *f = fopen(argv[1], "r");
21 | if (!f) {
22 | fprintf(stderr, "Failed to open input file.");
23 | exit(1);
24 | }
25 |
26 | char value[1024];
27 | fscanf(f, " %1023s", value);
28 | if (ferror(f)) {
29 | fclose(f);
30 | fprintf(stderr, "Failed to read input file.");
31 | exit(1);
32 | }
33 |
34 | int index = atoi(value);
35 | victim_function(index);
36 | printf("r = %d\n", temp);
37 | return 0;
38 | }
39 |
--------------------------------------------------------------------------------
/example/input.txt:
--------------------------------------------------------------------------------
1 | 1
--------------------------------------------------------------------------------
/example/sizes.c:
--------------------------------------------------------------------------------
1 | #include <stddef.h>
2 | #include <stdint.h>
3 |
4 | size_t array1_size = 10;
5 | size_t array2_size = 10;
6 |
7 | uint8_t array1[10] = {0};
8 | uint8_t array2[10] = {0};
9 | uint8_t temp = 0;
--------------------------------------------------------------------------------
/install/common.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # == Prepare a safe scripting environment ==
4 | set -euo pipefail
5 | IFS=$'\n\t'
6 |
7 | # == Define common functions ==
8 | function required_str {
9 | if [ -z $1 ]; then
10 | echo "The string argument is empty!"
11 | exit 1
12 | fi
13 | }
14 |
15 | # Download a tar archive from URL $1
16 | # and unpack it to $2
17 | # Set $3 to 1 to strip the uppermost directory of the archive; otherwise, set it to 0 or omit it
18 | function download_and_untar {
19 | local url=$1 ;
20 | if [ -z ${url} ]; then
21 | echo "The string argument is empty!"
22 | exit 1
23 | fi
24 | local unpack_path=$2 ;
25 | if [ -z ${unpack_path} ]; then
26 | echo "The string argument is empty!"
27 | exit 1
28 | fi
29 | local strip=${3:-0} ;
30 |
31 | if [ -d ${unpack_path} ] && [ -n "$(ls -A ${unpack_path})" ]; then
32 | echo "The directory ${unpack_path} already exists."
33 | while true; do
34 | read -rp "Do you wish to reinstall ${unpack_path} [Yn]?" yn
35 | case $yn in
36 | [Yy]* ) rm -rf ${unpack_path}; break;;
37 | [Nn]* ) echo "Skip"; return;;
38 | * ) echo "Please answer 'y' or 'n'.";;
39 | esac
40 | done
41 | fi
42 |
43 | wget -N -O tmp.tar ${url}
44 | mkdir -p ${unpack_path}
45 | tar xf tmp.tar -C ${unpack_path} --strip-components=${strip}
46 | rm tmp.tar
47 | }
48 |
49 | # Clone a git repo from URL $1
50 | # to directory $2
51 | # Optionally, checkout $3
52 | # Optionally, apply patch $4
53 | function clone_git_repo {
54 | local url=$1 ; required_str ${url}
55 | local path=$2 ; required_str ${path}
56 | local checkout=$3
57 | local applypatch=$4
58 |
59 | if [ -d ${path} ] && [ -n "$(ls -A ${path})" ]; then
60 | echo "The directory ${path} already exists."
61 | while true; do
62 | read -rp "Do you wish to reinstall ${path} [Yn]?" yn
63 | case $yn in
64 | [Yy]* ) rm -rf ${path}; break;;
65 | [Nn]* ) echo "Skip"; return;;
66 | * ) echo "Please answer 'y' or 'n'.";;
67 | esac
68 | done
69 | fi
70 |
71 | set +e
72 | git clone ${url} ${path}
73 | set -e
74 |
75 | pushd ${path}
76 | if [ -n "${checkout}" ]; then
77 | git checkout ${checkout}
78 | fi
79 | if [ -n "${applypatch}" ]; then
80 | git apply ${applypatch}
81 | fi
82 | popd
83 | }
84 |
--------------------------------------------------------------------------------
/install/honggfuzz.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | echo "Installing HonggFuzz..."
4 | if [ -z ${INSTALL_DIR} ] ; then echo "Env. variable INSTALL_DIR must be set!" ; exit 1; fi
5 | # shellcheck source=common.sh
6 | source "$(dirname "$0")"/common.sh
7 |
8 | NAME="honggfuzz"
9 | VERSION="589a9fb92"
10 |
11 | WORK_DIR="${INSTALL_DIR}/${NAME}-${VERSION}" # the directory where we link the sources and build them
12 | SRC_DIR="${WORK_DIR}/src"
13 | BUILD_DIR="${WORK_DIR}/build"
14 |
15 | mkdir -p ${WORK_DIR}
16 |
17 | # download
18 | clone_git_repo https://github.com/google/honggfuzz.git ${SRC_DIR} ${VERSION} ""
19 |
20 | # configure
21 | mkdir -p ${BUILD_DIR}
22 | cd ${SRC_DIR} || exit
23 |
24 | # install
25 | make -j8
26 | make -j8 install
27 |
28 | echo "HonggFuzz is installed"
29 |
--------------------------------------------------------------------------------
/install/llvm.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | echo "Installing LLVM..."
4 | if [ -z ${INSTALL_DIR} ] ; then echo "Env. variable INSTALL_DIR must be set!" ; exit 1; fi
5 | # shellcheck source=common.sh
6 | source "$(dirname "$0")"/common.sh
7 |
8 | # == Defaults ==
9 | NAME=${NAME:-"llvm"}
10 | VERSION=${VERSION:-"7.0.1"}
11 | CMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE:-"Debug"}
12 |
13 | # ============
14 | # LLVM
15 | # ============
16 | WORK_DIR="${INSTALL_DIR}/${NAME}-${VERSION}"
17 | SRC_DIR="${WORK_DIR}/src"
18 | BUILD_DIR="${WORK_DIR}/build"
19 |
20 | mkdir -p ${WORK_DIR}
21 |
22 | # download
23 | download_and_untar http://llvm.org/releases/${VERSION}/llvm-${VERSION}.src.tar.xz ${SRC_DIR} 1
24 |
25 | # configure
26 | mkdir -p ${BUILD_DIR}
27 | cd ${BUILD_DIR} || exit
28 | cmake -G "Unix Makefiles" -DBUILD_SHARED_LIBS=ON -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} -DLLVM_TARGETS_TO_BUILD="X86" -DCMAKE_INSTALL_PREFIX=${BUILD_DIR} ../src
29 |
30 | # install
31 | make -j8
32 | make -j8 install
33 |
34 | # ============
35 | # Clang
36 | # ============
37 | CLANG_DIR="${SRC_DIR}/tools/cfe-${VERSION}.src"
38 | RT_DIR="${SRC_DIR}/tools/compiler-rt-${VERSION}.src"
39 |
40 | # download
41 | download_and_untar http://llvm.org/releases/${VERSION}/cfe-${VERSION}.src.tar.xz ${CLANG_DIR} 1
42 | download_and_untar http://llvm.org/releases/${VERSION}/compiler-rt-${VERSION}.src.tar.xz ${RT_DIR} 1
43 |
44 | # configure
45 | mkdir -p ${BUILD_DIR}
46 | cd ${BUILD_DIR} || exit
47 | cmake -G "Unix Makefiles" -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} -DLLVM_TARGETS_TO_BUILD="X86" -DCMAKE_INSTALL_PREFIX=${BUILD_DIR} ../src
48 |
49 | # install
50 | make -j8
51 | make -j8 install
52 | ln -sf ${BUILD_DIR}/bin/clang /usr/bin/clang
53 | ln -sf ${BUILD_DIR}/bin/clang++ /usr/bin/clang++
54 |
55 | # make the LLVM installation directory discoverable
56 | ln -sf ${BUILD_DIR}/bin/llvm-config /usr/bin/${NAME}-${VERSION}-config
57 |
58 | echo "LLVM installed"
59 |
--------------------------------------------------------------------------------
/install/patches/README.md:
--------------------------------------------------------------------------------
1 | This directory contains patches for tools that require tweaking to be used with SpecFuzz.
--------------------------------------------------------------------------------
/install/patches/honggfuzz/fuzz.c:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * honggfuzz - fuzzing routines
4 | * -----------------------------------------
5 | *
6 | * Authors: Robert Swiecki
7 | * Felix Gröbert
8 | *
9 | * Copyright 2010-2018 by Google Inc. All Rights Reserved.
10 | *
11 | * Licensed under the Apache License, Version 2.0 (the "License"); you may
12 | * not use this file except in compliance with the License. You may obtain
13 | * a copy of the License at
14 | *
15 | * http://www.apache.org/licenses/LICENSE-2.0
16 | *
17 | * Unless required by applicable law or agreed to in writing, software
18 | * distributed under the License is distributed on an "AS IS" BASIS,
19 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
20 | * implied. See the License for the specific language governing
21 | * permissions and limitations under the License.
22 | *
23 | */
24 |
25 | #include "fuzz.h"
26 |
27 | #include
28 | #include
29 | #include
30 | #include
31 | #include
32 | #include
33 | #include
34 | #include
35 | #include
36 | #include
37 | #include
38 | #include
39 | #include
40 | #include
41 | #include
42 | #include
43 | #include
44 | #include
45 |
46 | #include "arch.h"
47 | #include "honggfuzz.h"
48 | #include "input.h"
49 | #include "libhfcommon/common.h"
50 | #include "libhfcommon/files.h"
51 | #include "libhfcommon/log.h"
52 | #include "libhfcommon/util.h"
53 | #include "mangle.h"
54 | #include "report.h"
55 | #include "sanitizers.h"
56 | #include "socketfuzzer.h"
57 | #include "subproc.h"
58 |
59 | static time_t termTimeStamp = 0;
60 |
61 | bool fuzz_isTerminating(void) {
62 | if (ATOMIC_GET(termTimeStamp) != 0) {
63 | return true;
64 | }
65 | return false;
66 | }
67 |
68 | void fuzz_setTerminating(void) {
69 | if (ATOMIC_GET(termTimeStamp) != 0) {
70 | return;
71 | }
72 | ATOMIC_SET(termTimeStamp, time(NULL));
73 | }
74 |
75 | bool fuzz_shouldTerminate() {
76 | if (ATOMIC_GET(termTimeStamp) == 0) {
77 | return false;
78 | }
79 | if ((time(NULL) - ATOMIC_GET(termTimeStamp)) > 5) {
80 | return true;
81 | }
82 | return false;
83 | }
84 |
85 | static fuzzState_t fuzz_getState(honggfuzz_t* hfuzz) {
86 | return ATOMIC_GET(hfuzz->feedback.state);
87 | }
88 |
89 | static bool fuzz_writeCovFile(const char* dir, const uint8_t* data, size_t len) {
90 | char fname[PATH_MAX];
91 |
92 | uint64_t crc64f = util_CRC64(data, len);
93 | uint64_t crc64r = util_CRC64Rev(data, len);
94 | snprintf(fname, sizeof(fname), "%s/%016" PRIx64 "%016" PRIx64 ".%08" PRIx32 ".honggfuzz.cov",
95 | dir, crc64f, crc64r, (uint32_t)len);
96 |
97 | if (files_exists(fname)) {
98 | LOG_D("File '%s' already exists in the output corpus directory '%s'", fname, dir);
99 | return true;
100 | }
101 |
102 | LOG_D("Adding file '%s' to the corpus directory '%s'", fname, dir);
103 |
104 | if (!files_writeBufToFile(fname, data, len, O_WRONLY | O_CREAT | O_EXCL | O_CLOEXEC)) {
105 | LOG_W("Couldn't write buffer to file '%s'", fname);
106 | return false;
107 | }
108 |
109 | return true;
110 | }
111 |
112 | static void fuzz_addFileToFileQ(honggfuzz_t* hfuzz, const uint8_t* data, size_t len) {
113 | ATOMIC_SET(hfuzz->timing.lastCovUpdate, time(NULL));
114 |
115 | struct dynfile_t* dynfile = (struct dynfile_t*)util_Malloc(sizeof(struct dynfile_t));
116 | dynfile->size = len;
117 | dynfile->data = (uint8_t*)util_Malloc(len);
118 | memcpy(dynfile->data, data, len);
119 |
120 | MX_SCOPED_RWLOCK_WRITE(&hfuzz->io.dynfileq_mutex);
121 | TAILQ_INSERT_TAIL(&hfuzz->io.dynfileq, dynfile, pointers);
122 | hfuzz->io.dynfileqCnt++;
123 |
124 | if (hfuzz->socketFuzzer.enabled) {
125 | /* Don't add coverage data to files in socketFuzzer mode */
126 | return;
127 | }
128 |
129 | if (!fuzz_writeCovFile(hfuzz->io.covDirAll, data, len)) {
130 | LOG_E("Couldn't save the coverage data to '%s'", hfuzz->io.covDirAll);
131 | }
132 |
133 | /* No need to add files to the new coverage dir, if this is just the dry-run phase */
134 | if (fuzz_getState(hfuzz) == _HF_STATE_DYNAMIC_DRY_RUN || hfuzz->io.covDirNew == NULL) {
135 | return;
136 | }
137 |
138 | if (!fuzz_writeCovFile(hfuzz->io.covDirNew, data, len)) {
139 | LOG_E("Couldn't save the new coverage data to '%s'", hfuzz->io.covDirNew);
140 | }
141 | }
142 |
143 | static void fuzz_setDynamicMainState(run_t* run) {
144 | /* All threads need to indicate willingness to switch to the DYNAMIC_MAIN state. Count them! */
145 | static uint32_t cnt = 0;
146 | ATOMIC_PRE_INC(cnt);
147 |
148 | static pthread_mutex_t state_mutex = PTHREAD_MUTEX_INITIALIZER;
149 | MX_SCOPED_LOCK(&state_mutex);
150 |
151 | if (fuzz_getState(run->global) == _HF_STATE_DYNAMIC_MAIN) {
152 | return;
153 | }
154 |
155 | LOG_I("Entering phase 2/3: Switching to Dynamic Main (Feedback Driven Mode)");
156 | ATOMIC_SET(run->global->feedback.state, _HF_STATE_DYNAMIC_SWITCH_TO_MAIN);
157 |
158 | for (;;) {
159 | /* Check if all threads have already reported in for changing state */
160 | if (ATOMIC_GET(cnt) == run->global->threads.threadsMax) {
161 | break;
162 | }
163 | if (fuzz_isTerminating()) {
164 | return;
165 | }
166 | util_sleepForMSec(10); /* Check every 10ms */
167 | }
168 |
169 | LOG_I("Entering phase 3/3: Dynamic Main (Feedback Driven Mode)");
170 | snprintf(run->origFileName, sizeof(run->origFileName), "[DYNAMIC]");
171 | ATOMIC_SET(run->global->feedback.state, _HF_STATE_DYNAMIC_MAIN);
172 |
173 | /*
174 | * If the initial fuzzing yielded no useful coverage, just add a single 1-byte file to the
175 | * dynamic corpus, so the dynamic phase doesn't fail because of lack of useful inputs
176 | */
177 | if (run->global->io.dynfileqCnt == 0) {
178 | const char* single_byte = run->global->cfg.only_printable ? " " : "\0";
179 | fuzz_addFileToFileQ(run->global, (const uint8_t*)single_byte, 1U);
180 | }
181 | }
182 |
183 | static void fuzz_perfFeedback(run_t* run) {
184 | if (run->global->feedback.skipFeedbackOnTimeout && run->tmOutSignaled) {
185 | return;
186 | }
187 |
188 | LOG_D("New file size: %zu, Perf feedback new/cur (instr,branch): %" PRIu64 "/%" PRIu64
189 | "/%" PRIu64 "/%" PRIu64 ", BBcnt new/total: %" PRIu64 "/%" PRIu64,
190 | run->dynamicFileSz, run->linux.hwCnts.cpuInstrCnt, run->global->linux.hwCnts.cpuInstrCnt,
191 | run->linux.hwCnts.cpuBranchCnt, run->global->linux.hwCnts.cpuBranchCnt,
192 | run->linux.hwCnts.newBBCnt, run->global->linux.hwCnts.bbCnt);
193 |
194 | MX_SCOPED_LOCK(&run->global->feedback.feedback_mutex);
195 | defer {
196 | wmb();
197 | };
198 |
199 | uint64_t softCntPc = 0;
200 | uint64_t softCntEdge = 0;
201 | uint64_t softCntCmp = 0;
202 | if (run->global->feedback.bbFd != -1) {
203 | softCntPc = ATOMIC_GET(run->global->feedback.feedbackMap->pidFeedbackPc[run->fuzzNo]);
204 | ATOMIC_CLEAR(run->global->feedback.feedbackMap->pidFeedbackPc[run->fuzzNo]);
205 | softCntEdge = ATOMIC_GET(run->global->feedback.feedbackMap->pidFeedbackEdge[run->fuzzNo]);
206 | ATOMIC_CLEAR(run->global->feedback.feedbackMap->pidFeedbackEdge[run->fuzzNo]);
207 | softCntCmp = ATOMIC_GET(run->global->feedback.feedbackMap->pidFeedbackCmp[run->fuzzNo]);
208 | ATOMIC_CLEAR(run->global->feedback.feedbackMap->pidFeedbackCmp[run->fuzzNo]);
209 | }
210 |
211 | int64_t diff0 = run->global->linux.hwCnts.cpuInstrCnt - run->linux.hwCnts.cpuInstrCnt;
212 | int64_t diff1 = run->global->linux.hwCnts.cpuBranchCnt - run->linux.hwCnts.cpuBranchCnt;
213 |
214 | /* Any increase in coverage (edge, pc, cmp, hw) counters forces adding input to the corpus */
215 | if (run->linux.hwCnts.newBBCnt > 0 || softCntPc > 0 || softCntEdge > 0 || softCntCmp > 0 ||
216 | diff0 < 0 || diff1 < 0) {
217 | if (diff0 < 0) {
218 | run->global->linux.hwCnts.cpuInstrCnt = run->linux.hwCnts.cpuInstrCnt;
219 | }
220 | if (diff1 < 0) {
221 | run->global->linux.hwCnts.cpuBranchCnt = run->linux.hwCnts.cpuBranchCnt;
222 | }
223 | run->global->linux.hwCnts.bbCnt += run->linux.hwCnts.newBBCnt;
224 | run->global->linux.hwCnts.softCntPc += softCntPc;
225 | run->global->linux.hwCnts.softCntEdge += softCntEdge;
226 | run->global->linux.hwCnts.softCntCmp += softCntCmp;
227 |
228 | LOG_I("Size:%zu (i,b,hw,edge,ip,cmp): %" PRIu64 "/%" PRIu64 "/%" PRIu64 "/%" PRIu64
229 | "/%" PRIu64 "/%" PRIu64 ", Tot:%" PRIu64 "/%" PRIu64 "/%" PRIu64 "/%" PRIu64
230 | "/%" PRIu64 "/%" PRIu64,
231 | run->dynamicFileSz, run->linux.hwCnts.cpuInstrCnt, run->linux.hwCnts.cpuBranchCnt,
232 | run->linux.hwCnts.newBBCnt, softCntEdge, softCntPc, softCntCmp,
233 | run->global->linux.hwCnts.cpuInstrCnt, run->global->linux.hwCnts.cpuBranchCnt,
234 | run->global->linux.hwCnts.bbCnt, run->global->linux.hwCnts.softCntEdge,
235 | run->global->linux.hwCnts.softCntPc, run->global->linux.hwCnts.softCntCmp);
236 |
237 | fuzz_addFileToFileQ(run->global, run->dynamicFile, run->dynamicFileSz);
238 |
239 | if (run->global->socketFuzzer.enabled) {
240 | LOG_D("SocketFuzzer: fuzz: new BB (perf)");
241 | fuzz_notifySocketFuzzerNewCov(run->global);
242 | }
243 | }
244 | }
245 |
246 | /* Return value indicates whether report file should be updated with the current verified crash */
247 | static bool fuzz_runVerifier(run_t* run) {
248 | if (!run->crashFileName[0] || !run->backtrace) {
249 | return false;
250 | }
251 |
252 | uint64_t backtrace = run->backtrace;
253 |
254 | char origCrashPath[PATH_MAX];
255 | snprintf(origCrashPath, sizeof(origCrashPath), "%s", run->crashFileName);
256 | /* Workspace is inherited, just append an extra suffix */
257 | char verFile[PATH_MAX];
258 | snprintf(verFile, sizeof(verFile), "%s.verified", origCrashPath);
259 |
260 | if (files_exists(verFile)) {
261 | LOG_D("Crash file to verify '%s' is already verified as '%s'", origCrashPath, verFile);
262 | return false;
263 | }
264 |
265 | for (int i = 0; i < _HF_VERIFIER_ITER; i++) {
266 | LOG_I("Launching verifier for HASH: %" PRIx64 " (iteration: %d out of %d)", run->backtrace,
267 | i + 1, _HF_VERIFIER_ITER);
268 | run->timeStartedMillis = 0;
269 | run->backtrace = 0;
270 | run->access = 0;
271 | run->exception = 0;
272 | run->mainWorker = false;
273 |
274 | if (!subproc_Run(run)) {
275 | LOG_F("subproc_Run()");
276 | }
277 |
278 | /* If stack hash doesn't match skip name tag and exit */
279 | if (run->backtrace != backtrace) {
280 | LOG_E("Verifier stack mismatch: (original) %" PRIx64 " != (new) %" PRIx64, backtrace,
281 | run->backtrace);
282 | run->backtrace = backtrace;
283 | return true;
284 | }
285 |
286 | LOG_I("Verifier for HASH: %" PRIx64 " (iteration: %d, left: %d). MATCH!", run->backtrace,
287 | i + 1, _HF_VERIFIER_ITER - i - 1);
288 | }
289 |
290 | /* Copy file with new suffix & remove original copy */
291 | int fd = TEMP_FAILURE_RETRY(open(verFile, O_CREAT | O_EXCL | O_WRONLY, 0600));
292 | if (fd == -1 && errno == EEXIST) {
293 | LOG_I("It seems that '%s' already exists, skipping", verFile);
294 | return false;
295 | }
296 | if (fd == -1) {
297 | PLOG_E("Couldn't create '%s'", verFile);
298 | return true;
299 | }
300 | defer {
301 | close(fd);
302 | };
303 | if (!files_writeToFd(fd, run->dynamicFile, run->dynamicFileSz)) {
304 | LOG_E("Couldn't save verified file as '%s'", verFile);
305 | unlink(verFile);
306 | return true;
307 | }
308 |
309 | LOG_I("Verified crash for HASH: %" PRIx64 " and saved it as '%s'", backtrace, verFile);
310 | ATOMIC_PRE_INC(run->global->cnts.verifiedCrashesCnt);
311 |
312 | return true;
313 | }
314 |
315 | static bool fuzz_fetchInput(run_t* run) {
316 | {
317 | fuzzState_t st = fuzz_getState(run->global);
318 | if (st == _HF_STATE_DYNAMIC_DRY_RUN || st == _HF_STATE_DYNAMIC_SWITCH_TO_MAIN) {
319 | run->mutationsPerRun = 0U;
320 | if (input_prepareStaticFile(run, /* rewind= */ false, true)) {
321 | return true;
322 | }
323 | fuzz_setDynamicMainState(run);
324 | run->mutationsPerRun = run->global->mutate.mutationsPerRun;
325 | }
326 | }
327 |
328 | if (fuzz_getState(run->global) == _HF_STATE_DYNAMIC_MAIN) {
329 | if (run->global->exe.externalCommand) {
330 | if (!input_prepareExternalFile(run)) {
331 | LOG_E("input_prepareFileExternally() failed");
332 | return false;
333 | }
334 | } else if (run->global->exe.feedbackMutateCommand) {
335 | if (!input_prepareDynamicInput(run, false)) {
336 | LOG_E("input_prepareFileDynamically() failed");
337 | return false;
338 | }
339 | } else if (!input_prepareDynamicInput(run, true)) {
340 | LOG_E("input_prepareFileDynamically() failed");
341 | return false;
342 | }
343 | }
344 |
345 | if (fuzz_getState(run->global) == _HF_STATE_STATIC) {
346 | if (run->global->exe.externalCommand) {
347 | if (!input_prepareExternalFile(run)) {
348 | LOG_E("input_prepareFileExternally() failed");
349 | return false;
350 | }
351 | } else if (run->global->exe.feedbackMutateCommand) {
352 | if (!input_prepareStaticFile(run, true, false)) {
353 | LOG_E("input_prepareFileDynamically() failed");
354 | return false;
355 | }
356 | } else if (!input_prepareStaticFile(run, true /* rewind */, true)) {
357 | LOG_E("input_prepareFile() failed");
358 | return false;
359 | }
360 | }
361 |
362 | if (run->global->exe.postExternalCommand && !input_postProcessFile(run)) {
363 | LOG_E("input_postProcessFile() failed");
364 | return false;
365 | }
366 |
367 | if (run->global->exe.feedbackMutateCommand && !input_feedbackMutateFile(run)) {
368 | LOG_E("input_feedbackMutateFile() failed");
369 | return false;
370 | }
371 |
372 | return true;
373 | }
374 |
375 | static void fuzz_fuzzLoop(run_t* run) {
376 | run->timeStartedMillis = 0;
377 | run->crashFileName[0] = '\0';
378 | run->pc = 0;
379 | run->backtrace = 0;
380 | run->access = 0;
381 | run->exception = 0;
382 | run->report[0] = '\0';
383 | run->mainWorker = true;
384 | run->mutationsPerRun = run->global->mutate.mutationsPerRun;
385 | run->dynamicFileSz = 0;
386 | run->dynamicFileCopyFd = -1;
387 | run->tmOutSignaled = false;
388 |
389 | run->linux.hwCnts.cpuInstrCnt = 0;
390 | run->linux.hwCnts.cpuBranchCnt = 0;
391 | run->linux.hwCnts.bbCnt = 0;
392 | run->linux.hwCnts.newBBCnt = 0;
393 |
394 | if (!fuzz_fetchInput(run)) {
395 | LOG_F("Cound't prepare input for fuzzing");
396 | }
397 | if (!subproc_Run(run)) {
398 | LOG_F("Couldn't run fuzzed command");
399 | }
400 |
401 | if (run->global->feedback.dynFileMethod != _HF_DYNFILE_NONE) {
402 | fuzz_perfFeedback(run);
403 | }
404 | if (run->global->cfg.useVerifier && !fuzz_runVerifier(run)) {
405 | return;
406 | }
407 | report_Report(run);
408 | }
409 |
410 | static void fuzz_fuzzLoopSocket(run_t* run) {
411 | run->timeStartedMillis = 0;
412 | run->crashFileName[0] = '\0';
413 | run->pc = 0;
414 | run->backtrace = 0;
415 | run->access = 0;
416 | run->exception = 0;
417 | run->report[0] = '\0';
418 | run->mainWorker = true;
419 | run->mutationsPerRun = run->global->mutate.mutationsPerRun;
420 | run->dynamicFileSz = 0;
421 | run->dynamicFileCopyFd = -1;
422 | run->tmOutSignaled = false;
423 |
424 | run->linux.hwCnts.cpuInstrCnt = 0;
425 | run->linux.hwCnts.cpuBranchCnt = 0;
426 | run->linux.hwCnts.bbCnt = 0;
427 | run->linux.hwCnts.newBBCnt = 0;
428 |
429 | LOG_I("------------------------------------------------------");
430 |
431 | /* First iteration: Start target
432 | Other iterations: re-start target, if necessary
433 | subproc_Run() will decide by itself if a restart is necessary, via
434 | subproc_New()
435 | */
436 | LOG_D("------[ 1: subproc_run");
437 | if (!subproc_Run(run)) {
438 | LOG_W("Couldn't run server");
439 | }
440 |
441 | /* Tell the external fuzzer to send data to target
442 | The fuzzer will notify us when finished; block until then.
443 | */
444 | LOG_D("------[ 2: fetch input");
445 | if (!fuzz_waitForExternalInput(run)) {
446 | /* The fuzzer could not connect to the target and told us to
447 | restart it; do so on the next iteration.
448 | Or the target crashed while being fuzzed; restart it too.
449 | */
450 | LOG_D("------[ 2.1: Target down, will restart it");
451 | run->pid = 0; // make subproc_Run() restart it on next iteration
452 | return;
453 | }
454 |
455 | LOG_D("------[ 3: feedback");
456 | if (run->global->feedback.dynFileMethod != _HF_DYNFILE_NONE) {
457 | fuzz_perfFeedback(run);
458 | }
459 | if (run->global->cfg.useVerifier && !fuzz_runVerifier(run)) {
460 | return;
461 | }
462 |
463 | report_Report(run);
464 | }
465 |
466 | static void* fuzz_threadNew(void* arg) {
467 | honggfuzz_t* hfuzz = (honggfuzz_t*)arg;
468 | unsigned int fuzzNo = ATOMIC_POST_INC(hfuzz->threads.threadsActiveCnt);
469 | LOG_I("Launched new fuzzing thread, no. #%" PRId32, fuzzNo);
470 |
471 | run_t run = {
472 | .global = hfuzz,
473 | .pid = 0,
474 | .dynfileqCurrent = NULL,
475 | .dynamicFile = NULL,
476 | .dynamicFileFd = -1,
477 | .fuzzNo = fuzzNo,
478 | .persistentSock = -1,
479 | .tmOutSignaled = false,
480 | .origFileName = "[DYNAMIC]",
481 | };
482 |
483 | /* Do not try to handle input files with socketfuzzer */
484 | if (!hfuzz->socketFuzzer.enabled) {
485 | if (!(run.dynamicFile = files_mapSharedMem(hfuzz->mutate.maxFileSz, &run.dynamicFileFd,
486 | "hfuzz-input", run.global->io.workDir))) {
487 | LOG_F("Couldn't create an input file of size: %zu", hfuzz->mutate.maxFileSz);
488 | }
489 | }
490 | defer {
491 | if (run.dynamicFileFd != -1) {
492 | close(run.dynamicFileFd);
493 | }
494 | };
495 |
496 | if (!arch_archThreadInit(&run)) {
497 | LOG_F("Could not initialize the thread");
498 | }
499 |
500 | for (;;) {
501 | /* Check if dry run mode with verifier enabled */
502 | if (run.global->mutate.mutationsPerRun == 0U && run.global->cfg.useVerifier &&
503 | !hfuzz->socketFuzzer.enabled) {
504 | if (ATOMIC_POST_INC(run.global->cnts.mutationsCnt) >= run.global->io.fileCnt) {
505 | break;
506 | }
507 | }
508 | /* Check for max iterations limit if set */
509 | else if ((ATOMIC_POST_INC(run.global->cnts.mutationsCnt) >=
510 | run.global->mutate.mutationsMax) &&
511 | run.global->mutate.mutationsMax) {
512 | break;
513 | }
514 |
515 | if (hfuzz->socketFuzzer.enabled) {
516 | fuzz_fuzzLoopSocket(&run);
517 | } else {
518 | input_setSize(&run, run.global->mutate.maxFileSz);
519 | fuzz_fuzzLoop(&run);
520 | }
521 |
522 | if (fuzz_isTerminating()) {
523 | break;
524 | }
525 |
526 | if (run.global->cfg.exitUponCrash && ATOMIC_GET(run.global->cnts.crashesCnt) > 0) {
527 | LOG_I("Seen a crash. Terminating all fuzzing threads");
528 | fuzz_setTerminating();
529 | break;
530 | }
531 | }
532 |
533 | if (run.pid) {
534 | kill(run.pid, SIGKILL);
535 | }
536 |
537 | /* Report SpecFuzz coverage */
538 | if (fuzzNo == 0) {
539 | LOG_I("Coverage:");
540 | map_entry_t *coverage_map = run.global->feedback.feedbackMap->cmpMapPc;
541 | for (int i = 0; i < (int) COVERAGE_MAP_HASHMAP_SIZE ; i++) {
542 | map_entry_t entry = coverage_map[i];
543 | if (entry.count == 0)
544 | continue;
545 | uint64_t address = (entry.tag << COVERAGE_INDEX_WIDTH) + i;
546 | LOG_I("[SF], 0x%lx: %d", address, entry.count);
547 | }
548 | map_entry_t *coverage_map_conflicts = &coverage_map[COVERAGE_MAP_HASHMAP_SIZE];
549 | for (int i = 0; i < (int) COVERAGE_MAP_CONFLICTS_SIZE ; i++) {
550 | map_entry_t entry = coverage_map_conflicts[i];
551 | if (entry.count == 0)
552 | continue;
553 | uint64_t address = (entry.tag << COVERAGE_INDEX_WIDTH) + i;
554 | LOG_I("[SF], 0x%lx: %d", address, entry.count);
555 | }
556 | }
557 | LOG_I("Terminating thread no. #%" PRId32 ", left: %zu", fuzzNo,
558 | hfuzz->threads.threadsMax - ATOMIC_GET(run.global->threads.threadsFinished));
559 | ATOMIC_POST_INC(run.global->threads.threadsFinished);
560 | return NULL;
561 | }
562 |
563 | void fuzz_threadsStart(honggfuzz_t* hfuzz) {
564 | if (!arch_archInit(hfuzz)) {
565 | LOG_F("Couldn't prepare arch for fuzzing");
566 | }
567 | if (!sanitizers_Init(hfuzz)) {
568 | LOG_F("Couldn't prepare sanitizer options");
569 | }
570 |
571 | if (hfuzz->socketFuzzer.enabled) {
572 | /* Don't do dry run with socketFuzzer */
573 | LOG_I("Entering phase - Feedback Driven Mode (SocketFuzzer)");
574 | hfuzz->feedback.state = _HF_STATE_DYNAMIC_MAIN;
575 | } else if (hfuzz->feedback.dynFileMethod != _HF_DYNFILE_NONE) {
576 | LOG_I("Entering phase 1/3: Dry Run");
577 | hfuzz->feedback.state = _HF_STATE_DYNAMIC_DRY_RUN;
578 | } else {
579 | LOG_I("Entering phase: Static");
580 | hfuzz->feedback.state = _HF_STATE_STATIC;
581 | }
582 |
583 | for (size_t i = 0; i < hfuzz->threads.threadsMax; i++) {
584 | if (!subproc_runThread(
585 | hfuzz, &hfuzz->threads.threads[i], fuzz_threadNew, /* joinable= */ true)) {
586 | PLOG_F("Couldn't run a thread #%zu", i);
587 | }
588 | }
589 | }
590 |
--------------------------------------------------------------------------------
/install/patches/honggfuzz/honggfuzz.h:
--------------------------------------------------------------------------------
1 | /*
2 | *
3 | * honggfuzz - core structures and macros
4 | * -----------------------------------------
5 | *
6 | * Author: Robert Swiecki
7 | *
8 | * Copyright 2010-2018 by Google Inc. All Rights Reserved.
9 | *
10 | * Licensed under the Apache License, Version 2.0 (the "License"); you may
11 | * not use this file except in compliance with the License. You may obtain
12 | * a copy of the License at
13 | *
14 | * http://www.apache.org/licenses/LICENSE-2.0
15 | *
16 | * Unless required by applicable law or agreed to in writing, software
17 | * distributed under the License is distributed on an "AS IS" BASIS,
18 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
19 | * implied. See the License for the specific language governing
20 | * permissions and limitations under the License.
21 | *
22 | */
23 |
24 | #ifndef _HF_HONGGFUZZ_H_
25 | #define _HF_HONGGFUZZ_H_
26 |
27 | #include
28 | #include
29 | #include
30 | #include
31 | #include
32 | #include
33 | #include
34 | #include
35 | #include
36 | #include
37 |
38 | #include "libhfcommon/util.h"
39 |
40 | #define PROG_NAME "honggfuzz"
41 | #define PROG_VERSION "1.9"
42 |
43 | /* Name of the template which will be replaced with the proper name of the file */
44 | #define _HF_FILE_PLACEHOLDER "___FILE___"
45 |
46 | /* Default name of the report created with some architectures */
47 | #define _HF_REPORT_FILE "HONGGFUZZ.REPORT.TXT"
48 |
49 | /* Default stack-size of created threads. */
50 | #define _HF_PTHREAD_STACKSIZE (1024ULL * 1024ULL * 2ULL) /* 2MB */
51 |
52 | /* Name of envvar which indicates sequential number of fuzzer */
53 | #define _HF_THREAD_NO_ENV "HFUZZ_THREAD_NO"
54 |
55 | /* Name of envvar which indicates that the netDriver should be used */
56 | #define _HF_THREAD_NETDRIVER_ENV "HFUZZ_USE_NETDRIVER"
57 |
58 | /* Name of envvar which indicates honggfuzz's log level in use */
59 | #define _HF_LOG_LEVEL_ENV "HFUZZ_LOG_LEVEL"
60 |
61 | /* Number of crash verifier iterations before tagging a crash as stable */
62 | #define _HF_VERIFIER_ITER 5
63 |
64 | /* Size (in bytes) for report data to be stored in stack before written to file */
65 | #define _HF_REPORT_SIZE 8192
66 |
67 | /* Perf bitmap size */
68 | #define _HF_PERF_BITMAP_SIZE_16M (1024U * 1024U * 16U)
69 | #define _HF_PERF_BITMAP_BITSZ_MASK 0x7FFFFFFULL
70 | /* Maximum number of PC guards (=trace-pc-guard) we support */
71 | #define _HF_PC_GUARD_MAX (1024ULL * 1024ULL * 64ULL)
72 |
73 | /* Maximum size of the input file in bytes (128 MiB) */
74 | #define _HF_INPUT_MAX_SIZE (1024ULL * 1024ULL * 128ULL)
75 |
76 | /* FD used to log inside the child process */
77 | #define _HF_LOG_FD 1020
78 | /* FD used to represent the input file */
79 | #define _HF_INPUT_FD 1021
80 | /* FD used to pass the feedback bitmap to a process */
81 | #define _HF_BITMAP_FD 1022
82 | /* FD used to pass data to a persistent process */
83 | #define _HF_PERSISTENT_FD 1023
84 |
85 | /* Message indicating that the fuzzed process is ready for new data */
86 | static const uint8_t HFReadyTag = 'R';
87 |
88 | /* Maximum number of active fuzzing threads */
89 | #define _HF_THREAD_MAX 1024U
90 |
91 | /* Persistent-binary signature - if found within file, it means it's a persistent mode binary */
92 | #define _HF_PERSISTENT_SIG "\x01_LIBHFUZZ_PERSISTENT_BINARY_SIGNATURE_\x02\xFF"
93 | /* HF NetDriver signature - if found within file, it means it's a NetDriver-based binary */
94 | #define _HF_NETDRIVER_SIG "\x01_LIBHFUZZ_NETDRIVER_BINARY_SIGNATURE_\x02\xFF"
95 |
96 | typedef enum {
97 | _HF_DYNFILE_NONE = 0x0,
98 | _HF_DYNFILE_INSTR_COUNT = 0x1,
99 | _HF_DYNFILE_BRANCH_COUNT = 0x2,
100 | _HF_DYNFILE_BTS_EDGE = 0x10,
101 | _HF_DYNFILE_IPT_BLOCK = 0x20,
102 | _HF_DYNFILE_SOFT = 0x40,
103 | } dynFileMethod_t;
104 |
105 | typedef struct {
106 | uint64_t cpuInstrCnt;
107 | uint64_t cpuBranchCnt;
108 | uint64_t bbCnt;
109 | uint64_t newBBCnt;
110 | uint64_t softCntPc;
111 | uint64_t softCntEdge;
112 | uint64_t softCntCmp;
113 | } hwcnt_t;
114 |
115 | typedef struct {
116 | uint32_t capacity;
117 | uint32_t* pChunks;
118 | uint32_t nChunks;
119 | } bitmap_t;
120 |
121 | /* Memory map struct */
122 | typedef struct __attribute__((packed)) {
123 | uint64_t start; // region start addr
124 | uint64_t end; // region end addr
125 | uint64_t base; // region base addr
126 | char mapName[NAME_MAX]; // bin/DSO name
127 | uint64_t bbCnt;
128 | uint64_t newBBCnt;
129 | } memMap_t;
130 |
131 | /* Trie node data struct */
132 | typedef struct __attribute__((packed)) {
133 | bitmap_t* pBM;
134 | } trieData_t;
135 |
136 | /* Trie node struct */
137 | typedef struct node {
138 | char key;
139 | trieData_t data;
140 | struct node* next;
141 | struct node* prev;
142 | struct node* children;
143 | struct node* parent;
144 | } node_t;
145 |
146 | /* EOF Sanitizer coverage specific data structures */
147 |
148 | typedef enum {
149 | _HF_STATE_UNSET = 0,
150 | _HF_STATE_STATIC = 1,
151 | _HF_STATE_DYNAMIC_DRY_RUN = 2,
152 | _HF_STATE_DYNAMIC_SWITCH_TO_MAIN = 3,
153 | _HF_STATE_DYNAMIC_MAIN = 4,
154 | } fuzzState_t;
155 |
156 | struct dynfile_t {
157 | uint8_t* data;
158 | size_t size;
159 | TAILQ_ENTRY(dynfile_t)
160 | pointers;
161 | };
162 |
163 | struct strings_t {
164 | char* s;
165 | size_t len;
166 | TAILQ_ENTRY(strings_t)
167 | pointers;
168 | };
169 |
170 |
171 | /* Coverage map interface */
172 | #define COVERAGE_INDEX_WIDTH 24U
173 | #define COVERAGE_TAG_WIDTH (48U - COVERAGE_INDEX_WIDTH)
174 | #define COVERAGE_CONFLICTS_INDEX_WIDTH 8U
175 | #define COVERAGE_MAP_HASHMAP_SIZE (1U << COVERAGE_INDEX_WIDTH)
176 | #define COVERAGE_MAP_CONFLICTS_SIZE (1U << COVERAGE_CONFLICTS_INDEX_WIDTH)
177 | #define COVERAGE_MAP_SIZE (COVERAGE_MAP_HASHMAP_SIZE + COVERAGE_MAP_CONFLICTS_SIZE)
178 | #define COVERAGE_INDEX_MASK ((1U << COVERAGE_INDEX_WIDTH) - 1)
179 | #define COVERAGE_TAG_MASK (0xffffffffffffffU - COVERAGE_INDEX_MASK)
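/* Layout note: a (48-bit) PC is split into a 24-bit hash-map index (its low bits) and a
 * 24-bit tag (its high bits). Entries whose tags collide on the same index are chained via
 * the `next` field into the COVERAGE_MAP_CONFLICTS_SIZE overflow area that follows the map. */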
180 | typedef struct {
181 | unsigned long tag : COVERAGE_TAG_WIDTH;
182 | uint8_t next : COVERAGE_CONFLICTS_INDEX_WIDTH;
183 | unsigned count : (64U - COVERAGE_TAG_WIDTH - COVERAGE_CONFLICTS_INDEX_WIDTH);
184 | } map_entry_t;
185 |
186 | /* Vulnerability map interface */
187 | #define VULN_MAP_INDEX_WIDTH 16U
188 | #define VULN_MAP_INDEX_OFFSET 5U
189 | #define VULN_MAP_INDEX_MASK (((1U << VULN_MAP_INDEX_WIDTH) - 1) << VULN_MAP_INDEX_OFFSET)
190 | #define VULN_MAP_SIZE (1U << VULN_MAP_INDEX_WIDTH)
191 |
192 | typedef struct {
193 | map_entry_t cmpMapPc[COVERAGE_MAP_SIZE];
194 | uint8_t vulnMap[VULN_MAP_SIZE];
195 | uint8_t bbMapPc[_HF_PERF_BITMAP_SIZE_16M];
196 | uint64_t pidFeedbackPc[_HF_THREAD_MAX];
197 | uint64_t pidFeedbackEdge[_HF_THREAD_MAX];
198 | uint64_t pidFeedbackCmp[_HF_THREAD_MAX];
199 | } feedback_t;
200 |
201 | typedef struct {
202 | struct {
203 | size_t threadsMax;
204 | size_t threadsFinished;
205 | uint32_t threadsActiveCnt;
206 | pthread_t mainThread;
207 | pid_t mainPid;
208 | pthread_t threads[_HF_THREAD_MAX];
209 | } threads;
210 | struct {
211 | const char* inputDir;
212 | DIR* inputDirPtr;
213 | size_t fileCnt;
214 | const char* fileExtn;
215 | bool fileCntDone;
216 | const char* workDir;
217 | const char* crashDir;
218 | const char* covDirAll;
219 | const char* covDirNew;
220 | bool saveUnique;
221 | size_t dynfileqCnt;
222 | pthread_rwlock_t dynfileq_mutex;
223 | TAILQ_HEAD(dyns_t, dynfile_t) dynfileq;
224 | } io;
225 | struct {
226 | int argc;
227 | const char* const* cmdline;
228 | bool nullifyStdio;
229 | bool fuzzStdin;
230 | const char* externalCommand;
231 | const char* postExternalCommand;
232 | const char* feedbackMutateCommand;
233 | bool netDriver;
234 | bool persistent;
235 | uint64_t asLimit;
236 | uint64_t rssLimit;
237 | uint64_t dataLimit;
238 | uint64_t coreLimit;
239 | bool clearEnv;
240 | char* envs[128];
241 | sigset_t waitSigSet;
242 | } exe;
243 | struct {
244 | time_t timeStart;
245 | time_t runEndTime;
246 | time_t tmOut;
247 | time_t lastCovUpdate;
248 | bool tmoutVTALRM;
249 | } timing;
250 | struct {
251 | const char* dictionaryFile;
252 | TAILQ_HEAD(strq_t, strings_t) dictq;
253 | size_t dictionaryCnt;
254 | size_t mutationsMax;
255 | unsigned mutationsPerRun;
256 | size_t maxFileSz;
257 | } mutate;
258 | struct {
259 | bool useScreen;
260 | char cmdline_txt[65];
261 | int64_t lastDisplayMillis;
262 | } display;
263 | struct {
264 | bool useVerifier;
265 | bool exitUponCrash;
266 | const char* reportFile;
267 | pthread_mutex_t report_mutex;
268 | bool monitorSIGABRT;
269 | size_t dynFileIterExpire;
270 | bool only_printable;
271 | } cfg;
272 | struct {
273 | bool enable;
274 | } sanitizer;
275 | struct {
276 | fuzzState_t state;
277 | feedback_t* feedbackMap;
278 | int bbFd;
279 | pthread_mutex_t feedback_mutex;
280 | const char* blacklistFile;
281 | uint64_t* blacklist;
282 | size_t blacklistCnt;
283 | bool skipFeedbackOnTimeout;
284 | dynFileMethod_t dynFileMethod;
285 | } feedback;
286 | struct {
287 | size_t mutationsCnt;
288 | size_t crashesCnt;
289 | size_t uniqueCrashesCnt;
290 | size_t verifiedCrashesCnt;
291 | size_t blCrashesCnt;
292 | size_t timeoutedCnt;
293 | } cnts;
294 | struct {
295 | bool enabled;
296 | int serverSocket;
297 | int clientSocket;
298 | } socketFuzzer;
299 | /* For the Linux code */
300 | struct {
301 | int exeFd;
302 | hwcnt_t hwCnts;
303 | uint64_t dynamicCutOffAddr;
304 | bool disableRandomization;
305 | void* ignoreAddr;
306 | size_t numMajorFrames;
307 | const char* symsBlFile;
308 | char** symsBl;
309 | size_t symsBlCnt;
310 | const char* symsWlFile;
311 | char** symsWl;
312 | size_t symsWlCnt;
313 | uintptr_t cloneFlags;
314 | bool kernelOnly;
315 | bool useClone;
316 | } linux;
317 | /* For the NetBSD code */
318 | struct {
319 | void* ignoreAddr;
320 | size_t numMajorFrames;
321 | const char* symsBlFile;
322 | char** symsBl;
323 | size_t symsBlCnt;
324 | const char* symsWlFile;
325 | char** symsWl;
326 | size_t symsWlCnt;
327 | } netbsd;
328 | } honggfuzz_t;
329 |
330 | typedef enum {
331 | _HF_RS_UNKNOWN = 0,
332 | _HF_RS_WAITING_FOR_INITIAL_READY = 1,
333 | _HF_RS_WAITING_FOR_READY = 2,
334 | _HF_RS_SEND_DATA = 3,
335 | } runState_t;
336 |
337 | typedef struct {
338 | honggfuzz_t* global;
339 | pid_t pid;
340 | int64_t timeStartedMillis;
341 | char origFileName[PATH_MAX];
342 | char crashFileName[PATH_MAX];
343 | uint64_t pc;
344 | uint64_t backtrace;
345 | uint64_t access;
346 | int exception;
347 | char report[_HF_REPORT_SIZE];
348 | bool mainWorker;
349 | unsigned mutationsPerRun;
350 | struct dynfile_t* dynfileqCurrent;
351 | uint8_t* dynamicFile;
352 | size_t dynamicFileSz;
353 | int dynamicFileFd;
354 | int dynamicFileCopyFd;
355 | uint32_t fuzzNo;
356 | int persistentSock;
357 | bool waitingForReady;
358 | runState_t runState;
359 | bool tmOutSignaled;
360 | #if !defined(_HF_ARCH_DARWIN)
361 | timer_t timerId;
362 | #endif // !defined(_HF_ARCH_DARWIN)
363 |
364 | struct {
365 | /* For Linux code */
366 | uint8_t* perfMmapBuf;
367 | uint8_t* perfMmapAux;
368 | hwcnt_t hwCnts;
369 | int cpuInstrFd;
370 | int cpuBranchFd;
371 | int cpuIptBtsFd;
372 | } linux;
373 |
374 | struct {
375 | /* For NetBSD code */
376 | uint8_t* perfMmapBuf;
377 | uint8_t* perfMmapAux;
378 | hwcnt_t hwCnts;
379 | int cpuInstrFd;
380 | int cpuBranchFd;
381 | int cpuIptBtsFd;
382 | } netbsd;
383 | } run_t;
384 |
385 | /*
386 | * Go-style defer scoped implementation
387 | *
388 | * When compiled with clang, use: -fblocks -lBlocksRuntime
389 | *
390 | * Example of use:
391 | *
392 | * {
393 | * int fd = open(fname, O_RDONLY);
394 | * if (fd == -1) {
395 | * error(....);
396 | * return;
397 | * }
398 | * defer { close(fd); };
399 | * ssize_t sz = read(fd, buf, sizeof(buf));
400 | * ...
401 | * ...
402 | * }
403 | *
404 | */
405 |
406 | #define __STRMERGE(a, b) a##b
407 | #define _STRMERGE(a, b) __STRMERGE(a, b)
408 | #ifdef __clang__
409 | #if __has_extension(blocks)
410 | static void __attribute__((unused)) __clang_cleanup_func(void (^*dfunc)(void)) {
411 | (*dfunc)();
412 | }
413 |
414 | #define defer \
415 | void (^_STRMERGE(__defer_f_, __COUNTER__))(void) \
416 | __attribute__((cleanup(__clang_cleanup_func))) __attribute__((unused)) = ^
417 |
418 | #else /* __has_extension(blocks) */
419 | #define defer UNIMPLEMENTED - NO - SUPPORT - FOR - BLOCKS - IN - YOUR - CLANG - ENABLED
420 | #endif /* __has_extension(blocks) */
421 | #else /* !__clang__, e.g.: gcc */
422 |
423 | #define __block
424 | #define _DEFER(a, count) \
425 | auto void _STRMERGE(__defer_f_, count)(void* _defer_arg __attribute__((unused))); \
426 | int _STRMERGE(__defer_var_, count) __attribute__((cleanup(_STRMERGE(__defer_f_, count)))) \
427 | __attribute__((unused)); \
428 | void _STRMERGE(__defer_f_, count)(void* _defer_arg __attribute__((unused)))
429 | #define defer _DEFER(a, __COUNTER__)
430 | #endif /* ifdef __clang__ */
431 |
432 | /* Block scoped mutexes */
433 | #define MX_SCOPED_LOCK(m) \
434 | MX_LOCK(m); \
435 | defer { \
436 | MX_UNLOCK(m); \
437 | }
438 |
439 | #define MX_SCOPED_RWLOCK_READ(m) \
440 | MX_RWLOCK_READ(m); \
441 | defer { \
442 | MX_RWLOCK_UNLOCK(m); \
443 | }
444 | #define MX_SCOPED_RWLOCK_WRITE(m) \
445 | MX_RWLOCK_WRITE(m); \
446 | defer { \
447 | MX_RWLOCK_UNLOCK(m); \
448 | }
449 |
450 | #endif
451 |
--------------------------------------------------------------------------------
/install/patches/honggfuzz/instrument.c:
--------------------------------------------------------------------------------
1 | #include "instrument.h"
2 |
3 | #include
4 | #include
5 | #include
6 | #include
7 | #include
8 | #include
9 | #include
10 | #include
11 | #include
12 | #include
13 | #include
14 | #include
15 | #include
16 |
17 | #include "honggfuzz.h"
18 | #include "libhfcommon/common.h"
19 | #include "libhfcommon/log.h"
20 | #include "libhfcommon/util.h"
21 |
22 | extern uint64_t branch_execution_count;
23 |
24 | __attribute__((visibility("default"))) __attribute__((used))
25 | const char *const LIBHFUZZ_module_instrument = "LIBHFUZZ_module_instrument";
26 |
27 | typedef struct {
28 | unsigned long pointer : 48U;
29 | uint16_t count : 16U;
30 | } packed_pointer;
31 |
32 | uint16_t cmpMapPcLocal[COVERAGE_MAP_SIZE] = {0};
33 | packed_pointer cmpMapPcCache[COVERAGE_MAP_HASHMAP_SIZE] = {{0, 0}};
34 | __attribute__((preserve_most)) static map_entry_t *get_hash_map_entry(uintptr_t pc);
35 |
36 | /*
37 | * If there's no _HF_BITMAP_FD available (running without the honggfuzz
38 | * supervisor), use a dummy bitmap and control structure located in the BSS
39 | */
40 | static feedback_t bbMapFb;
41 | feedback_t *feedback = &bbMapFb;
42 | uint32_t my_thread_no = 0;
43 |
44 | __attribute__((constructor)) static void initializeInstrument(void) {
45 | if (fcntl(_HF_LOG_FD, F_GETFD) != -1) {
46 | enum llevel_t ll = INFO;
47 | const char *llstr = getenv(_HF_LOG_LEVEL_ENV);
48 | if (llstr) {
49 | ll = atoi(llstr);
50 | }
51 | logInitLogFile(NULL, _HF_LOG_FD, ll);
52 | }
53 |
54 | char *my_thread_no_str = getenv(_HF_THREAD_NO_ENV);
55 | if (my_thread_no_str == NULL) {
56 | LOG_D("The '%s' envvar is not set", _HF_THREAD_NO_ENV);
57 | return;
58 | }
59 | my_thread_no = atoi(my_thread_no_str);
60 |
61 | if (my_thread_no >= _HF_THREAD_MAX) {
62 | LOG_F("Received (via envvar) my_thread_no > _HF_THREAD_MAX (%" PRIu32 " > %d)\n",
63 | my_thread_no, _HF_THREAD_MAX);
64 | }
65 |
66 | struct stat st;
67 | if (fstat(_HF_BITMAP_FD, &st) == -1) {
68 | return;
69 | }
70 | if (st.st_size != sizeof(feedback_t)) {
71 | LOG_F(
72 | "size of the feedback structure mismatch: st.size != sizeof(feedback_t) (%zu != %zu). "
73 | "Link your fuzzed binaries with the newest honggfuzz sources via hfuzz-clang(++)",
74 | (size_t) st.st_size, sizeof(feedback_t));
75 | }
76 | if ((feedback = mmap(NULL, sizeof(feedback_t), PROT_READ | PROT_WRITE, MAP_SHARED,
77 | _HF_BITMAP_FD, 0)) == MAP_FAILED) {
78 | PLOG_F("mmap(fd=%d, size=%zu) of the feedback structure failed", _HF_BITMAP_FD,
79 | sizeof(feedback_t));
80 | }
81 |
82 | /* Reset coverage counters to their initial state */
83 | instrumentClearNewCov();
84 | }
85 |
86 | /* Reset the counters of newly discovered edges/pcs/features */
87 | void instrumentClearNewCov() {
88 | feedback->pidFeedbackPc[my_thread_no] = 0U;
89 | feedback->pidFeedbackEdge[my_thread_no] = 0U;
90 | feedback->pidFeedbackCmp[my_thread_no] = 0U;
91 | }
92 |
93 | void specfuzz_cov_vuln(uintptr_t pc) {
94 | uint64_t index = pc & COVERAGE_INDEX_MASK;
95 | uint8_t prev = feedback->vulnMap[index];
96 | if (prev == 0U) {
97 | ATOMIC_PRE_INC_RELAXED(feedback->pidFeedbackPc[my_thread_no]);
98 | feedback->vulnMap[index] = 1U;
99 | }
100 | }
101 |
102 | __attribute__((preserve_most))
103 | void specfuzz_cov_trace_pc(uintptr_t pc) {
104 | // quick path - check the cache
105 | uint64_t index = pc & COVERAGE_INDEX_MASK;
106 | if (cmpMapPcCache[index].pointer == pc) {
107 | branch_execution_count = cmpMapPcCache[index].count;
108 | return;
109 | }
110 |
111 | // Update the cache and proceed with slow path
112 | cmpMapPcCache[index].pointer = pc;
113 |
114 | // slow path: get an entry from the global coverage map
115 | map_entry_t *entry = get_hash_map_entry(pc);
116 | int localMapIndex = entry - (map_entry_t *) &feedback->cmpMapPc[0];
117 | uint16_t *localEntry = &cmpMapPcLocal[localMapIndex];
118 | uint16_t count = *localEntry;
119 |
120 | if (count != 0) {
121 | // already covered; nothing to do here
122 | cmpMapPcCache[index].count = count;
123 | branch_execution_count = count;
124 | return;
125 | }
126 |
127 | // sloth path: we see this CMP the first time in this run
128 | uint64_t prev = entry->count;
129 | entry->count++;
130 | if (prev == 0) {
131 | ATOMIC_PRE_INC_RELAXED(feedback->pidFeedbackCmp[my_thread_no]);
132 | }
133 | count = ((uint16_t) prev) + 1;
134 | *localEntry = count;
135 | cmpMapPcCache[index].count = count;
136 | branch_execution_count = count;
137 | return;
138 | }
139 |
140 | /// A helper function for accessing the coverage map
141 | __attribute__((always_inline)) __attribute__((preserve_most))
142 | static map_entry_t *get_hash_map_entry(uintptr_t pc) {
143 | map_entry_t *coverage_map = (map_entry_t *) feedback->cmpMapPc;
144 | uint64_t index = pc & COVERAGE_INDEX_MASK;
145 | uint64_t tag = (pc & COVERAGE_TAG_MASK) >> COVERAGE_INDEX_WIDTH;
146 | map_entry_t *entry = &(coverage_map[index]);
147 | map_entry_t *next;
148 |
149 | if (entry->tag == 0) {
150 | entry->tag = tag;
151 | return entry;
152 | } else if (entry->tag == tag) {
153 | return entry;
154 | }
155 |
156 | // hash conflict
157 | static uint32_t coverage_map_conflicts_top = 0;
158 | map_entry_t *coverage_map_conflicts = &coverage_map[COVERAGE_MAP_HASHMAP_SIZE];
159 | do {
160 | if (entry->next == 0) { // create a new entry
161 | next = &(coverage_map_conflicts[coverage_map_conflicts_top]);
162 | entry->next = (uint16_t) coverage_map_conflicts_top;
163 | next->tag = tag;
164 |
165 | coverage_map_conflicts_top++;
166 | if (coverage_map_conflicts_top > COVERAGE_MAP_CONFLICTS_SIZE) {
167 | LOG_F("coverage map overflow");
168 | exit(1);
169 | }
170 | return next;
171 | }
172 | entry = &coverage_map_conflicts[entry->next];
173 | } while (entry->tag != tag);
174 | return entry;
175 | }
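
The lookup above is two-level: the low bits of the PC pick a direct-mapped slot, the higher bits form a tag that detects collisions, and colliding PCs spill into a conflict area placed behind the main table. A minimal stand-alone C++ sketch of the index/tag split follows; the mask and width constants are invented for illustration and are not the COVERAGE_* values from the patched honggfuzz headers.

    #include <cstdint>
    #include <cstdio>

    // Hypothetical constants; the real ones come from the patched honggfuzz.h.
    static const uint64_t kIndexWidth = 16;
    static const uint64_t kIndexMask  = (1ULL << kIndexWidth) - 1;
    static const uint64_t kTagMask    = ((1ULL << 32) - 1) & ~kIndexMask;

    int main() {
        uint64_t pc = 0x4005d2;                        // an example program counter
        uint64_t index = pc & kIndexMask;              // selects the hash-map slot
        uint64_t tag = (pc & kTagMask) >> kIndexWidth; // distinguishes PCs sharing a slot
        std::printf("pc=%#llx -> index=%#llx tag=%#llx\n",
                    (unsigned long long) pc,
                    (unsigned long long) index,
                    (unsigned long long) tag);
        return 0;
    }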
--------------------------------------------------------------------------------
/install/patches/llvm/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | set(LLVM_TARGET_DEFINITIONS X86.td)
2 |
3 | tablegen(LLVM X86GenAsmMatcher.inc -gen-asm-matcher)
4 | tablegen(LLVM X86GenAsmWriter.inc -gen-asm-writer)
5 | tablegen(LLVM X86GenAsmWriter1.inc -gen-asm-writer -asmwriternum=1)
6 | tablegen(LLVM X86GenCallingConv.inc -gen-callingconv)
7 | tablegen(LLVM X86GenDAGISel.inc -gen-dag-isel)
8 | tablegen(LLVM X86GenDisassemblerTables.inc -gen-disassembler)
9 | tablegen(LLVM X86GenEVEX2VEXTables.inc -gen-x86-EVEX2VEX-tables)
10 | tablegen(LLVM X86GenFastISel.inc -gen-fast-isel)
11 | tablegen(LLVM X86GenGlobalISel.inc -gen-global-isel)
12 | tablegen(LLVM X86GenInstrInfo.inc -gen-instr-info)
13 | tablegen(LLVM X86GenRegisterBank.inc -gen-register-bank)
14 | tablegen(LLVM X86GenRegisterInfo.inc -gen-register-info)
15 | tablegen(LLVM X86GenSubtargetInfo.inc -gen-subtarget)
16 |
17 | if (X86_GEN_FOLD_TABLES)
18 | tablegen(LLVM X86GenFoldTables.inc -gen-x86-fold-tables)
19 | endif()
20 |
21 | add_public_tablegen_target(X86CommonTableGen)
22 |
23 | set(sources
24 | ShadowCallStack.cpp
25 | SpecFuzzPass.cpp
26 | X86AsmPrinter.cpp
27 | X86CallFrameOptimization.cpp
28 | X86CallingConv.cpp
29 | X86CallLowering.cpp
30 | X86CmovConversion.cpp
31 | X86DomainReassignment.cpp
32 | X86ExpandPseudo.cpp
33 | X86FastISel.cpp
34 | X86FixupBWInsts.cpp
35 | X86FixupLEAs.cpp
36 | X86AvoidStoreForwardingBlocks.cpp
37 | X86FixupSetCC.cpp
38 | X86FlagsCopyLowering.cpp
39 | X86FloatingPoint.cpp
40 | X86FrameLowering.cpp
41 | X86InstructionSelector.cpp
42 | X86ISelDAGToDAG.cpp
43 | X86ISelLowering.cpp
44 | X86IndirectBranchTracking.cpp
45 | X86InterleavedAccess.cpp
46 | X86InstrFMA3Info.cpp
47 | X86InstrFoldTables.cpp
48 | X86InstrInfo.cpp
49 | X86EvexToVex.cpp
50 | X86LegalizerInfo.cpp
51 | X86MCInstLower.cpp
52 | X86MachineFunctionInfo.cpp
53 | X86MacroFusion.cpp
54 | X86OptimizeLEAs.cpp
55 | X86PadShortFunction.cpp
56 | X86RegisterBankInfo.cpp
57 | X86RegisterInfo.cpp
58 | X86RetpolineThunks.cpp
59 | X86SelectionDAGInfo.cpp
60 | X86ShuffleDecodeConstantPool.cpp
61 | X86SpeculativeLoadHardening.cpp
62 | X86Subtarget.cpp
63 | X86TargetMachine.cpp
64 | X86TargetObjectFile.cpp
65 | X86TargetTransformInfo.cpp
66 | X86VZeroUpper.cpp
67 | X86WinAllocaExpander.cpp
68 | X86WinEHState.cpp
69 | )
70 |
71 | add_llvm_target(X86CodeGen ${sources})
72 |
73 | add_subdirectory(AsmParser)
74 | add_subdirectory(Disassembler)
75 | add_subdirectory(InstPrinter)
76 | add_subdirectory(MCTargetDesc)
77 | add_subdirectory(TargetInfo)
78 | add_subdirectory(Utils)
79 |
--------------------------------------------------------------------------------
/install/patches/llvm/X86.h:
--------------------------------------------------------------------------------
1 | //===-- X86.h - Top-level interface for X86 representation ------*- C++ -*-===//
2 | //
3 | // The LLVM Compiler Infrastructure
4 | //
5 | // This file is distributed under the University of Illinois Open Source
6 | // License. See LICENSE.TXT for details.
7 | //
8 | //===----------------------------------------------------------------------===//
9 | //
10 | // This file contains the entry points for global functions defined in the x86
11 | // target library, as used by the LLVM JIT.
12 | //
13 | //===----------------------------------------------------------------------===//
14 |
15 | #ifndef LLVM_LIB_TARGET_X86_X86_H
16 | #define LLVM_LIB_TARGET_X86_X86_H
17 |
18 | #include "llvm/Support/CodeGen.h"
19 |
20 | namespace llvm {
21 |
22 | class FunctionPass;
23 | class ImmutablePass;
24 | class InstructionSelector;
25 | class ModulePass;
26 | class PassRegistry;
27 | class X86RegisterBankInfo;
28 | class X86Subtarget;
29 | class X86TargetMachine;
30 |
31 | /// This pass converts a legalized DAG into a X86-specific DAG, ready for
32 | /// instruction scheduling.
33 | FunctionPass *createX86ISelDag(X86TargetMachine &TM,
34 | CodeGenOpt::Level OptLevel);
35 |
36 | /// This pass initializes a global base register for PIC on x86-32.
37 | FunctionPass *createX86GlobalBaseRegPass();
38 |
39 | /// This pass combines multiple accesses to local-dynamic TLS variables so that
40 | /// the TLS base address for the module is only fetched once per execution path
41 | /// through the function.
42 | FunctionPass *createCleanupLocalDynamicTLSPass();
43 |
44 | /// This function returns a pass which converts floating-point register
45 | /// references and pseudo instructions into floating-point stack references and
46 | /// physical instructions.
47 | FunctionPass *createX86FloatingPointStackifierPass();
48 |
49 | /// This pass inserts AVX vzeroupper instructions before each call to avoid
50 | /// transition penalty between functions encoded with AVX and SSE.
51 | FunctionPass *createX86IssueVZeroUpperPass();
52 |
53 | /// This pass instruments the function prolog to save the return address to a
54 | /// 'shadow call stack' and the function epilog to check that the return address
55 | /// did not change during function execution.
56 | FunctionPass *createShadowCallStackPass();
57 |
58 | /// This pass inserts ENDBR instructions before indirect jump/call
59 | /// destinations as part of CET IBT mechanism.
60 | FunctionPass *createX86IndirectBranchTrackingPass();
61 |
62 | /// Return a pass that pads short functions with NOOPs.
63 | /// This will prevent a stall when returning on the Atom.
64 | FunctionPass *createX86PadShortFunctions();
65 |
66 | /// Return a pass that selectively replaces certain instructions (like add,
67 | /// sub, inc, dec, some shifts, and some multiplies) by equivalent LEA
68 | /// instructions, in order to eliminate execution delays in some processors.
69 | FunctionPass *createX86FixupLEAs();
70 |
71 | /// Return a pass that removes redundant LEA instructions and redundant address
72 | /// recalculations.
73 | FunctionPass *createX86OptimizeLEAs();
74 |
75 | /// Return a pass that transforms setcc + movzx pairs into xor + setcc.
76 | FunctionPass *createX86FixupSetCC();
77 |
78 | /// Return a pass that avoids creating store forward block issues in the hardware.
79 | FunctionPass *createX86AvoidStoreForwardingBlocks();
80 |
81 | /// Return a pass that lowers EFLAGS copy pseudo instructions.
82 | FunctionPass *createX86FlagsCopyLoweringPass();
83 |
84 | /// Return a pass that expands WinAlloca pseudo-instructions.
85 | FunctionPass *createX86WinAllocaExpander();
86 |
87 | /// Return a pass that optimizes the code-size of x86 call sequences. This is
88 | /// done by replacing esp-relative movs with pushes.
89 | FunctionPass *createX86CallFrameOptimization();
90 |
91 | /// Return an IR pass that inserts EH registration stack objects and explicit
92 | /// EH state updates. This pass must run after EH preparation, which does
93 | /// Windows-specific but architecture-neutral preparation.
94 | FunctionPass *createX86WinEHStatePass();
95 |
96 | /// Return a Machine IR pass that expands X86-specific pseudo
97 | /// instructions into a sequence of actual instructions. This pass
98 | /// must run after prologue/epilogue insertion and before lowering
99 | /// the MachineInstr to MC.
100 | FunctionPass *createX86ExpandPseudoPass();
101 |
102 | /// This pass converts X86 cmov instructions into branch when profitable.
103 | FunctionPass *createX86CmovConverterPass();
104 |
105 | /// Return a Machine IR pass that selectively replaces
106 | /// certain byte and word instructions by equivalent 32 bit instructions,
107 | /// in order to eliminate partial register usage, false dependences on
108 | /// the upper portions of registers, and to save code size.
109 | FunctionPass *createX86FixupBWInsts();
110 |
111 | /// Return a Machine IR pass that reassigns instruction chains from one domain
112 | /// to another, when profitable.
113 | FunctionPass *createX86DomainReassignmentPass();
114 |
115 | void initializeFixupBWInstPassPass(PassRegistry &);
116 |
117 | /// This pass replaces EVEX encoding of AVX-512 instructions with VEX
118 | /// encoding when possible, in order to reduce code size.
119 | FunctionPass *createX86EvexToVexInsts();
120 |
121 | /// This pass creates the thunks for the retpoline feature.
122 | FunctionPass *createX86RetpolineThunksPass();
123 |
124 | InstructionSelector *createX86InstructionSelector(const X86TargetMachine &TM,
125 | X86Subtarget &,
126 | X86RegisterBankInfo &);
127 |
128 | void initializeEvexToVexInstPassPass(PassRegistry &);
129 |
130 | FunctionPass *createX86SpeculativeLoadHardeningPass();
131 |
132 | FunctionPass *createX86SpecFuzzPass();
133 |
134 | } // End llvm namespace
135 |
136 | #endif
137 |
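
The header above only declares the createX86SpecFuzzPass() factory; the pass itself is added to the X86 sources by the CMakeLists.txt patch earlier in this set. As a hedged sketch of the boilerplate such a factory typically returns (the generic LLVM MachineFunctionPass skeleton, not the actual SpecFuzz pass):

    #include "X86.h"
    #include "llvm/CodeGen/MachineFunctionPass.h"

    using namespace llvm;

    namespace {
    // Hypothetical skeleton only; the real pass performs the SpecFuzz instrumentation.
    class X86SpecFuzzSketch : public MachineFunctionPass {
    public:
        static char ID;
        X86SpecFuzzSketch() : MachineFunctionPass(ID) {}

        StringRef getPassName() const override { return "X86 SpecFuzz (sketch)"; }

        bool runOnMachineFunction(MachineFunction &MF) override {
            // The real pass rewrites branches and memory accesses here;
            // this sketch leaves the function untouched.
            return false;
        }
    };
    } // end anonymous namespace

    char X86SpecFuzzSketch::ID = 0;

    // The factory declared in X86.h would then be defined along these lines:
    FunctionPass *llvm::createX86SpecFuzzPass() { return new X86SpecFuzzSketch(); }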
--------------------------------------------------------------------------------
/install/patches/llvm/X86TargetMachine.cpp:
--------------------------------------------------------------------------------
1 | //===-- X86TargetMachine.cpp - Define TargetMachine for the X86 -----------===//
2 | //
3 | // The LLVM Compiler Infrastructure
4 | //
5 | // This file is distributed under the University of Illinois Open Source
6 | // License. See LICENSE.TXT for details.
7 | //
8 | //===----------------------------------------------------------------------===//
9 | //
10 | // This file defines the X86 specific subclass of TargetMachine.
11 | //
12 | //===----------------------------------------------------------------------===//
13 |
14 | #include "X86TargetMachine.h"
15 | #include "MCTargetDesc/X86MCTargetDesc.h"
16 | #include "X86.h"
17 | #include "X86CallLowering.h"
18 | #include "X86LegalizerInfo.h"
19 | #include "X86MacroFusion.h"
20 | #include "X86Subtarget.h"
21 | #include "X86TargetObjectFile.h"
22 | #include "X86TargetTransformInfo.h"
23 | #include "llvm/ADT/Optional.h"
24 | #include "llvm/ADT/STLExtras.h"
25 | #include "llvm/ADT/SmallString.h"
26 | #include "llvm/ADT/StringRef.h"
27 | #include "llvm/ADT/Triple.h"
28 | #include "llvm/Analysis/TargetTransformInfo.h"
29 | #include "llvm/CodeGen/ExecutionDomainFix.h"
30 | #include "llvm/CodeGen/GlobalISel/CallLowering.h"
31 | #include "llvm/CodeGen/GlobalISel/IRTranslator.h"
32 | #include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
33 | #include "llvm/CodeGen/GlobalISel/Legalizer.h"
34 | #include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
35 | #include "llvm/CodeGen/MachineScheduler.h"
36 | #include "llvm/CodeGen/Passes.h"
37 | #include "llvm/CodeGen/TargetPassConfig.h"
38 | #include "llvm/IR/Attributes.h"
39 | #include "llvm/IR/DataLayout.h"
40 | #include "llvm/IR/Function.h"
41 | #include "llvm/Pass.h"
42 | #include "llvm/Support/CodeGen.h"
43 | #include "llvm/Support/CommandLine.h"
44 | #include "llvm/Support/ErrorHandling.h"
45 | #include "llvm/Support/TargetRegistry.h"
46 | #include "llvm/Target/TargetLoweringObjectFile.h"
47 | #include "llvm/Target/TargetOptions.h"
48 | #include <memory>
49 | #include <string>
50 |
51 | using namespace llvm;
52 |
53 | static cl::opt<bool> EnableMachineCombinerPass("x86-machine-combiner",
54 | cl::desc("Enable the machine combiner pass"),
55 | cl::init(true), cl::Hidden);
56 |
57 | static cl::opt<bool> EnableSpeculativeLoadHardening(
58 | "x86-speculative-load-hardening",
59 | cl::desc("Enable speculative load hardening"), cl::init(false), cl::Hidden);
60 |
61 | static cl::opt<bool> EnableSpecFuzz(
62 | "x86-specfuzz",
63 | cl::desc("Enable SpecFuzz"), cl::init(false), cl::Hidden);
64 |
65 | namespace llvm {
66 |
67 | void initializeWinEHStatePassPass(PassRegistry &);
68 | void initializeFixupLEAPassPass(PassRegistry &);
69 | void initializeShadowCallStackPass(PassRegistry &);
70 | void initializeX86CallFrameOptimizationPass(PassRegistry &);
71 | void initializeX86CmovConverterPassPass(PassRegistry &);
72 | void initializeX86ExecutionDomainFixPass(PassRegistry &);
73 | void initializeX86DomainReassignmentPass(PassRegistry &);
74 | void initializeX86AvoidSFBPassPass(PassRegistry &);
75 | void initializeX86FlagsCopyLoweringPassPass(PassRegistry &);
76 |
77 | } // end namespace llvm
78 |
79 | extern "C" void LLVMInitializeX86Target() {
80 | // Register the target.
81 | RegisterTargetMachine<X86TargetMachine> X(getTheX86_32Target());
82 | RegisterTargetMachine<X86TargetMachine> Y(getTheX86_64Target());
83 |
84 | PassRegistry &PR = *PassRegistry::getPassRegistry();
85 | initializeGlobalISel(PR);
86 | initializeWinEHStatePassPass(PR);
87 | initializeFixupBWInstPassPass(PR);
88 | initializeEvexToVexInstPassPass(PR);
89 | initializeFixupLEAPassPass(PR);
90 | initializeShadowCallStackPass(PR);
91 | initializeX86CallFrameOptimizationPass(PR);
92 | initializeX86CmovConverterPassPass(PR);
93 | initializeX86ExecutionDomainFixPass(PR);
94 | initializeX86DomainReassignmentPass(PR);
95 | initializeX86AvoidSFBPassPass(PR);
96 | initializeX86FlagsCopyLoweringPassPass(PR);
97 | }
98 |
99 | static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
100 | if (TT.isOSBinFormatMachO()) {
101 | if (TT.getArch() == Triple::x86_64)
102 | return llvm::make_unique<X86_64MachoTargetObjectFile>();
103 | return llvm::make_unique<TargetLoweringObjectFileMachO>();
104 | }
105 |
106 | if (TT.isOSFreeBSD())
107 | return llvm::make_unique<X86FreeBSDTargetObjectFile>();
108 | if (TT.isOSLinux() || TT.isOSNaCl() || TT.isOSIAMCU())
109 | return llvm::make_unique<X86LinuxNaClTargetObjectFile>();
110 | if (TT.isOSSolaris())
111 | return llvm::make_unique<X86SolarisTargetObjectFile>();
112 | if (TT.isOSFuchsia())
113 | return llvm::make_unique<X86FuchsiaTargetObjectFile>();
114 | if (TT.isOSBinFormatELF())
115 | return llvm::make_unique<X86ELFTargetObjectFile>();
116 | if (TT.isOSBinFormatCOFF())
117 | return llvm::make_unique<TargetLoweringObjectFileCOFF>();
118 | llvm_unreachable("unknown subtarget type");
119 | }
120 |
121 | static std::string computeDataLayout(const Triple &TT) {
122 | // X86 is little endian
123 | std::string Ret = "e";
124 |
125 | Ret += DataLayout::getManglingComponent(TT);
126 | // X86 and x32 have 32 bit pointers.
127 | if ((TT.isArch64Bit() &&
128 | (TT.getEnvironment() == Triple::GNUX32 || TT.isOSNaCl())) ||
129 | !TT.isArch64Bit())
130 | Ret += "-p:32:32";
131 |
132 | // Some ABIs align 64 bit integers and doubles to 64 bits, others to 32.
133 | if (TT.isArch64Bit() || TT.isOSWindows() || TT.isOSNaCl())
134 | Ret += "-i64:64";
135 | else if (TT.isOSIAMCU())
136 | Ret += "-i64:32-f64:32";
137 | else
138 | Ret += "-f64:32:64";
139 |
140 | // Some ABIs align long double to 128 bits, others to 32.
141 | if (TT.isOSNaCl() || TT.isOSIAMCU())
142 | ; // No f80
143 | else if (TT.isArch64Bit() || TT.isOSDarwin())
144 | Ret += "-f80:128";
145 | else
146 | Ret += "-f80:32";
147 |
148 | if (TT.isOSIAMCU())
149 | Ret += "-f128:32";
150 |
151 | // The registers can hold 8, 16, 32 or, in x86-64, 64 bits.
152 | if (TT.isArch64Bit())
153 | Ret += "-n8:16:32:64";
154 | else
155 | Ret += "-n8:16:32";
156 |
157 | // The stack is aligned to 32 bits on some ABIs and 128 bits on others.
158 | if ((!TT.isArch64Bit() && TT.isOSWindows()) || TT.isOSIAMCU())
159 | Ret += "-a:0:32-S32";
160 | else
161 | Ret += "-S128";
162 |
163 | return Ret;
164 | }
165 |
166 | static Reloc::Model getEffectiveRelocModel(const Triple &TT,
167 | bool JIT,
168 | Optional<Reloc::Model> RM) {
169 | bool is64Bit = TT.getArch() == Triple::x86_64;
170 | if (!RM.hasValue()) {
171 | // JIT codegen should use static relocations by default, since it's
172 | // typically executed in process and not relocatable.
173 | if (JIT)
174 | return Reloc::Static;
175 |
176 | // Darwin defaults to PIC in 64 bit mode and dynamic-no-pic in 32 bit mode.
177 | // Win64 requires rip-rel addressing, thus we force it to PIC. Otherwise we
178 | // use static relocation model by default.
179 | if (TT.isOSDarwin()) {
180 | if (is64Bit)
181 | return Reloc::PIC_;
182 | return Reloc::DynamicNoPIC;
183 | }
184 | if (TT.isOSWindows() && is64Bit)
185 | return Reloc::PIC_;
186 | return Reloc::Static;
187 | }
188 |
189 | // ELF and X86-64 don't have a distinct DynamicNoPIC model. DynamicNoPIC
190 | // is defined as a model for code which may be used in static or dynamic
191 | // executables but not necessarily a shared library. On X86-32 we just
192 | // compile in -static mode, in x86-64 we use PIC.
193 | if (*RM == Reloc::DynamicNoPIC) {
194 | if (is64Bit)
195 | return Reloc::PIC_;
196 | if (!TT.isOSDarwin())
197 | return Reloc::Static;
198 | }
199 |
200 | // If we are on Darwin, disallow static relocation model in X86-64 mode, since
201 | // the Mach-O file format doesn't support it.
202 | if (*RM == Reloc::Static && TT.isOSDarwin() && is64Bit)
203 | return Reloc::PIC_;
204 |
205 | return *RM;
206 | }
207 |
208 | static CodeModel::Model getEffectiveCodeModel(Optional<CodeModel::Model> CM,
209 | bool JIT, bool Is64Bit) {
210 | if (CM)
211 | return *CM;
212 | if (JIT)
213 | return Is64Bit ? CodeModel::Large : CodeModel::Small;
214 | return CodeModel::Small;
215 | }
216 |
217 | /// Create an X86 target.
218 | ///
219 | X86TargetMachine::X86TargetMachine(const Target &T, const Triple &TT,
220 | StringRef CPU, StringRef FS,
221 | const TargetOptions &Options,
222 | Optional<Reloc::Model> RM,
223 | Optional<CodeModel::Model> CM,
224 | CodeGenOpt::Level OL, bool JIT)
225 | : LLVMTargetMachine(
226 | T, computeDataLayout(TT), TT, CPU, FS, Options,
227 | getEffectiveRelocModel(TT, JIT, RM),
228 | getEffectiveCodeModel(CM, JIT, TT.getArch() == Triple::x86_64), OL),
229 | TLOF(createTLOF(getTargetTriple())) {
230 | // Windows stack unwinder gets confused when execution flow "falls through"
231 | // after a call to 'noreturn' function.
232 | // To prevent that, we emit a trap for 'unreachable' IR instructions.
233 | // (which on X86, happens to be the 'ud2' instruction)
234 | // On PS4, the "return address" of a 'noreturn' call must still be within
235 | // the calling function, and TrapUnreachable is an easy way to get that.
236 | // The check here for 64-bit windows is a bit icky, but as we're unlikely
237 | // to ever want to mix 32 and 64-bit windows code in a single module
238 | // this should be fine.
239 | if ((TT.isOSWindows() && TT.getArch() == Triple::x86_64) || TT.isPS4() ||
240 | TT.isOSBinFormatMachO()) {
241 | this->Options.TrapUnreachable = true;
242 | this->Options.NoTrapAfterNoreturn = TT.isOSBinFormatMachO();
243 | }
244 |
245 | // Outlining is available for x86-64.
246 | if (TT.getArch() == Triple::x86_64)
247 | setMachineOutliner(true);
248 |
249 | initAsmInfo();
250 | }
251 |
252 | X86TargetMachine::~X86TargetMachine() = default;
253 |
254 | const X86Subtarget *
255 | X86TargetMachine::getSubtargetImpl(const Function &F) const {
256 | Attribute CPUAttr = F.getFnAttribute("target-cpu");
257 | Attribute FSAttr = F.getFnAttribute("target-features");
258 |
259 | StringRef CPU = !CPUAttr.hasAttribute(Attribute::None)
260 | ? CPUAttr.getValueAsString()
261 | : (StringRef)TargetCPU;
262 | StringRef FS = !FSAttr.hasAttribute(Attribute::None)
263 | ? FSAttr.getValueAsString()
264 | : (StringRef)TargetFS;
265 |
266 | SmallString<512> Key;
267 | Key.reserve(CPU.size() + FS.size());
268 | Key += CPU;
269 | Key += FS;
270 |
271 | // FIXME: This is related to the code below to reset the target options,
272 | // we need to know whether or not the soft float flag is set on the
273 | // function before we can generate a subtarget. We also need to use
274 | // it as a key for the subtarget since that can be the only difference
275 | // between two functions.
276 | bool SoftFloat =
277 | F.getFnAttribute("use-soft-float").getValueAsString() == "true";
278 | // If the soft float attribute is set on the function turn on the soft float
279 | // subtarget feature.
280 | if (SoftFloat)
281 | Key += FS.empty() ? "+soft-float" : ",+soft-float";
282 |
283 | // Keep track of the key width after all features are added so we can extract
284 | // the feature string out later.
285 | unsigned CPUFSWidth = Key.size();
286 |
287 | // Extract prefer-vector-width attribute.
288 | unsigned PreferVectorWidthOverride = 0;
289 | if (F.hasFnAttribute("prefer-vector-width")) {
290 | StringRef Val = F.getFnAttribute("prefer-vector-width").getValueAsString();
291 | unsigned Width;
292 | if (!Val.getAsInteger(0, Width)) {
293 | Key += ",prefer-vector-width=";
294 | Key += Val;
295 | PreferVectorWidthOverride = Width;
296 | }
297 | }
298 |
299 | // Extract required-vector-width attribute.
300 | unsigned RequiredVectorWidth = UINT32_MAX;
301 | if (F.hasFnAttribute("required-vector-width")) {
302 | StringRef Val = F.getFnAttribute("required-vector-width").getValueAsString();
303 | unsigned Width;
304 | if (!Val.getAsInteger(0, Width)) {
305 | Key += ",required-vector-width=";
306 | Key += Val;
307 | RequiredVectorWidth = Width;
308 | }
309 | }
310 |
311 | // Extracted here so that we make sure there is backing for the StringRef. If
312 | // we assigned earlier, its possible the SmallString reallocated leaving a
313 | // dangling StringRef.
314 | FS = Key.slice(CPU.size(), CPUFSWidth);
315 |
316 | auto &I = SubtargetMap[Key];
317 | if (!I) {
318 | // This needs to be done before we create a new subtarget since any
319 | // creation will depend on the TM and the code generation flags on the
320 | // function that reside in TargetOptions.
321 | resetTargetOptions(F);
322 | I = llvm::make_unique<X86Subtarget>(TargetTriple, CPU, FS, *this,
323 | Options.StackAlignmentOverride,
324 | PreferVectorWidthOverride,
325 | RequiredVectorWidth);
326 | }
327 | return I.get();
328 | }
329 |
330 | //===----------------------------------------------------------------------===//
331 | // Command line options for x86
332 | //===----------------------------------------------------------------------===//
333 | static cl::opt<bool>
334 | UseVZeroUpper("x86-use-vzeroupper", cl::Hidden,
335 | cl::desc("Minimize AVX to SSE transition penalty"),
336 | cl::init(true));
337 |
338 | //===----------------------------------------------------------------------===//
339 | // X86 TTI query.
340 | //===----------------------------------------------------------------------===//
341 |
342 | TargetTransformInfo
343 | X86TargetMachine::getTargetTransformInfo(const Function &F) {
344 | return TargetTransformInfo(X86TTIImpl(this, F));
345 | }
346 |
347 | //===----------------------------------------------------------------------===//
348 | // Pass Pipeline Configuration
349 | //===----------------------------------------------------------------------===//
350 |
351 | namespace {
352 |
353 | /// X86 Code Generator Pass Configuration Options.
354 | class X86PassConfig : public TargetPassConfig {
355 | public:
356 | X86PassConfig(X86TargetMachine &TM, PassManagerBase &PM)
357 | : TargetPassConfig(TM, PM) {}
358 |
359 | X86TargetMachine &getX86TargetMachine() const {
360 | return getTM<X86TargetMachine>();
361 | }
362 |
363 | ScheduleDAGInstrs *
364 | createMachineScheduler(MachineSchedContext *C) const override {
365 | ScheduleDAGMILive *DAG = createGenericSchedLive(C);
366 | DAG->addMutation(createX86MacroFusionDAGMutation());
367 | return DAG;
368 | }
369 |
370 | void addIRPasses() override;
371 | bool addInstSelector() override;
372 | bool addIRTranslator() override;
373 | bool addLegalizeMachineIR() override;
374 | bool addRegBankSelect() override;
375 | bool addGlobalInstructionSelect() override;
376 | bool addILPOpts() override;
377 | bool addPreISel() override;
378 | void addMachineSSAOptimization() override;
379 | void addPreRegAlloc() override;
380 | void addPostRegAlloc() override;
381 | void addPreEmitPass() override;
382 | void addPreEmitPass2() override;
383 | void addPreSched2() override;
384 | };
385 |
386 | class X86ExecutionDomainFix : public ExecutionDomainFix {
387 | public:
388 | static char ID;
389 | X86ExecutionDomainFix() : ExecutionDomainFix(ID, X86::VR128XRegClass) {}
390 | StringRef getPassName() const override {
391 | return "X86 Execution Dependency Fix";
392 | }
393 | };
394 | char X86ExecutionDomainFix::ID;
395 |
396 | } // end anonymous namespace
397 |
398 | INITIALIZE_PASS_BEGIN(X86ExecutionDomainFix, "x86-execution-domain-fix",
399 | "X86 Execution Domain Fix", false, false)
400 | INITIALIZE_PASS_DEPENDENCY(ReachingDefAnalysis)
401 | INITIALIZE_PASS_END(X86ExecutionDomainFix, "x86-execution-domain-fix",
402 | "X86 Execution Domain Fix", false, false)
403 |
404 | TargetPassConfig *X86TargetMachine::createPassConfig(PassManagerBase &PM) {
405 | return new X86PassConfig(*this, PM);
406 | }
407 |
408 | void X86PassConfig::addIRPasses() {
409 | addPass(createAtomicExpandPass());
410 |
411 | TargetPassConfig::addIRPasses();
412 |
413 | if (TM->getOptLevel() != CodeGenOpt::None)
414 | addPass(createInterleavedAccessPass());
415 |
416 | // Add passes that handle indirect branch removal and insertion of a retpoline
417 | // thunk. These will be a no-op unless a function subtarget has the retpoline
418 | // feature enabled.
419 | addPass(createIndirectBrExpandPass());
420 | }
421 |
422 | bool X86PassConfig::addInstSelector() {
423 | // Install an instruction selector.
424 | addPass(createX86ISelDag(getX86TargetMachine(), getOptLevel()));
425 |
426 | // For ELF, cleanup any local-dynamic TLS accesses.
427 | if (TM->getTargetTriple().isOSBinFormatELF() &&
428 | getOptLevel() != CodeGenOpt::None)
429 | addPass(createCleanupLocalDynamicTLSPass());
430 |
431 | addPass(createX86GlobalBaseRegPass());
432 | return false;
433 | }
434 |
435 | bool X86PassConfig::addIRTranslator() {
436 | addPass(new IRTranslator());
437 | return false;
438 | }
439 |
440 | bool X86PassConfig::addLegalizeMachineIR() {
441 | addPass(new Legalizer());
442 | return false;
443 | }
444 |
445 | bool X86PassConfig::addRegBankSelect() {
446 | addPass(new RegBankSelect());
447 | return false;
448 | }
449 |
450 | bool X86PassConfig::addGlobalInstructionSelect() {
451 | addPass(new InstructionSelect());
452 | return false;
453 | }
454 |
455 | bool X86PassConfig::addILPOpts() {
456 | addPass(&EarlyIfConverterID);
457 | if (EnableMachineCombinerPass)
458 | addPass(&MachineCombinerID);
459 | addPass(createX86CmovConverterPass());
460 | return true;
461 | }
462 |
463 | bool X86PassConfig::addPreISel() {
464 | // Only add this pass for 32-bit x86 Windows.
465 | const Triple &TT = TM->getTargetTriple();
466 | if (TT.isOSWindows() && TT.getArch() == Triple::x86)
467 | addPass(createX86WinEHStatePass());
468 | return true;
469 | }
470 |
471 | void X86PassConfig::addPreRegAlloc() {
472 | if (getOptLevel() != CodeGenOpt::None) {
473 | addPass(&LiveRangeShrinkID);
474 | addPass(createX86FixupSetCC());
475 | addPass(createX86OptimizeLEAs());
476 | addPass(createX86CallFrameOptimization());
477 | addPass(createX86AvoidStoreForwardingBlocks());
478 | }
479 |
480 | if (EnableSpeculativeLoadHardening)
481 | addPass(createX86SpeculativeLoadHardeningPass());
482 |
483 | addPass(createX86FlagsCopyLoweringPass());
484 | addPass(createX86WinAllocaExpander());
485 | }
486 | void X86PassConfig::addMachineSSAOptimization() {
487 | addPass(createX86DomainReassignmentPass());
488 | TargetPassConfig::addMachineSSAOptimization();
489 | }
490 |
491 | void X86PassConfig::addPostRegAlloc() {
492 | addPass(createX86FloatingPointStackifierPass());
493 | }
494 |
495 | void X86PassConfig::addPreSched2() { addPass(createX86ExpandPseudoPass()); }
496 |
497 | void X86PassConfig::addPreEmitPass() {
498 | if (getOptLevel() != CodeGenOpt::None) {
499 | addPass(new X86ExecutionDomainFix());
500 | addPass(createBreakFalseDeps());
501 | }
502 |
503 | addPass(createShadowCallStackPass());
504 | addPass(createX86IndirectBranchTrackingPass());
505 |
506 | if (UseVZeroUpper)
507 | addPass(createX86IssueVZeroUpperPass());
508 |
509 | if (getOptLevel() != CodeGenOpt::None) {
510 | addPass(createX86FixupBWInsts());
511 | addPass(createX86PadShortFunctions());
512 | addPass(createX86FixupLEAs());
513 | addPass(createX86EvexToVexInsts());
514 | }
515 | }
516 |
517 | void X86PassConfig::addPreEmitPass2() {
518 | addPass(createX86RetpolineThunksPass());
519 | if (EnableSpecFuzz)
520 | addPass(createX86SpecFuzzPass());
521 | // Verify basic block incoming and outgoing cfa offset and register values and
522 | // correct CFA calculation rule where needed by inserting appropriate CFI
523 | // instructions.
524 | const Triple &TT = TM->getTargetTriple();
525 | if (!TT.isOSDarwin() && !TT.isOSWindows())
526 | addPass(createCFIInstrInserter());
527 | }
528 |
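
In the pass configuration above, SpecFuzz is wired in at addPreEmitPass2, right after the retpoline thunk pass, and only when the hidden x86-specfuzz option is set. Hidden backend options of this kind are normally reached from the clang driver through -mllvm (something like -mllvm -x86-specfuzz; treat the exact invocation as an assumption, since the wrapper script that drives the build is not shown in this section). The stand-alone C++ sketch below shows the same declare-and-parse cl::opt pattern outside a backend; it assumes an LLVM development install and is not SpecFuzz code.

    #include "llvm/Support/CommandLine.h"
    #include <cstdio>

    // A hidden boolean flag, declared the same way as EnableSpecFuzz above.
    static llvm::cl::opt<bool> EnableDemo(
        "demo-specfuzz-style-flag",
        llvm::cl::desc("Example of a hidden boolean option"),
        llvm::cl::init(false), llvm::cl::Hidden);

    int main(int argc, char **argv) {
        llvm::cl::ParseCommandLineOptions(argc, argv, "cl::opt demo\n");
        // Inside a backend, the equivalent check gates addPass(createX86SpecFuzzPass()).
        std::printf("flag value: %s\n", EnableDemo ? "enabled" : "disabled");
        return 0;
    }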
--------------------------------------------------------------------------------
/install/patches/llvm/asan_poisoning.cc:
--------------------------------------------------------------------------------
1 | //===-- asan_poisoning.cc -------------------------------------------------===//
2 | //
3 | // The LLVM Compiler Infrastructure
4 | //
5 | // This file is distributed under the University of Illinois Open Source
6 | // License. See LICENSE.TXT for details.
7 | //
8 | //===----------------------------------------------------------------------===//
9 | //
10 | // This file is a part of AddressSanitizer, an address sanity checker.
11 | //
12 | // Shadow memory poisoning by ASan RTL and by user application.
13 | //===----------------------------------------------------------------------===//
14 |
15 | #include "asan_poisoning.h"
16 | #include "asan_report.h"
17 | #include "asan_stack.h"
18 | #include "sanitizer_common/sanitizer_atomic.h"
19 | #include "sanitizer_common/sanitizer_libc.h"
20 | #include "sanitizer_common/sanitizer_flags.h"
21 |
22 | namespace __asan {
23 |
24 | static atomic_uint8_t can_poison_memory;
25 | extern long* nesting_level;
26 |
27 | void SetCanPoisonMemory(bool value) {
28 | atomic_store(&can_poison_memory, value, memory_order_release);
29 | }
30 |
31 | bool CanPoisonMemory() {
32 | return atomic_load(&can_poison_memory, memory_order_acquire);
33 | }
34 |
35 | void PoisonShadow(uptr addr, uptr size, u8 value) {
36 | if (value && !CanPoisonMemory()) return;
37 | CHECK(AddrIsAlignedByGranularity(addr));
38 | CHECK(AddrIsInMem(addr));
39 | CHECK(AddrIsAlignedByGranularity(addr + size));
40 | CHECK(AddrIsInMem(addr + size - SHADOW_GRANULARITY));
41 | CHECK(REAL(memset));
42 | FastPoisonShadow(addr, size, value);
43 | }
44 |
45 | void PoisonShadowPartialRightRedzone(uptr addr,
46 | uptr size,
47 | uptr redzone_size,
48 | u8 value) {
49 | if (!CanPoisonMemory()) return;
50 | CHECK(AddrIsAlignedByGranularity(addr));
51 | CHECK(AddrIsInMem(addr));
52 | FastPoisonShadowPartialRightRedzone(addr, size, redzone_size, value);
53 | }
54 |
55 | struct ShadowSegmentEndpoint {
56 | u8 *chunk;
57 | s8 offset; // in [0, SHADOW_GRANULARITY)
58 | s8 value; // = *chunk;
59 |
60 | explicit ShadowSegmentEndpoint(uptr address) {
61 | chunk = (u8*)MemToShadow(address);
62 | offset = address & (SHADOW_GRANULARITY - 1);
63 | value = *chunk;
64 | }
65 | };
66 |
67 | void FlushUnneededASanShadowMemory(uptr p, uptr size) {
68 | // Since asan's mapping is compacting, the shadow chunk may be
69 | // not page-aligned, so we only flush the page-aligned portion.
70 | ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
71 | }
72 |
73 | void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
74 | uptr end = ptr + size;
75 | if (Verbosity()) {
76 | Printf("__asan_%spoison_intra_object_redzone [%p,%p) %zd\n",
77 | poison ? "" : "un", ptr, end, size);
78 | if (Verbosity() >= 2)
79 | PRINT_CURRENT_STACK();
80 | }
81 | CHECK(size);
82 | CHECK_LE(size, 4096);
83 | CHECK(IsAligned(end, SHADOW_GRANULARITY));
84 | if (!IsAligned(ptr, SHADOW_GRANULARITY)) {
85 | *(u8 *)MemToShadow(ptr) =
86 | poison ? static_cast<u8>(ptr % SHADOW_GRANULARITY) : 0;
87 | ptr |= SHADOW_GRANULARITY - 1;
88 | ptr++;
89 | }
90 | for (; ptr < end; ptr += SHADOW_GRANULARITY)
91 | *(u8*)MemToShadow(ptr) = poison ? kAsanIntraObjectRedzone : 0;
92 | }
93 |
94 | } // namespace __asan
95 |
96 | // ---------------------- Interface ---------------- {{{1
97 | using namespace __asan; // NOLINT
98 |
99 | // Current implementation of __asan_(un)poison_memory_region doesn't check
100 | // that user program (un)poisons the memory it owns. It poisons memory
101 | // conservatively, and unpoisons progressively to make sure asan shadow
102 | // mapping invariant is preserved (see detailed mapping description here:
103 | // https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm).
104 | //
105 | // * if user asks to poison region [left, right), the program poisons
106 | // at least [left, AlignDown(right)).
107 | // * if user asks to unpoison region [left, right), the program unpoisons
108 | // at most [AlignDown(left), right).
109 | void __asan_poison_memory_region(void const volatile *addr, uptr size) {
110 | if (!flags()->allow_user_poisoning || size == 0) return;
111 | if (*nesting_level != 0) {
112 | UNREACHABLE("poisoning shadow memory within SpecFuzz simulation");
113 | return;
114 | }
115 |
116 | uptr beg_addr = (uptr)addr;
117 | uptr end_addr = beg_addr + size;
118 | VPrintf(3, "Trying to poison memory region [%p, %p)\n", (void *)beg_addr,
119 | (void *)end_addr);
120 | ShadowSegmentEndpoint beg(beg_addr);
121 | ShadowSegmentEndpoint end(end_addr);
122 | if (beg.chunk == end.chunk) {
123 | CHECK_LT(beg.offset, end.offset);
124 | s8 value = beg.value;
125 | CHECK_EQ(value, end.value);
126 | // We can only poison memory if the byte in end.offset is unaddressable.
127 | // No need to re-poison memory if it is poisoned already.
128 | if (value > 0 && value <= end.offset) {
129 | if (beg.offset > 0) {
130 | *beg.chunk = Min(value, beg.offset);
131 | } else {
132 | *beg.chunk = kAsanUserPoisonedMemoryMagic;
133 | }
134 | }
135 | return;
136 | }
137 | CHECK_LT(beg.chunk, end.chunk);
138 | if (beg.offset > 0) {
139 | // Mark bytes from beg.offset as unaddressable.
140 | if (beg.value == 0) {
141 | *beg.chunk = beg.offset;
142 | } else {
143 | *beg.chunk = Min(beg.value, beg.offset);
144 | }
145 | beg.chunk++;
146 | }
147 | REAL(memset)(beg.chunk, kAsanUserPoisonedMemoryMagic, end.chunk - beg.chunk);
148 | // Poison if byte in end.offset is unaddressable.
149 | if (end.value > 0 && end.value <= end.offset) {
150 | *end.chunk = kAsanUserPoisonedMemoryMagic;
151 | }
152 | }
153 |
154 | void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
155 | if (!flags()->allow_user_poisoning || size == 0) return;
156 | uptr beg_addr = (uptr)addr;
157 | uptr end_addr = beg_addr + size;
158 | VPrintf(3, "Trying to unpoison memory region [%p, %p)\n", (void *)beg_addr,
159 | (void *)end_addr);
160 | ShadowSegmentEndpoint beg(beg_addr);
161 | ShadowSegmentEndpoint end(end_addr);
162 | if (beg.chunk == end.chunk) {
163 | CHECK_LT(beg.offset, end.offset);
164 | s8 value = beg.value;
165 | CHECK_EQ(value, end.value);
166 | // We unpoison memory bytes up to end.offset if it is not
167 | // unpoisoned already.
168 | if (value != 0) {
169 | *beg.chunk = Max(value, end.offset);
170 | }
171 | return;
172 | }
173 | CHECK_LT(beg.chunk, end.chunk);
174 | if (beg.offset > 0) {
175 | *beg.chunk = 0;
176 | beg.chunk++;
177 | }
178 | REAL(memset)(beg.chunk, 0, end.chunk - beg.chunk);
179 | if (end.offset > 0 && end.value != 0) {
180 | *end.chunk = Max(end.value, end.offset);
181 | }
182 | }
183 |
184 | int __asan_address_is_poisoned(void const volatile *addr) {
185 | return __asan::AddressIsPoisoned((uptr)addr);
186 | }
187 |
188 | uptr __asan_region_is_poisoned(uptr beg, uptr size) {
189 | if (!size) return 0;
190 | uptr end = beg + size;
191 | if (SANITIZER_MYRIAD2) {
192 | // On Myriad, address not in DRAM range need to be treated as
193 | // unpoisoned.
194 | if (!AddrIsInMem(beg) && !AddrIsInShadow(beg)) return 0;
195 | if (!AddrIsInMem(end) && !AddrIsInShadow(end)) return 0;
196 | } else {
197 | if (!AddrIsInMem(beg)) return beg;
198 | if (!AddrIsInMem(end)) return end;
199 | }
200 | CHECK_LT(beg, end);
201 | uptr aligned_b = RoundUpTo(beg, SHADOW_GRANULARITY);
202 | uptr aligned_e = RoundDownTo(end, SHADOW_GRANULARITY);
203 | uptr shadow_beg = MemToShadow(aligned_b);
204 | uptr shadow_end = MemToShadow(aligned_e);
205 | // First check the first and the last application bytes,
206 | // then check the SHADOW_GRANULARITY-aligned region by calling
207 | // mem_is_zero on the corresponding shadow.
208 | if (!__asan::AddressIsPoisoned(beg) &&
209 | !__asan::AddressIsPoisoned(end - 1) &&
210 | (shadow_end <= shadow_beg ||
211 | __sanitizer::mem_is_zero((const char *)shadow_beg,
212 | shadow_end - shadow_beg)))
213 | return 0;
214 | // The fast check failed, so we have a poisoned byte somewhere.
215 | // Find it slowly.
216 | for (; beg < end; beg++)
217 | if (__asan::AddressIsPoisoned(beg))
218 | return beg;
219 | UNREACHABLE("mem_is_zero returned false, but poisoned byte was not found");
220 | return 0;
221 | }
222 |
223 | #define CHECK_SMALL_REGION(p, size, isWrite) \
224 | do { \
225 | uptr __p = reinterpret_cast<uptr>(p); \
226 | uptr __size = size; \
227 | if (UNLIKELY(__asan::AddressIsPoisoned(__p) || \
228 | __asan::AddressIsPoisoned(__p + __size - 1))) { \
229 | GET_CURRENT_PC_BP_SP; \
230 | uptr __bad = __asan_region_is_poisoned(__p, __size); \
231 | __asan_report_error(pc, bp, sp, __bad, isWrite, __size, 0);\
232 | } \
233 | } while (false)
234 |
235 |
236 | extern "C" SANITIZER_INTERFACE_ATTRIBUTE
237 | u16 __sanitizer_unaligned_load16(const uu16 *p) {
238 | CHECK_SMALL_REGION(p, sizeof(*p), false);
239 | return *p;
240 | }
241 |
242 | extern "C" SANITIZER_INTERFACE_ATTRIBUTE
243 | u32 __sanitizer_unaligned_load32(const uu32 *p) {
244 | CHECK_SMALL_REGION(p, sizeof(*p), false);
245 | return *p;
246 | }
247 |
248 | extern "C" SANITIZER_INTERFACE_ATTRIBUTE
249 | u64 __sanitizer_unaligned_load64(const uu64 *p) {
250 | CHECK_SMALL_REGION(p, sizeof(*p), false);
251 | return *p;
252 | }
253 |
254 | extern "C" SANITIZER_INTERFACE_ATTRIBUTE
255 | void __sanitizer_unaligned_store16(uu16 *p, u16 x) {
256 | CHECK_SMALL_REGION(p, sizeof(*p), true);
257 | *p = x;
258 | }
259 |
260 | extern "C" SANITIZER_INTERFACE_ATTRIBUTE
261 | void __sanitizer_unaligned_store32(uu32 *p, u32 x) {
262 | CHECK_SMALL_REGION(p, sizeof(*p), true);
263 | *p = x;
264 | }
265 |
266 | extern "C" SANITIZER_INTERFACE_ATTRIBUTE
267 | void __sanitizer_unaligned_store64(uu64 *p, u64 x) {
268 | CHECK_SMALL_REGION(p, sizeof(*p), true);
269 | *p = x;
270 | }
271 |
272 | extern "C" SANITIZER_INTERFACE_ATTRIBUTE
273 | void __asan_poison_cxx_array_cookie(uptr p) {
274 | if (SANITIZER_WORDSIZE != 64) return;
275 | if (!flags()->poison_array_cookie) return;
276 | if (*nesting_level != 0) {
277 | UNREACHABLE("poisoning shadow memory within SpecFuzz simulation");
278 | return;
279 | }
280 | uptr s = MEM_TO_SHADOW(p);
281 | *reinterpret_cast<u8*>(s) = kAsanArrayCookieMagic;
282 | }
283 |
284 | extern "C" SANITIZER_INTERFACE_ATTRIBUTE
285 | uptr __asan_load_cxx_array_cookie(uptr *p) {
286 | if (SANITIZER_WORDSIZE != 64) return *p;
287 | if (!flags()->poison_array_cookie) return *p;
288 | uptr s = MEM_TO_SHADOW(reinterpret_cast(p));
289 | u8 sval = *reinterpret_cast<u8*>(s);
290 | if (sval == kAsanArrayCookieMagic) return *p;
291 | // If sval is not kAsanArrayCookieMagic it can only be freed memory,
292 | // which means that we are going to get double-free. So, return 0 to avoid
293 | // infinite loop of destructors. We don't want to report a double-free here
294 | // though, so print a warning just in case.
295 | // CHECK_EQ(sval, kAsanHeapFreeMagic);
296 | if (sval == kAsanHeapFreeMagic) {
297 | Report("AddressSanitizer: loaded array cookie from free-d memory; "
298 | "expect a double-free report\n");
299 | return 0;
300 | }
301 | // The cookie may remain unpoisoned if e.g. it comes from a custom
302 | // operator new defined inside a class.
303 | return *p;
304 | }
305 |
306 | // This is a simplified version of __asan_(un)poison_memory_region, which
307 | // assumes that left border of region to be poisoned is properly aligned.
308 | static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
309 | if (size == 0) return;
310 | uptr aligned_size = size & ~(SHADOW_GRANULARITY - 1);
311 | PoisonShadow(addr, aligned_size,
312 | do_poison ? kAsanStackUseAfterScopeMagic : 0);
313 | if (size == aligned_size)
314 | return;
315 | s8 end_offset = (s8)(size - aligned_size);
316 | s8* shadow_end = (s8*)MemToShadow(addr + aligned_size);
317 | s8 end_value = *shadow_end;
318 | if (do_poison) {
319 | // If possible, mark all the bytes mapping to last shadow byte as
320 | // unaddressable.
321 | if (end_value > 0 && end_value <= end_offset)
322 | *shadow_end = (s8)kAsanStackUseAfterScopeMagic;
323 | } else {
324 | // If necessary, mark few first bytes mapping to last shadow byte
325 | // as addressable
326 | if (end_value != 0)
327 | *shadow_end = Max(end_value, end_offset);
328 | }
329 | }
330 |
331 | void __asan_set_shadow_00(uptr addr, uptr size) {
332 | REAL(memset)((void *)addr, 0, size);
333 | }
334 |
335 | void __asan_set_shadow_f1(uptr addr, uptr size) {
336 | REAL(memset)((void *)addr, 0xf1, size);
337 | }
338 |
339 | void __asan_set_shadow_f2(uptr addr, uptr size) {
340 | REAL(memset)((void *)addr, 0xf2, size);
341 | }
342 |
343 | void __asan_set_shadow_f3(uptr addr, uptr size) {
344 | REAL(memset)((void *)addr, 0xf3, size);
345 | }
346 |
347 | void __asan_set_shadow_f5(uptr addr, uptr size) {
348 | REAL(memset)((void *)addr, 0xf5, size);
349 | }
350 |
351 | void __asan_set_shadow_f8(uptr addr, uptr size) {
352 | REAL(memset)((void *)addr, 0xf8, size);
353 | }
354 |
355 | void __asan_poison_stack_memory(uptr addr, uptr size) {
356 | if (*nesting_level != 0) {
357 | UNREACHABLE("poisoning shadow memory within SpecFuzz simulation");
358 | return;
359 | }
360 | VReport(1, "poisoning: %p %zx\n", (void *)addr, size);
361 | PoisonAlignedStackMemory(addr, size, true);
362 | }
363 |
364 | void __asan_unpoison_stack_memory(uptr addr, uptr size) {
365 | VReport(1, "unpoisoning: %p %zx\n", (void *)addr, size);
366 | PoisonAlignedStackMemory(addr, size, false);
367 | }
368 |
369 | void __sanitizer_annotate_contiguous_container(const void *beg_p,
370 | const void *end_p,
371 | const void *old_mid_p,
372 | const void *new_mid_p) {
373 | if (!flags()->detect_container_overflow) return;
374 | VPrintf(2, "contiguous_container: %p %p %p %p\n", beg_p, end_p, old_mid_p,
375 | new_mid_p);
376 | uptr beg = reinterpret_cast<uptr>(beg_p);
377 | uptr end = reinterpret_cast<uptr>(end_p);
378 | uptr old_mid = reinterpret_cast<uptr>(old_mid_p);
379 | uptr new_mid = reinterpret_cast<uptr>(new_mid_p);
380 | uptr granularity = SHADOW_GRANULARITY;
381 | if (!(beg <= old_mid && beg <= new_mid && old_mid <= end && new_mid <= end &&
382 | IsAligned(beg, granularity))) {
383 | GET_STACK_TRACE_FATAL_HERE;
384 | ReportBadParamsToAnnotateContiguousContainer(beg, end, old_mid, new_mid,
385 | &stack);
386 | }
387 | CHECK_LE(end - beg,
388 | FIRST_32_SECOND_64(1UL << 30, 1ULL << 34)); // Sanity check.
389 |
390 | uptr a = RoundDownTo(Min(old_mid, new_mid), granularity);
391 | uptr c = RoundUpTo(Max(old_mid, new_mid), granularity);
392 | uptr d1 = RoundDownTo(old_mid, granularity);
393 | // uptr d2 = RoundUpTo(old_mid, granularity);
394 | // Currently we should be in this state:
395 | // [a, d1) is good, [d2, c) is bad, [d1, d2) is partially good.
396 | // Make a quick sanity check that we are indeed in this state.
397 | //
398 | // FIXME: Two of these three checks are disabled until we fix
399 | // https://github.com/google/sanitizers/issues/258.
400 | // if (d1 != d2)
401 | // CHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1);
402 | if (a + granularity <= d1)
403 | CHECK_EQ(*(u8*)MemToShadow(a), 0);
404 | // if (d2 + granularity <= c && c <= end)
405 | // CHECK_EQ(*(u8 *)MemToShadow(c - granularity),
406 | // kAsanContiguousContainerOOBMagic);
407 |
408 | uptr b1 = RoundDownTo(new_mid, granularity);
409 | uptr b2 = RoundUpTo(new_mid, granularity);
410 | // New state:
411 | // [a, b1) is good, [b2, c) is bad, [b1, b2) is partially good.
412 | PoisonShadow(a, b1 - a, 0);
413 | PoisonShadow(b2, c - b2, kAsanContiguousContainerOOBMagic);
414 | if (b1 != b2) {
415 | CHECK_EQ(b2 - b1, granularity);
416 | *(u8*)MemToShadow(b1) = static_cast<u8>(new_mid - b1);
417 | }
418 | }
419 |
420 | const void *__sanitizer_contiguous_container_find_bad_address(
421 | const void *beg_p, const void *mid_p, const void *end_p) {
422 | if (!flags()->detect_container_overflow)
423 | return nullptr;
424 | uptr beg = reinterpret_cast<uptr>(beg_p);
425 | uptr end = reinterpret_cast<uptr>(end_p);
426 | uptr mid = reinterpret_cast<uptr>(mid_p);
427 | CHECK_LE(beg, mid);
428 | CHECK_LE(mid, end);
429 | // Check some bytes starting from beg, some bytes around mid, and some bytes
430 | // ending with end.
431 | uptr kMaxRangeToCheck = 32;
432 | uptr r1_beg = beg;
433 | uptr r1_end = Min(beg + kMaxRangeToCheck, mid);
434 | uptr r2_beg = Max(beg, mid - kMaxRangeToCheck);
435 | uptr r2_end = Min(end, mid + kMaxRangeToCheck);
436 | uptr r3_beg = Max(end - kMaxRangeToCheck, mid);
437 | uptr r3_end = end;
438 | for (uptr i = r1_beg; i < r1_end; i++)
439 | if (AddressIsPoisoned(i))
440 | return reinterpret_cast<const void *>(i);
441 | for (uptr i = r2_beg; i < mid; i++)
442 | if (AddressIsPoisoned(i))
443 | return reinterpret_cast<const void *>(i);
444 | for (uptr i = mid; i < r2_end; i++)
445 | if (!AddressIsPoisoned(i))
446 | return reinterpret_cast<const void *>(i);
447 | for (uptr i = r3_beg; i < r3_end; i++)
448 | if (!AddressIsPoisoned(i))
449 | return reinterpret_cast<const void *>(i);
450 | return nullptr;
451 | }
452 |
453 | int __sanitizer_verify_contiguous_container(const void *beg_p,
454 | const void *mid_p,
455 | const void *end_p) {
456 | return __sanitizer_contiguous_container_find_bad_address(beg_p, mid_p,
457 | end_p) == nullptr;
458 | }
459 |
460 | extern "C" SANITIZER_INTERFACE_ATTRIBUTE
461 | void __asan_poison_intra_object_redzone(uptr ptr, uptr size) {
462 | if (*nesting_level != 0) {
463 | UNREACHABLE("poisoning shadow memory within SpecFuzz simulation");
464 | return;
465 | }
466 | AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, true);
467 | }
468 |
469 | extern "C" SANITIZER_INTERFACE_ATTRIBUTE
470 | void __asan_unpoison_intra_object_redzone(uptr ptr, uptr size) {
471 | AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, false);
472 | }
473 |
474 | // --- Implementation of LSan-specific functions --- {{{1
475 | namespace __lsan {
476 | bool WordIsPoisoned(uptr addr) {
477 | return (__asan_region_is_poisoned(addr, sizeof(uptr)) != 0);
478 | }
479 | }
480 |
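
The recurring change in this file is the nesting_level guard: shadow memory must not be poisoned while the SpecFuzz runtime reports a speculative simulation in progress, presumably because such writes would corrupt the shadow state that the later rollback relies on. The self-contained C++ sketch below imitates the guard with a local counter; in the patch the counter is the extern long* exported by the SpecFuzz runtime, and the refusal path calls UNREACHABLE() instead of returning.

    #include <cstdio>

    static long simulated_nesting_level = 0;  // stands in for *nesting_level

    static bool poison_region(unsigned long addr, unsigned long size) {
        if (simulated_nesting_level != 0) {
            // The patch calls UNREACHABLE(...) here; this sketch only refuses.
            std::fprintf(stderr, "refusing to poison [%#lx, %#lx) during simulation\n",
                         addr, addr + size);
            return false;
        }
        std::fprintf(stderr, "poisoning [%#lx, %#lx)\n", addr, addr + size);
        return true;
    }

    int main() {
        poison_region(0x1000, 64);    // allowed: no simulation in progress
        simulated_nesting_level = 1;  // enter a simulated speculative window
        poison_region(0x2000, 64);    // refused
        return 0;
    }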
--------------------------------------------------------------------------------
/install/patches/llvm/asan_rtl.cc:
--------------------------------------------------------------------------------
1 | //===-- asan_rtl.cc -------------------------------------------------------===//
2 | //
3 | // The LLVM Compiler Infrastructure
4 | //
5 | // This file is distributed under the University of Illinois Open Source
6 | // License. See LICENSE.TXT for details.
7 | //
8 | //===----------------------------------------------------------------------===//
9 | //
10 | // This is a modified version of AddressSanitizer runtime, tailored
11 | // for SpecFuzz
12 | //===----------------------------------------------------------------------===//
13 |
14 | #include "asan_activation.h"
15 | #include "asan_allocator.h"
16 | #include "asan_interceptors.h"
17 | #include "asan_interface_internal.h"
18 | #include "asan_internal.h"
19 | #include "asan_mapping.h"
20 | #include "asan_poisoning.h"
21 | #include "asan_report.h"
22 | #include "asan_stack.h"
23 | #include "asan_stats.h"
24 | #include "asan_suppressions.h"
25 | #include "asan_thread.h"
26 | #include "sanitizer_common/sanitizer_atomic.h"
27 | #include "sanitizer_common/sanitizer_flags.h"
28 | #include "sanitizer_common/sanitizer_libc.h"
29 | #include "sanitizer_common/sanitizer_symbolizer.h"
30 | #include "lsan/lsan_common.h"
31 | #include "ubsan/ubsan_init.h"
32 | #include "ubsan/ubsan_platform.h"
33 |
34 | #include
35 |
36 | uptr __asan_shadow_memory_dynamic_address; // Global interface symbol.
37 | int __asan_option_detect_stack_use_after_return; // Global interface symbol.
38 | uptr *__asan_test_only_reported_buggy_pointer; // Used only for testing asan.
39 |
40 | namespace __asan {
41 |
42 | uptr AsanMappingProfile[kAsanMappingProfileSize];
43 |
44 | static void AsanDie() {
45 | static atomic_uint32_t num_calls;
46 | if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) != 0) {
47 | // Don't die twice - run a busy loop.
48 | while (1) { }
49 | }
50 | if (common_flags()->print_module_map >= 1) PrintModuleMap();
51 | if (flags()->sleep_before_dying) {
52 | Report("Sleeping for %d second(s)\n", flags()->sleep_before_dying);
53 | SleepForSeconds(flags()->sleep_before_dying);
54 | }
55 | if (flags()->unmap_shadow_on_exit) {
56 | if (kMidMemBeg) {
57 | UnmapOrDie((void*)kLowShadowBeg, kMidMemBeg - kLowShadowBeg);
58 | UnmapOrDie((void*)kMidMemEnd, kHighShadowEnd - kMidMemEnd);
59 | } else {
60 | if (kHighShadowEnd)
61 | UnmapOrDie((void*)kLowShadowBeg, kHighShadowEnd - kLowShadowBeg);
62 | }
63 | }
64 | }
65 |
66 | static void AsanCheckFailed(const char *file, int line, const char *cond,
67 | u64 v1, u64 v2) {
68 | Report("AddressSanitizer CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n", file,
69 | line, cond, (uptr)v1, (uptr)v2);
70 |
71 | // Print a stack trace the first time we come here. Otherwise, we probably
72 | // failed a CHECK during symbolization.
73 | static atomic_uint32_t num_calls;
74 | if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) == 0) {
75 | PRINT_CURRENT_STACK_CHECK();
76 | }
77 |
78 | Die();
79 | }
80 |
81 | // -------------------------- Globals --------------------- {{{1
82 | int asan_inited;
83 | bool asan_init_is_running;
84 |
85 | #if !ASAN_FIXED_MAPPING
86 | uptr kHighMemEnd, kMidMemBeg, kMidMemEnd;
87 | #endif
88 |
89 | // -------------------------- Misc ---------------- {{{1
90 | void ShowStatsAndAbort() {
91 | __asan_print_accumulated_stats();
92 | Die();
93 | }
94 |
95 | // --------------- LowLevelAllocateCallback ---------- {{{1
96 | static void OnLowLevelAllocate(uptr ptr, uptr size) {
97 | PoisonShadow(ptr, size, kAsanInternalHeapMagic);
98 | }
99 |
100 | // -------------------------- Run-time entry ------------------- {{{1
101 | // exported functions
102 | #define ASAN_REPORT_ERROR(type, is_write, size) \
103 | extern "C" NOINLINE INTERFACE_ATTRIBUTE \
104 | void __asan_report_ ## type ## size(uptr addr) { \
105 | GET_CALLER_PC_BP_SP; \
106 | ReportGenericError(pc, bp, sp, addr, is_write, size, 0, true); \
107 | } \
108 | extern "C" NOINLINE INTERFACE_ATTRIBUTE \
109 | void __asan_report_exp_ ## type ## size(uptr addr, u32 exp) { \
110 | GET_CALLER_PC_BP_SP; \
111 | ReportGenericError(pc, bp, sp, addr, is_write, size, exp, true); \
112 | } \
113 | extern "C" NOINLINE INTERFACE_ATTRIBUTE \
114 | void __asan_report_ ## type ## size ## _noabort(uptr addr) { \
115 | GET_CALLER_PC_BP_SP; \
116 | ReportGenericError(pc, bp, sp, addr, is_write, size, 0, false); \
117 | } \
118 |
119 | ASAN_REPORT_ERROR(load, false, 1)
120 | ASAN_REPORT_ERROR(load, false, 2)
121 | ASAN_REPORT_ERROR(load, false, 4)
122 | ASAN_REPORT_ERROR(load, false, 8)
123 | ASAN_REPORT_ERROR(load, false, 16)
124 | ASAN_REPORT_ERROR(store, true, 1)
125 | ASAN_REPORT_ERROR(store, true, 2)
126 | ASAN_REPORT_ERROR(store, true, 4)
127 | ASAN_REPORT_ERROR(store, true, 8)
128 | ASAN_REPORT_ERROR(store, true, 16)
129 |
130 | #define ASAN_REPORT_ERROR_N(type, is_write) \
131 | extern "C" NOINLINE INTERFACE_ATTRIBUTE \
132 | void __asan_report_ ## type ## _n(uptr addr, uptr size) { \
133 | GET_CALLER_PC_BP_SP; \
134 | ReportGenericError(pc, bp, sp, addr, is_write, size, 0, true); \
135 | } \
136 | extern "C" NOINLINE INTERFACE_ATTRIBUTE \
137 | void __asan_report_exp_ ## type ## _n(uptr addr, uptr size, u32 exp) { \
138 | GET_CALLER_PC_BP_SP; \
139 | ReportGenericError(pc, bp, sp, addr, is_write, size, exp, true); \
140 | } \
141 | extern "C" NOINLINE INTERFACE_ATTRIBUTE \
142 | void __asan_report_ ## type ## _n_noabort(uptr addr, uptr size) { \
143 | GET_CALLER_PC_BP_SP; \
144 | ReportGenericError(pc, bp, sp, addr, is_write, size, 0, false); \
145 | } \
146 |
147 | ASAN_REPORT_ERROR_N(load, false)
148 | ASAN_REPORT_ERROR_N(store, true)
149 | //
150 | //#define ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, exp_arg, fatal) \
151 | // if (SANITIZER_MYRIAD2 && !AddrIsInMem(addr) && !AddrIsInShadow(addr)) \
152 | // return; \
153 | // uptr sp = MEM_TO_SHADOW(addr); \
154 | // uptr s = size <= SHADOW_GRANULARITY ? *reinterpret_cast<u8 *>(sp) \
155 | // : *reinterpret_cast<u16 *>(sp); \
156 | // if (UNLIKELY(s)) { \
157 | // if (UNLIKELY(size >= SHADOW_GRANULARITY || \
158 | // ((s8)((addr & (SHADOW_GRANULARITY - 1)) + size - 1)) >= \
159 | // (s8)s)) { \
160 | // if (__asan_test_only_reported_buggy_pointer) { \
161 | // *__asan_test_only_reported_buggy_pointer = addr; \
162 | // } else { \
163 | // GET_CALLER_PC_BP_SP; \
164 | // ReportGenericError(pc, bp, sp, addr, is_write, size, exp_arg, \
165 | // fatal); \
166 | // } \
167 | // } \
168 | // }
169 |
170 | typedef void (*func_ptr_t)(void);
171 | func_ptr_t specfuzz_report = 0;
172 | long* nesting_level;
173 |
174 | #define SF_REPORT(addr) \
175 | void *offending_instruction = __builtin_return_address(0) + 8; \
176 | __asm__ volatile("movq %0, %%rdi\n" \
177 | "movq %1, %%rsi\n" \
178 | "callq %2\n" \
179 | : : "g" (addr), "g" (offending_instruction), "m" (specfuzz_report) \
180 | : "rdi", "rsi" );
181 |
182 |
183 | #define ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, exp_arg, fatal) \
184 | if (SANITIZER_MYRIAD2 && !AddrIsInMem(addr) && !AddrIsInShadow(addr)) \
185 | return; \
186 | uptr sp = MEM_TO_SHADOW(addr); \
187 | uptr s = size <= SHADOW_GRANULARITY ? *reinterpret_cast<u8 *>(sp) \
188 | : *reinterpret_cast<u16 *>(sp); \
189 | if (UNLIKELY(s)) { \
190 | if (UNLIKELY(size >= SHADOW_GRANULARITY || \
191 | ((s8)((addr & (SHADOW_GRANULARITY - 1)) + size - 1)) >= \
192 | (s8)s)) { \
193 | if (__asan_test_only_reported_buggy_pointer) { \
194 | *__asan_test_only_reported_buggy_pointer = addr; \
195 | } else { \
196 | SF_REPORT(addr); \
197 | } \
198 | } \
199 | }
200 |
201 | #define ASAN_MEMORY_ACCESS_CALLBACK(type, is_write, size) \
202 | extern "C" NOINLINE INTERFACE_ATTRIBUTE \
203 | void __asan_##type##size(uptr addr) { \
204 | ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, 0, true) \
205 | } \
206 | extern "C" NOINLINE INTERFACE_ATTRIBUTE \
207 | void __asan_exp_##type##size(uptr addr, u32 exp) { \
208 | ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, exp, true) \
209 | } \
210 | extern "C" NOINLINE INTERFACE_ATTRIBUTE \
211 | void __asan_##type##size ## _noabort(uptr addr) { \
212 | ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, 0, false) \
213 | } \
214 |
215 | ASAN_MEMORY_ACCESS_CALLBACK(load, false, 1)
216 | ASAN_MEMORY_ACCESS_CALLBACK(load, false, 2)
217 | ASAN_MEMORY_ACCESS_CALLBACK(load, false, 4)
218 | ASAN_MEMORY_ACCESS_CALLBACK(load, false, 8)
219 | ASAN_MEMORY_ACCESS_CALLBACK(load, false, 16)
220 | ASAN_MEMORY_ACCESS_CALLBACK(store, true, 1)
221 | ASAN_MEMORY_ACCESS_CALLBACK(store, true, 2)
222 | ASAN_MEMORY_ACCESS_CALLBACK(store, true, 4)
223 | ASAN_MEMORY_ACCESS_CALLBACK(store, true, 8)
224 | ASAN_MEMORY_ACCESS_CALLBACK(store, true, 16)
225 |
226 | extern "C"
227 | NOINLINE INTERFACE_ATTRIBUTE
228 | void __asan_loadN(uptr addr, uptr size) {
229 | if (__asan_region_is_poisoned(addr, size)) {
230 | SF_REPORT(addr);
231 | }
232 | }
233 |
234 | extern "C"
235 | NOINLINE INTERFACE_ATTRIBUTE
236 | void __asan_exp_loadN(uptr addr, uptr size, u32 exp) {
237 | if (__asan_region_is_poisoned(addr, size)) {
238 | SF_REPORT(addr);
239 | }
240 | }
241 |
242 | extern "C"
243 | NOINLINE INTERFACE_ATTRIBUTE
244 | void __asan_loadN_noabort(uptr addr, uptr size) {
245 | if (__asan_region_is_poisoned(addr, size)) {
246 | SF_REPORT(addr);
247 | }
248 | }
249 |
250 | extern "C"
251 | NOINLINE INTERFACE_ATTRIBUTE
252 | void __asan_storeN(uptr addr, uptr size) {
253 | if (__asan_region_is_poisoned(addr, size)) {
254 | SF_REPORT(addr);
255 | }
256 | }
257 |
258 | extern "C"
259 | NOINLINE INTERFACE_ATTRIBUTE
260 | void __asan_exp_storeN(uptr addr, uptr size, u32 exp) {
261 | if (__asan_region_is_poisoned(addr, size)) {
262 | SF_REPORT(addr);
263 | }
264 | }
265 |
266 | extern "C"
267 | NOINLINE INTERFACE_ATTRIBUTE
268 | void __asan_storeN_noabort(uptr addr, uptr size) {
269 | if (__asan_region_is_poisoned(addr, size)) {
270 | SF_REPORT(addr);
271 | }
272 | }
273 |
274 | // Force the linker to keep the symbols for various ASan interface functions.
275 | // We want to keep those in the executable in order to let the instrumented
276 | // dynamic libraries access the symbol even if it is not used by the executable
277 | // itself. This should help if the build system is removing dead code at link
278 | // time.
279 | static NOINLINE void force_interface_symbols() {
280 | volatile int fake_condition = 0; // prevent dead condition elimination.
281 | // __asan_report_* functions are noreturn, so we need a switch to prevent
282 | // the compiler from removing any of them.
283 | // clang-format off
284 | switch (fake_condition) {
285 | case 1: __asan_report_load1(0); break;
286 | case 2: __asan_report_load2(0); break;
287 | case 3: __asan_report_load4(0); break;
288 | case 4: __asan_report_load8(0); break;
289 | case 5: __asan_report_load16(0); break;
290 | case 6: __asan_report_load_n(0, 0); break;
291 | case 7: __asan_report_store1(0); break;
292 | case 8: __asan_report_store2(0); break;
293 | case 9: __asan_report_store4(0); break;
294 | case 10: __asan_report_store8(0); break;
295 | case 11: __asan_report_store16(0); break;
296 | case 12: __asan_report_store_n(0, 0); break;
297 | case 13: __asan_report_exp_load1(0, 0); break;
298 | case 14: __asan_report_exp_load2(0, 0); break;
299 | case 15: __asan_report_exp_load4(0, 0); break;
300 | case 16: __asan_report_exp_load8(0, 0); break;
301 | case 17: __asan_report_exp_load16(0, 0); break;
302 | case 18: __asan_report_exp_load_n(0, 0, 0); break;
303 | case 19: __asan_report_exp_store1(0, 0); break;
304 | case 20: __asan_report_exp_store2(0, 0); break;
305 | case 21: __asan_report_exp_store4(0, 0); break;
306 | case 22: __asan_report_exp_store8(0, 0); break;
307 | case 23: __asan_report_exp_store16(0, 0); break;
308 | case 24: __asan_report_exp_store_n(0, 0, 0); break;
309 | case 25: __asan_register_globals(nullptr, 0); break;
310 | case 26: __asan_unregister_globals(nullptr, 0); break;
311 | case 27: __asan_set_death_callback(nullptr); break;
312 | case 28: __asan_set_error_report_callback(nullptr); break;
313 | case 29: __asan_handle_no_return(); break;
314 | case 30: __asan_address_is_poisoned(nullptr); break;
315 | case 31: __asan_poison_memory_region(nullptr, 0); break;
316 | case 32: __asan_unpoison_memory_region(nullptr, 0); break;
317 | case 34: __asan_before_dynamic_init(nullptr); break;
318 | case 35: __asan_after_dynamic_init(); break;
319 | case 36: __asan_poison_stack_memory(0, 0); break;
320 | case 37: __asan_unpoison_stack_memory(0, 0); break;
321 | case 38: __asan_region_is_poisoned(0, 0); break;
322 | case 39: __asan_describe_address(0); break;
323 | case 40: __asan_set_shadow_00(0, 0); break;
324 | case 41: __asan_set_shadow_f1(0, 0); break;
325 | case 42: __asan_set_shadow_f2(0, 0); break;
326 | case 43: __asan_set_shadow_f3(0, 0); break;
327 | case 44: __asan_set_shadow_f5(0, 0); break;
328 | case 45: __asan_set_shadow_f8(0, 0); break;
329 | }
330 | // clang-format on
331 | }
332 |
333 | static void asan_atexit() {
334 | Printf("AddressSanitizer exit stats:\n");
335 | __asan_print_accumulated_stats();
336 | // Print AsanMappingProfile.
337 | for (uptr i = 0; i < kAsanMappingProfileSize; i++) {
338 | if (AsanMappingProfile[i] == 0) continue;
339 | Printf("asan_mapping.h:%zd -- %zd\n", i, AsanMappingProfile[i]);
340 | }
341 | }
342 |
343 | static void InitializeHighMemEnd() {
344 | #if !SANITIZER_MYRIAD2
345 | #if !ASAN_FIXED_MAPPING
346 | kHighMemEnd = GetMaxUserVirtualAddress();
347 | // Increase kHighMemEnd to make sure it's properly
348 | // aligned together with kHighMemBeg:
349 | kHighMemEnd |= SHADOW_GRANULARITY * GetMmapGranularity() - 1;
350 | #endif // !ASAN_FIXED_MAPPING
351 | CHECK_EQ((kHighMemBeg % GetMmapGranularity()), 0);
352 | #endif // !SANITIZER_MYRIAD2
353 | }
354 |
355 | void PrintAddressSpaceLayout() {
356 | if (kHighMemBeg) {
357 | Printf("|| `[%p, %p]` || HighMem ||\n",
358 | (void*)kHighMemBeg, (void*)kHighMemEnd);
359 | Printf("|| `[%p, %p]` || HighShadow ||\n",
360 | (void*)kHighShadowBeg, (void*)kHighShadowEnd);
361 | }
362 | if (kMidMemBeg) {
363 | Printf("|| `[%p, %p]` || ShadowGap3 ||\n",
364 | (void*)kShadowGap3Beg, (void*)kShadowGap3End);
365 | Printf("|| `[%p, %p]` || MidMem ||\n",
366 | (void*)kMidMemBeg, (void*)kMidMemEnd);
367 | Printf("|| `[%p, %p]` || ShadowGap2 ||\n",
368 | (void*)kShadowGap2Beg, (void*)kShadowGap2End);
369 | Printf("|| `[%p, %p]` || MidShadow ||\n",
370 | (void*)kMidShadowBeg, (void*)kMidShadowEnd);
371 | }
372 | Printf("|| `[%p, %p]` || ShadowGap ||\n",
373 | (void*)kShadowGapBeg, (void*)kShadowGapEnd);
374 | if (kLowShadowBeg) {
375 | Printf("|| `[%p, %p]` || LowShadow ||\n",
376 | (void*)kLowShadowBeg, (void*)kLowShadowEnd);
377 | Printf("|| `[%p, %p]` || LowMem ||\n",
378 | (void*)kLowMemBeg, (void*)kLowMemEnd);
379 | }
380 | Printf("MemToShadow(shadow): %p %p",
381 | (void*)MEM_TO_SHADOW(kLowShadowBeg),
382 | (void*)MEM_TO_SHADOW(kLowShadowEnd));
383 | if (kHighMemBeg) {
384 | Printf(" %p %p",
385 | (void*)MEM_TO_SHADOW(kHighShadowBeg),
386 | (void*)MEM_TO_SHADOW(kHighShadowEnd));
387 | }
388 | if (kMidMemBeg) {
389 | Printf(" %p %p",
390 | (void*)MEM_TO_SHADOW(kMidShadowBeg),
391 | (void*)MEM_TO_SHADOW(kMidShadowEnd));
392 | }
393 | Printf("\n");
394 | Printf("redzone=%zu\n", (uptr)flags()->redzone);
395 | Printf("max_redzone=%zu\n", (uptr)flags()->max_redzone);
396 | Printf("quarantine_size_mb=%zuM\n", (uptr)flags()->quarantine_size_mb);
397 | Printf("thread_local_quarantine_size_kb=%zuK\n",
398 | (uptr)flags()->thread_local_quarantine_size_kb);
399 | Printf("malloc_context_size=%zu\n",
400 | (uptr)common_flags()->malloc_context_size);
401 |
402 | Printf("SHADOW_SCALE: %d\n", (int)SHADOW_SCALE);
403 | Printf("SHADOW_GRANULARITY: %d\n", (int)SHADOW_GRANULARITY);
404 | Printf("SHADOW_OFFSET: 0x%zx\n", (uptr)SHADOW_OFFSET);
405 | CHECK(SHADOW_SCALE >= 3 && SHADOW_SCALE <= 7);
406 | if (kMidMemBeg)
407 | CHECK(kMidShadowBeg > kLowShadowEnd &&
408 | kMidMemBeg > kMidShadowEnd &&
409 | kHighShadowBeg > kMidMemEnd);
410 | }
411 |
412 | static void AsanInitInternal() {
413 | if (LIKELY(asan_inited)) return;
414 | SanitizerToolName = "AddressSanitizer";
415 | CHECK(!asan_init_is_running && "ASan init calls itself!");
416 | asan_init_is_running = true;
417 |
418 | CacheBinaryName();
419 | CheckASLR();
420 |
421 | // Initialize flags. This must be done early, because most of the
422 | // initialization steps look at flags().
423 | InitializeFlags();
424 |
425 | AsanCheckIncompatibleRT();
426 | AsanCheckDynamicRTPrereqs();
427 | AvoidCVE_2016_2143();
428 |
429 | SetCanPoisonMemory(flags()->poison_heap);
430 | SetMallocContextSize(common_flags()->malloc_context_size);
431 |
432 | InitializePlatformExceptionHandlers();
433 |
434 | InitializeHighMemEnd();
435 |
436 | // Make sure we are not statically linked.
437 | AsanDoesNotSupportStaticLinkage();
438 |
439 | // Install tool-specific callbacks in sanitizer_common.
440 | AddDieCallback(AsanDie);
441 | SetCheckFailedCallback(AsanCheckFailed);
442 | SetPrintfAndReportCallback(AppendToErrorMessageBuffer);
443 |
444 | __sanitizer_set_report_path(common_flags()->log_path);
445 |
446 | __asan_option_detect_stack_use_after_return =
447 | flags()->detect_stack_use_after_return;
448 |
449 | // Re-exec ourselves if we need to set additional env or command line args.
450 | MaybeReexec();
451 |
452 | // Setup internal allocator callback.
453 | SetLowLevelAllocateMinAlignment(SHADOW_GRANULARITY);
454 | SetLowLevelAllocateCallback(OnLowLevelAllocate);
455 |
456 | InitializeAsanInterceptors();
457 |
458 | // Enable system log ("adb logcat") on Android.
459 | // Doing this before interceptors are initialized crashes in:
460 | // AsanInitInternal -> android_log_write -> __interceptor_strcmp
461 | AndroidLogInit();
462 |
463 | ReplaceSystemMalloc();
464 |
465 | DisableCoreDumperIfNecessary();
466 |
467 | InitializeShadowMemory();
468 |
469 | AsanTSDInit(PlatformTSDDtor);
470 | InstallDeadlySignalHandlers(AsanOnDeadlySignal);
471 |
472 | AllocatorOptions allocator_options;
473 | allocator_options.SetFrom(flags(), common_flags());
474 | InitializeAllocator(allocator_options);
475 |
476 | MaybeStartBackgroudThread();
477 | SetSoftRssLimitExceededCallback(AsanSoftRssLimitExceededCallback);
478 |
479 | // On Linux AsanThread::ThreadStart() calls malloc() that's why asan_inited
480 | // should be set to 1 prior to initializing the threads.
481 | asan_inited = 1;
482 | asan_init_is_running = false;
483 |
484 | if (flags()->atexit)
485 | Atexit(asan_atexit);
486 |
487 | InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);
488 |
489 | // Now that ASan runtime is (mostly) initialized, deactivate it if
490 | // necessary, so that it can be re-activated when requested.
491 | if (flags()->start_deactivated)
492 | AsanDeactivate();
493 |
494 | // interceptors
495 | InitTlsSize();
496 |
497 | // Create main thread.
498 | AsanThread *main_thread = CreateMainThread();
499 | CHECK_EQ(0, main_thread->tid());
500 | force_interface_symbols(); // no-op.
501 | SanitizerInitializeUnwinder();
502 |
503 | if (CAN_SANITIZE_LEAKS) {
504 | __lsan::InitCommonLsan();
505 | if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
506 | if (flags()->halt_on_error)
507 | Atexit(__lsan::DoLeakCheck);
508 | else
509 | Atexit(__lsan::DoRecoverableLeakCheckVoid);
510 | }
511 | }
512 |
513 | #if CAN_SANITIZE_UB
514 | __ubsan::InitAsPlugin();
515 | #endif
516 |
517 | InitializeSuppressions();
518 |
519 | if (CAN_SANITIZE_LEAKS) {
520 | // LateInitialize() calls dlsym, which can allocate an error string buffer
521 | // in the TLS. Let's ignore the allocation to avoid reporting a leak.
522 | __lsan::ScopedInterceptorDisabler disabler;
523 | Symbolizer::LateInitialize();
524 | } else {
525 | Symbolizer::LateInitialize();
526 | }
527 |
528 | VReport(1, "AddressSanitizer Init done\n");
529 |
530 | if (flags()->sleep_after_init) {
531 | Report("Sleeping for %d second(s)\n", flags()->sleep_after_init);
532 | SleepForSeconds(flags()->sleep_after_init);
533 | }
534 | }
535 |
536 | // Initialize as requested from some part of ASan runtime library (interceptors,
537 | // allocator, etc).
538 | void AsanInitFromRtl() {
539 | AsanInitInternal();
540 | }
541 |
542 | #if ASAN_DYNAMIC
543 | // Initialize runtime in case it's LD_PRELOAD-ed into unsanitized executable
544 | // (and thus normal initializers from .preinit_array or modules haven't run).
545 |
546 | class AsanInitializer {
547 | public: // NOLINT
548 | AsanInitializer() {
549 | AsanInitFromRtl();
550 | }
551 | };
552 |
553 | static AsanInitializer asan_initializer;
554 | #endif // ASAN_DYNAMIC
555 |
556 | } // namespace __asan
557 |
558 | // ---------------------- Interface ---------------- {{{1
559 | using namespace __asan; // NOLINT
560 |
561 | void NOINLINE __asan_handle_no_return() {
562 | if (asan_init_is_running)
563 | return;
564 |
565 | // int local_stack;
566 | // AsanThread *curr_thread = GetCurrentThread();
567 | // uptr PageSize = GetPageSizeCached();
568 | // uptr top, bottom;
569 | // if (curr_thread) {
570 | // top = curr_thread->stack_top();
571 | // bottom = ((uptr)&local_stack - PageSize) & ~(PageSize - 1);
572 | // } else if (SANITIZER_RTEMS) {
573 | // // Give up On RTEMS.
574 | // return;
575 | // } else {
576 | // CHECK(!SANITIZER_FUCHSIA);
577 | // // If we haven't seen this thread, try asking the OS for stack bounds.
578 | // uptr tls_addr, tls_size, stack_size;
579 | // GetThreadStackAndTls(/*main=*/false, &bottom, &stack_size, &tls_addr,
580 | // &tls_size);
581 | // top = bottom + stack_size;
582 | // }
583 | // static const uptr kMaxExpectedCleanupSize = 64 << 20; // 64M
584 | // if (top - bottom > kMaxExpectedCleanupSize) {
585 | // static bool reported_warning = false;
586 | // if (reported_warning)
587 | // return;
588 | // reported_warning = true;
589 | // Report("WARNING: ASan is ignoring requested __asan_handle_no_return: "
590 | // "stack top: %p; bottom %p; size: %p (%zd)\n"
591 | // "False positive error reports may follow\n"
592 | // "For details see "
593 | // "https://github.com/google/sanitizers/issues/189\n",
594 | // top, bottom, top - bottom, top - bottom);
595 | // return;
596 | // }
597 | // PoisonShadow(bottom, top - bottom, 0);
598 | // if (curr_thread && curr_thread->has_fake_stack())
599 | // curr_thread->fake_stack()->HandleNoReturn();
600 | }
601 |
602 | void NOINLINE __asan_set_death_callback(void (*callback)(void)) {
603 | SetUserDieCallback(callback);
604 | }
605 |
606 | // Initialize as requested from instrumented application code.
607 | // We use this call as a trigger to wake up ASan from deactivated state.
608 | void __asan_init() {
609 | AsanActivate();
610 | AsanInitInternal();
611 |
612 | // Find the rollback function
613 | *(void **) (&specfuzz_report) = dlsym(RTLD_DEFAULT, "specfuzz_report");
614 | nesting_level = (long *) dlsym(RTLD_DEFAULT, "nesting_level");
615 | }
616 |
617 | void __asan_version_mismatch_check() {
618 | // Do nothing.
619 | }
620 |
--------------------------------------------------------------------------------
/install/patches/llvm/sanitizer_coverage_libcdep_new.cc:
--------------------------------------------------------------------------------
1 | //===-- sanitizer_coverage_libcdep_new.cc ---------------------------------===//
2 | //
3 | // The LLVM Compiler Infrastructure
4 | //
5 | // This file is distributed under the University of Illinois Open Source
6 | // License. See LICENSE.TXT for details.
7 | //
8 | //===----------------------------------------------------------------------===//
9 | // Sanitizer Coverage Controller for Trace PC Guard.
10 |
11 | #include "sanitizer_platform.h"
12 |
13 | #if !SANITIZER_FUCHSIA
14 | #include "sancov_flags.h"
15 | #include "sanitizer_allocator_internal.h"
16 | #include "sanitizer_atomic.h"
17 | #include "sanitizer_common.h"
18 | #include "sanitizer_file.h"
19 |
20 | #include <dlfcn.h>
21 |
22 | using namespace __sanitizer;
23 |
24 | using AddressRange = LoadedModule::AddressRange;
25 |
26 | namespace __sancov {
27 | namespace {
28 |
29 | static const u64 Magic64 = 0xC0BFFFFFFFFFFF64ULL;
30 | static const u64 Magic32 = 0xC0BFFFFFFFFFFF32ULL;
31 | static const u64 Magic = SANITIZER_WORDSIZE == 64 ? Magic64 : Magic32;
32 |
33 | static fd_t OpenFile(const char* path) {
34 | error_t err;
35 | fd_t fd = OpenFile(path, WrOnly, &err);
36 | if (fd == kInvalidFd)
37 | Report("SanitizerCoverage: failed to open %s for writing (reason: %d)\n",
38 | path, err);
39 | return fd;
40 | }
41 |
42 | static void GetCoverageFilename(char* path, const char* name,
43 | const char* extension) {
44 | CHECK(name);
45 | internal_snprintf(path, kMaxPathLength, "%s/%s.%zd.%s",
46 | common_flags()->coverage_dir, name, internal_getpid(),
47 | extension);
48 | }
49 |
50 | static void WriteModuleCoverage(char* file_path, const char* module_name,
51 | const uptr* pcs, uptr len) {
52 | GetCoverageFilename(file_path, StripModuleName(module_name), "sancov");
53 | fd_t fd = OpenFile(file_path);
54 | WriteToFile(fd, &Magic, sizeof(Magic));
55 | WriteToFile(fd, pcs, len * sizeof(*pcs));
56 | CloseFile(fd);
57 | Printf("SanitizerCoverage: %s: %zd PCs written\n", file_path, len);
58 | }
59 |
60 | static void SanitizerDumpCoverage(const uptr* unsorted_pcs, uptr len) {
61 | if (!len) return;
62 |
63 |   char* file_path = static_cast<char*>(InternalAlloc(kMaxPathLength));
64 |   char* module_name = static_cast<char*>(InternalAlloc(kMaxPathLength));
65 |   uptr* pcs = static_cast<uptr*>(InternalAlloc(len * sizeof(uptr)));
66 |
67 | internal_memcpy(pcs, unsorted_pcs, len * sizeof(uptr));
68 | Sort(pcs, len);
69 |
70 | bool module_found = false;
71 | uptr last_base = 0;
72 | uptr module_start_idx = 0;
73 |
74 | for (uptr i = 0; i < len; ++i) {
75 | const uptr pc = pcs[i];
76 | if (!pc) continue;
77 |
78 | if (!__sanitizer_get_module_and_offset_for_pc(pc, nullptr, 0, &pcs[i])) {
79 | Printf("ERROR: unknown pc 0x%x (may happen if dlclose is used)\n", pc);
80 | continue;
81 | }
82 | uptr module_base = pc - pcs[i];
83 |
84 | if (module_base != last_base || !module_found) {
85 | if (module_found) {
86 | WriteModuleCoverage(file_path, module_name, &pcs[module_start_idx],
87 | i - module_start_idx);
88 | }
89 |
90 | last_base = module_base;
91 | module_start_idx = i;
92 | module_found = true;
93 | __sanitizer_get_module_and_offset_for_pc(pc, module_name, kMaxPathLength,
94 | &pcs[i]);
95 | }
96 | }
97 |
98 | if (module_found) {
99 | WriteModuleCoverage(file_path, module_name, &pcs[module_start_idx],
100 | len - module_start_idx);
101 | }
102 |
103 | InternalFree(file_path);
104 | InternalFree(module_name);
105 | InternalFree(pcs);
106 | }
107 |
108 | // Collects trace-pc guard coverage.
109 | // This class relies on zero-initialization.
110 | class TracePcGuardController {
111 | public:
112 | long* nesting_level;
113 | long* disable_speculation;
114 |
115 | void Initialize() {
116 | CHECK(!initialized);
117 |
118 | initialized = true;
119 | InitializeSancovFlags();
120 |
121 | pc_vector.Initialize(0);
122 | nesting_level = (long *) dlsym(RTLD_DEFAULT, "nesting_level");
123 | disable_speculation = (long *) dlsym(RTLD_DEFAULT, "disable_speculation");
124 | }
125 |
126 | void InitTracePcGuard(u32* start, u32* end) {
127 | if (!initialized) Initialize();
128 | CHECK(!*start);
129 | CHECK_NE(start, end);
130 |
131 | u32 i = pc_vector.size();
132 | for (u32* p = start; p < end; p++) *p = ++i;
133 | pc_vector.resize(i);
134 | }
135 |
136 | void TracePcGuard(u32* guard, uptr pc) {
137 | u32 idx = *guard;
138 | if (!idx) return;
139 | // we start indices from 1.
140 | atomic_uintptr_t* pc_ptr =
141 |         reinterpret_cast<atomic_uintptr_t*>(&pc_vector[idx - 1]);
142 | if (atomic_load(pc_ptr, memory_order_relaxed) == 0)
143 | atomic_store(pc_ptr, pc, memory_order_relaxed);
144 | }
145 |
146 | void Reset() {
147 | internal_memset(&pc_vector[0], 0, sizeof(pc_vector[0]) * pc_vector.size());
148 | }
149 |
150 | void Dump() {
151 | if (!initialized || !common_flags()->coverage) return;
152 | __sanitizer_dump_coverage(pc_vector.data(), pc_vector.size());
153 | }
154 |
155 | private:
156 | bool initialized;
157 |   InternalMmapVectorNoCtor<uptr> pc_vector;
158 | };
159 |
160 | static TracePcGuardController pc_guard_controller;
161 |
162 | } // namespace
163 | } // namespace __sancov
164 |
165 | namespace __sanitizer {
166 | void InitializeCoverage(bool enabled, const char *dir) {
167 | static bool coverage_enabled = false;
168 | if (coverage_enabled)
169 |     return;  // May happen if two sanitizers enable coverage in the same process.
170 | coverage_enabled = enabled;
171 | Atexit(__sanitizer_cov_dump);
172 | AddDieCallback(__sanitizer_cov_dump);
173 | }
174 | } // namespace __sanitizer
175 |
176 | extern "C" {
177 | SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_coverage( // NOLINT
178 | const uptr* pcs, uptr len) {
179 | return __sancov::SanitizerDumpCoverage(pcs, len);
180 | }
181 |
182 | SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard, u32* guard) {
183 | if (!*guard) return;
184 | if (*__sancov::pc_guard_controller.nesting_level != 0) return;
185 | if (*__sancov::pc_guard_controller.disable_speculation > 0) return;
186 | __sancov::pc_guard_controller.TracePcGuard(guard, GET_CALLER_PC() - 1);
187 | }
188 |
189 | SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard_init,
190 | u32* start, u32* end) {
191 | if (start == end || *start) return;
192 | __sancov::pc_guard_controller.InitTracePcGuard(start, end);
193 | }
194 |
195 | SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_trace_pc_guard_coverage() {
196 | __sancov::pc_guard_controller.Dump();
197 | }
198 | SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() {
199 | __sanitizer_dump_trace_pc_guard_coverage();
200 | }
201 | SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_reset() {
202 | __sancov::pc_guard_controller.Reset();
203 | }
204 | // Default empty implementations (weak). Users should redefine them.
205 | SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp, void) {}
206 | SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp1, void) {}
207 | SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp2, void) {}
208 | SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp4, void) {}
209 | SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp8, void) {}
210 | SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp1, void) {}
211 | SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp2, void) {}
212 | SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp4, void) {}
213 | SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp8, void) {}
214 | SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_switch, void) {}
215 | SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_div4, void) {}
216 | SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_div8, void) {}
217 | SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_gep, void) {}
218 | SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_indir, void) {}
219 | SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_8bit_counters_init, void) {}
220 | SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_pcs_init, void) {}
221 | } // extern "C"
222 | // Weak definition for code instrumented with -fsanitize-coverage=stack-depth
223 | // and later linked with code containing a strong definition.
224 | // E.g., -fsanitize=fuzzer-no-link
225 | SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
226 | SANITIZER_TLS_INITIAL_EXEC_ATTRIBUTE uptr __sancov_lowest_stack;
227 |
228 | #endif // !SANITIZER_FUCHSIA
229 |
--------------------------------------------------------------------------------
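The Dump() path above ends with WriteModuleCoverage(), which emits one .sancov file per module: an 8-byte magic word (Magic64/Magic32 defined near the top of this file) followed by the sorted, module-relative PC offsets, one machine word each. As a rough illustration of that layout, the standalone reader below is a sketch only for the 64-bit variant; it is not part of the repository, and the file name and error handling are illustrative.

// sancov_dump.c -- illustrative reader for the 64-bit .sancov layout written above.
#include <stdint.h>
#include <stdio.h>

int main(int argc, char **argv) {
    if (argc != 2) {
        fprintf(stderr, "usage: %s file.sancov\n", argv[0]);
        return 1;
    }
    FILE *f = fopen(argv[1], "rb");
    if (!f) {
        perror("fopen");
        return 1;
    }

    uint64_t magic;
    if (fread(&magic, sizeof(magic), 1, f) != 1 || magic != 0xC0BFFFFFFFFFFF64ULL) {
        fprintf(stderr, "not a 64-bit sancov file\n");
        fclose(f);
        return 1;
    }

    // The rest of the file is a list of module-relative PC offsets, one word each.
    uint64_t pc;
    while (fread(&pc, sizeof(pc), 1, f) == 1)
        printf("0x%llx\n", (unsigned long long) pc);

    fclose(f);
    return 0;
}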
/install/wrapper.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # TODO: this script is in dire need of documentation
3 |
4 | set -e
5 |
6 | LLVM_CONFIG=${LLVM_CONFIG:-"llvm-7.0.1-config"}
7 | CLANG_BINDIR=$(${LLVM_CONFIG} --bindir)
8 |
9 | CC=${CLANG_BINDIR}/clang
10 | LLC=${CLANG_BINDIR}/llc
11 |
12 | # make sure that external variables do not corrupt our compilation
13 | CFLAGS=""
14 | LDFLAGS=""
15 | LANGUAGE=""
16 | GGDB=""
17 | I=""
18 | INPUT=""
19 | OUTPUT=""
20 | OPT=""
21 |
22 | # configure the compiler
23 | LLCFLAGS="-x86-specfuzz -disable-tail-calls"
24 | ASAN_CFLAGS="-fsanitize=address -mllvm -asan-instrumentation-with-call-threshold=0 -mllvm -asan-use-after-scope=0 "
25 | ASAN_LDFLAGS="-fsanitize=address"
26 | COVERAGE_FLAGS=""
27 |
28 | flag_coverage_only=0
29 | flag_coverage=0
30 | flag_collect=0
31 | flag_function_list=0
32 | flag_branch_list=0
33 | flag_serialization_list=0
34 |
35 | while [ "$#" -gt 0 ]; do
36 | case $1 in
37 | -o)
38 | if (($# > 1)); then
39 | OUTPUT=$2
40 | if [ "$2" == "/dev/null" ]; then
41 | DEVNULL=1
42 | fi
43 | shift
44 | fi
45 | ;;
46 | *.c|*.cc|*.cpp|-)
47 | INPUT="$INPUT $1"
48 | SOURCE=1
49 | ;;
50 | *.o|*.s|*.S|*.a)
51 | INPUT="$INPUT $1"
52 | ;;
53 | -x)
54 | LANGUAGE="$1 $2"
55 | if [ "$2" == "assembler" ]; then
56 | ASM=1
57 | fi
58 | shift
59 | ;;
60 | -c)
61 | CFLAGS="$CFLAGS $1"
62 | CREATE_OBJECT=1
63 | ;;
64 | -I|-include|-isystem)
65 | I="$I $1 $2"
66 | shift
67 | ;;
68 | -I*)
69 | I="$I $1"
70 | ;;
71 | -ggdb*|-g)
72 | GGDB="-g -gcolumn-info"
73 | ;;
74 | -O?)
75 | CFLAGS="$CFLAGS $1"
76 | OPT="$1"
77 | ;;
78 | -S)
79 | CREATE_ASM=1
80 | ;;
81 | --collect)
82 | if [ $flag_collect == 0 ]; then
83 | if [ ! -f $2 ]; then
84 | touch $2
85 | fi
86 | LLCFLAGS+=" -x86-specfuzz-collect-functions-into=$2"
87 | flag_collect=1
88 | fi
89 | shift
90 | ;;
91 | --function-list)
92 | if [ $flag_function_list == 0 ]; then
93 | LLCFLAGS+=" -x86-specfuzz-function-list=$2"
94 | flag_function_list=1
95 | fi
96 | shift
97 | ;;
98 | --branch-list)
99 | if [ $flag_branch_list == 0 ]; then
100 | LLCFLAGS+=" -x86-specfuzz-branch-list=$2"
101 | flag_branch_list=1
102 | fi
103 | shift
104 | ;;
105 | --serialization-list)
106 | if [ $flag_serialization_list == 0 ]; then
107 | LLCFLAGS+=" -x86-specfuzz-serialization-list=$2"
108 | flag_serialization_list=1
109 | fi
110 | shift
111 | ;;
112 | --echo)
113 | ECHO=1
114 | ;;
115 | --debug-pass)
116 | LLCFLAGS+=" -debug-only=x86-specfuzz"
117 | ;;
118 | --no-wrapper-cleanup)
119 | NO_CLEANUP=1
120 | ;;
121 | --disable-asan)
122 | ASAN_CFLAGS=
123 | ASAN_LDFLAGS=
124 | ;;
125 | --enable-coverage)
126 | if [ $flag_coverage == 0 ]; then
127 | ASAN_CFLAGS="$ASAN_CFLAGS $COVERAGE_FLAGS"
128 | ASAN_LDFLAGS="$ASAN_LDFLAGS $COVERAGE_FLAGS"
129 | flag_coverage=1
130 | fi
131 | ;;
132 | --coverage-only)
133 | if [ $flag_coverage_only == 0 ]; then
134 | ASAN_CFLAGS="$ASAN_CFLAGS $COVERAGE_FLAGS"
135 | ASAN_LDFLAGS="$ASAN_LDFLAGS $COVERAGE_FLAGS"
136 | LLCFLAGS+=" -x86-specfuzz-coverage-only"
137 | flag_coverage_only=1
138 | fi
139 | ;;
140 | -V|-v|-qversion)
141 | $CC -v
142 | exit $?
143 | ;;
144 | *)
145 | if [ -z "$OUTPUT" ]; then
146 | CFLAGS="$CFLAGS $1"
147 | else
148 | LDFLAGS="$LDFLAGS $1"
149 | fi
150 | ;;
151 | esac
152 | shift
153 | done
154 |
155 | if [ -z "$OUTPUT" ]; then
156 | if [ $CREATE_OBJECT ]; then
157 | OUTPUT=$(basename ${INPUT%.c*}.o)
158 | else
159 | OUTPUT="a.out"
160 | fi
161 | fi
162 |
163 | CFLAGS="$CFLAGS -mno-red-zone"
164 | CFLAGS="$CFLAGS -mno-avx -mno-avx2 "
165 |
166 |
167 | if ! [ $CREATE_OBJECT ]; then
168 | LDFLAGS="$LDFLAGS -rdynamic -lspecfuzz"
169 | fi
170 |
171 | if [ -n "$SOURCE" ] && [ -z "$ASM" ] && [ -z "$DEVNULL" ]; then
172 | cmd=( $CC $ASAN_CFLAGS $CFLAGS $GGDB $I $LANGUAGE -c -emit-llvm $INPUT -o ${OUTPUT%.o}.bc )
173 | if [ -n "$ECHO" ]; then echo "${cmd[@]}"; fi
174 | "${cmd[@]}"
175 |
176 | cmd=( $LLC $LLCFLAGS $OPT ${OUTPUT%.o}.bc -o ${OUTPUT%.o}.s )
177 | if [ -n "$ECHO" ]; then echo "${cmd[@]}"; fi
178 | "${cmd[@]}"
179 | if [ -z "$NO_CLEANUP" ]; then rm ${OUTPUT%.o}.bc; fi
180 |
181 | if [ -z "$CREATE_ASM" ]; then
182 | cmd=( $CC -Wno-unused-command-line-argument $CFLAGS $ASAN_LDFLAGS ${OUTPUT%.o}.s -o $OUTPUT $LDFLAGS )
183 | if [ -n "$ECHO" ]; then echo "${cmd[@]}"; fi
184 | "${cmd[@]}"
185 | else
186 | cp ${OUTPUT%.o}.s ${OUTPUT%.o}
187 | fi
188 |
189 | if [ -z "$NO_CLEANUP" ]; then rm ${OUTPUT%.o}.s; fi
190 | if [ -n "$ECHO" ]; then echo "==========================================================="; fi
191 | else
192 | if [ -z "$SOURCE" ]; then
193 | I=
194 | fi
195 |
196 | cmd=( $CC $ASAN_LDFLAGS $CFLAGS $GGDB $I $LANGUAGE $INPUT -o $OUTPUT $LDFLAGS )
197 | if [ -n "$ECHO" ]; then echo "${cmd[@]}"; fi
198 | "${cmd[@]}"
199 | fi
200 |
--------------------------------------------------------------------------------
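Roughly, for a C source file the wrapper runs three steps: clang with the ASan flags and -emit-llvm to produce bitcode, llc with -x86-specfuzz (and -disable-tail-calls) to produce instrumented assembly, and a final clang invocation that assembles and, when linking, adds -rdynamic and -lspecfuzz. As an illustrative invocation (file names made up, and assuming libspecfuzz has already been installed where the linker can find it): ./install/wrapper.sh --echo -O2 -c demo.c -o demo.o builds an instrumented object and prints the underlying commands, and ./install/wrapper.sh demo.o -o demo links it against the SpecFuzz runtime and ASan.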
/postprocessing/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tudinfse/SpecFuzz/1c7cf6b1c0528744c56db0c8848e159b2962c21c/postprocessing/__init__.py
--------------------------------------------------------------------------------
/postprocessing/aggregate_rlbk_stats.awk:
--------------------------------------------------------------------------------
1 | #!/usr/bin/gawk -f
2 |
3 | function sorted_print(map, threshold, total, sims, total_simulations) {
4 | for (key in map) {
5 | inverted_map[map[key]/sims[key]] = key
6 | }
7 |
8 | n = asorti(inverted_map, sorted, "@ind_num_desc")
9 | sum = 0.0
10 | for (i = 1; i <= n; i++) {
11 | #rate = (sorted[i] / total) * 100
12 | printf("%s\t%.4f\n", inverted_map[sorted[i]], sorted[i])
13 | #sum += rate
14 | sum += 1
15 | if (sum > threshold) {
16 | break
17 | }
18 | }
19 | return str
20 | }
21 |
22 |
23 | BEGIN {
24 | simulation_id = -1
25 | }
26 |
27 | /\[SF\] rlbk:/ {
28 | if ($6 != simulation_id) {
29 | simulation_id = $6
30 | simulations[$3] += 1
31 | total_simulations++
32 | }
33 | rollbacks[$3] += 1
34 | weighted_simulations[$3] += $4
35 | depth[$4 - $4 % 50] += 1;
36 | nesting[$5] += 1;
37 |
38 | total_rollbacks++
39 | total_depth += $4
40 | }
41 |
42 | END {
43 | print "* Depths:"
44 | for (d in depth) {
45 | #printf("%s: \t%.4f ", (250 - d), (depth[d] / total_rollbacks) * 100)
46 | printf("%s:\t", (250 - d))
47 | rate = (depth[d] / total_rollbacks) * 100
48 | for (i=0; i < rate ; i++) {
49 | printf("*")
50 | }
51 | printf("\n")
52 | }
53 |
54 | printf("\n* Nestings:\n")
55 | for (n in nesting) {
56 |         printf("%s: %.2f%%\n", n, (nesting[n] / total_rollbacks) * 100)
57 | }
58 |
59 | printf("\n* rollbacks\n")
60 | sorted_print(rollbacks, threshold, total_rollbacks, simulations, total_simulations)
61 | }
62 |
63 |
64 |
65 |
--------------------------------------------------------------------------------
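This script aggregates the per-rollback log lines that the runtime prints when it is built with PRINT_ROLLABACK_STATS (see debug_rollback_depth in src/specfuzz_rtl.S). Each matched line has the form "[SF] rlbk: <branch address> <remaining instruction counter> <nesting level> <simulation id>"; a hypothetical record would be "[SF] rlbk: 0x4006d2 132 1 17", and fields $3-$6 above index exactly these columns. The threshold variable is expected to be supplied on the command line, e.g. gawk -v threshold=20 -f postprocessing/aggregate_rlbk_stats.awk run.log (log file name illustrative).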
/postprocessing/build_report.awk:
--------------------------------------------------------------------------------
1 | #!/usr/bin/gawk -f
2 | function filter(str) {
3 | gsub("killed", "", str)
4 | gsub("renamable", "", str)
5 | gsub("", "", str)
6 | gsub("implicit .[a-z]+,", "", str)
7 | gsub("implicit-def .[a-z]+,", "", str)
8 | gsub("debug-location ![0-9]+", "", str)
9 | gsub(" +", " ", str)
10 | gsub(/^[ \t]+/, "", str)
11 | return str
12 | }
13 |
14 | BEGIN {
15 | bt_initialized = 0;
16 | }
17 |
18 | /\*+ SpecFuzz :/ {
19 | current_function = $4
20 | }
21 |
22 | /Blacklisted/ {
23 | blacklisted[current_function] = 1
24 | }
25 |
26 | /Instrumenting an indirect branch/ {
27 | gsub("Instrumenting an indirect branch:", "", $0)
28 | $0 = filter($0)
29 | indirect_branches[$0] = current_function
30 | }
31 |
32 | /Instrumenting an indirect call/ {
33 | gsub("Instrumenting an indirect call:", "", $0)
34 | $0 = filter($0)
35 | indirect_calls[$0] = current_function
36 | }
37 |
38 | /Instrumenting a call to an external function/ {
39 | gsub("Instrumenting a call to an external function:", "", $0)
40 | $0 = filter($0)
41 | if (match($2, "@") != 0) {
42 | external_calls[$1 " " $2] = current_function
43 | } else {
44 | external_calls[$0] = current_function
45 | }
46 | }
47 |
48 | /Instrumenting a call to an ASan function/ {
49 | gsub("Instrumenting a call to an ASan function:", "", $0)
50 | $0 = filter($0)
51 | if (match($2, "@") != 0) {
52 | asan_calls[$1 " " $2] = current_function
53 | } else {
54 | asan_calls[$0] = current_function
55 | }
56 | }
57 |
58 | /Instrumenting a serializing instruction/ {
59 | $1=""; $2=""; $3=""; $4="";
60 | $0 = filter($0)
61 | serializing[$0] = current_function
62 | }
63 |
64 |
65 | END {
66 | print "Blacklisted functions:"
67 | for (f in blacklisted) {
68 | print f;
69 | }
70 |
71 | printf "\nBranch Table is initialized: "
72 | if (bt_initialized == 0) {
73 | print "Yes"
74 | } else {
75 | print "No"
76 | }
77 |
78 | printf "\nIndirect branches:\n"
79 | n = asorti(indirect_branches, sorted)
80 | for (i in sorted) {
81 | printf("%s in %s\n", sorted[i], indirect_branches[sorted[i]])
82 | }
83 |
84 | printf "\nIndirect calls:\n"
85 | n = asorti(indirect_calls, sorted)
86 | for (i in sorted) {
87 | printf("%s in %s\n", sorted[i], indirect_calls[sorted[i]])
88 | }
89 |
90 | printf "\nExternal calls:\n"
91 | n = asorti(external_calls, sorted)
92 | for (i in sorted) {
93 | printf("%s in %s\n", sorted[i], external_calls[sorted[i]])
94 | }
95 |
96 | printf "\nASan calls:\n"
97 | n = asorti(asan_calls, sorted)
98 | for (i in sorted) {
99 | printf("%s in %s\n", sorted[i], asan_calls[sorted[i]])
100 | }
101 |
102 | printf "\nSerializing instructions:\n"
103 | n = asorti(serializing, sorted)
104 | for (i in sorted) {
105 | printf("%s in %s\n", sorted[i], serializing[sorted[i]])
106 | }
107 | }
108 |
--------------------------------------------------------------------------------
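The patterns above consume the debug output of the SpecFuzz llc pass. A plausible way to produce and digest that output (file names illustrative, and assuming an assertions-enabled LLVM build, since the -debug-only flag that the wrapper's --debug-pass option enables requires one): compile with ./install/wrapper.sh --debug-pass -c target.c -o target.o 2> pass.log, then run gawk -f postprocessing/build_report.awk pass.log to get the per-function summary of instrumented branches, calls, and serializing instructions.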
/src/specfuzz_cov.c:
--------------------------------------------------------------------------------
1 | //===-------- specfuzz_cov.c ------------------------------------------------===//
2 | //
3 | // Copyright: This file is distributed under the GPL version 3 License.
4 | // See LICENSE for details.
5 | //
6 | //===------------------------------------------------------------------------===//
7 | /// \file
8 | ///
9 | /// Dummy default implementations of SpecFuzz coverage functions.
10 | /// Used mainly for testing
11 | ///
12 | /// The corresponding strong symbols must be defined by the fuzzer
13 | //===------------------------------------------------------------------------===//
14 | #include "specfuzz_rtl.h"
15 |
16 | void specfuzz_cov_init() {}
17 |
18 | __attribute__((weak)) __attribute__((preserve_most))
19 | void specfuzz_cov_trace_pc(uintptr_t pc) {
20 | branch_execution_count = 1;
21 | }
22 |
23 | __attribute__((weak))
24 | void specfuzz_cov_vuln(uintptr_t pc) {}
--------------------------------------------------------------------------------
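The two hooks above are weak on purpose; the fuzzer is expected to provide the strong definitions. The following is only a rough sketch of what such an override might look like, not the actual fuzzer-side code: the map name and size are invented for illustration, and it assumes the declarations from specfuzz_rtl.h (in particular branch_execution_count, which the runtime reads to pick the simulation order).

// Illustrative strong overrides of the weak SpecFuzz coverage hooks.
#include <stdint.h>
#include <stdio.h>
#include "specfuzz_rtl.h"

#define EXAMPLE_MAP_SIZE 65536           /* invented size, for illustration only */
static uint8_t example_map[EXAMPLE_MAP_SIZE];

__attribute__((preserve_most))
void specfuzz_cov_trace_pc(uintptr_t pc) {
    // Count how many times this speculated branch has been simulated
    // (saturating at 255, which is a simplification).
    uint8_t *cell = &example_map[pc % EXAMPLE_MAP_SIZE];
    if (*cell < UINT8_MAX)
        (*cell)++;
    // The runtime reads this global to decide the nesting order of the simulation.
    branch_execution_count = *cell;
}

void specfuzz_cov_vuln(uintptr_t pc) {
    // Forward the detected speculative access to the fuzzer; here we only log it.
    fprintf(stderr, "[fuzzer] speculative vulnerability reported at 0x%lx\n", (unsigned long) pc);
}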
/src/specfuzz_init.c:
--------------------------------------------------------------------------------
1 | //===-------- specfuzz_init.c -----------------------------------------------===//
2 | //
3 | // Copyright: This file is distributed under the GPL version 3 License.
4 | // See LICENSE for details.
5 | //
6 | //===------------------------------------------------------------------------===//
7 | /// \file
8 | ///
9 | /// - Initialization of the SpecFuzz runtime.
10 | /// - A signal handler that records the signal and does a rollback
11 | //===------------------------------------------------------------------------===//
12 | #ifndef _GNU_SOURCE
13 | #define _GNU_SOURCE
14 | #endif
15 | #include <stdio.h>
16 | #include <stdlib.h>
17 | #include <string.h>
18 | #include <signal.h>
19 | #include <stdint.h>
20 | #include <ucontext.h>
21 | #include <unistd.h>
22 | #include "specfuzz_rtl.h"
23 |
24 | #if ENABLE_STATS == 1
25 | #define STAT_INCREMENT(X) X++
26 | #else
27 | #define STAT_INCREMENT(X)
28 | #endif
29 |
30 | // a disjoint stack frame for a signal handler
31 | stack_t signal_stack_descr;
32 | char signal_stack[SIGSTKSZ];
33 |
34 | // a global variable for detecting errors in RTL
35 | char inside_handler = 0;
36 |
37 | // output buffer
38 | #define OUTPUT_SIZE 1000000
39 | char output[OUTPUT_SIZE];
40 |
41 | /// Signal handler to catch exceptions on simulated paths
42 | ///
43 | void specfuzz_handler(int signo, siginfo_t *siginfo, void *ucontext) {
44 | ucontext_t *context = ((ucontext_t *) ucontext);
45 | greg_t *uc_gregs = context->uc_mcontext.gregs;
46 | long long int instruction = context->uc_mcontext.gregs[REG_RIP];
47 |
48 | #if ENABLE_SANITY_CHECKS == 1
49 | if (inside_handler != 0) {
50 | fprintf(stderr, "\n[SF] Error: Fault inside the signal handler\n");
51 | abort();
52 | }
53 | inside_handler = 1;
54 |
55 | if (nesting_level <= 0x0) {
56 | fprintf(stderr, "[SF] Error: Signal handler called outside speculation\n");
57 | abort();
58 | }
59 |
60 | if (checkpoint_sp > &checkpoint_stack || checkpoint_sp < &checkpoint_stack_bottom) {
61 | fprintf(stderr, "[SF] Error: checkpoint_sp is corrupted\n");
62 | abort();
63 | }
64 |
65 | if ((uint64_t *) uc_gregs[REG_RSP] <= &specfuzz_rtl_frame
66 | && (uint64_t *) uc_gregs[REG_RSP] >= &specfuzz_rtl_frame_bottom) {
67 | fprintf(stderr, "[SF] Error: a signal caught within the SpecFuzz runtime\n");
68 | abort();
69 | }
70 | #endif
71 |
72 | if (siginfo->si_signo == SIGFPE) {
73 | STAT_INCREMENT(stat_signal_misc);
74 | } else if (context->uc_mcontext.gregs[REG_RSP] >= (long long) &asan_rtl_frame_bottom &&
75 | context->uc_mcontext.gregs[REG_RSP] <= (long long) &asan_rtl_frame) {
76 | // When we detect an overflow in ASan RTL, recovering the offending address is tricky
77 | // For the time being, we ignore these cases
78 | STAT_INCREMENT(stat_signal_overflow);
79 | } else {
80 | #if ENABLE_PRINT == 1
81 | // Print information about the signal
82 | // Note: the calls to fprintf are not multithreading-safe
83 |
84 |         // the indexes below are CHECKPOINT_STACK_PC_OFFSET and CHECKPOINT_STACK_BP_OFFSET
85 |         // from specfuzz_rtl.S expressed in quadwords (speculated branch PC and previous frame)
86 | // TODO: these indexes are ugly. Use a structure instead
87 | uint64_t last_branch_address = store_log_bp[20 + 64 + 1];
88 | fprintf(stderr,
89 | "[SF], %d, 0x%llx, 0x%lx, 0, 0x%lx",
90 | siginfo->si_signo,
91 | instruction,
92 | (unsigned long int) siginfo->si_addr,
93 | last_branch_address);
94 | uint64_t *next_frame = (uint64_t *) store_log_bp[22 + 64 + 1];
95 | while (next_frame) {
96 | fprintf(stderr, ", 0x%lx", next_frame[20 + 64 + 1]);
97 | next_frame = (uint64_t *) next_frame[22 + 64 + 1];
98 | }
99 | fprintf(stderr, "\n");
100 | #endif
101 |
102 | #if ENABLE_COVERAGE == 1
103 | specfuzz_cov_vuln(instruction);
104 | #endif
105 | STAT_INCREMENT(stat_signal_overflow);
106 | }
107 |
108 | // Redirect the flow into the recovery function
109 | uc_gregs[REG_RSP] = (greg_t) &specfuzz_rtl_frame;
110 | uc_gregs[REG_RIP] = (greg_t) &specfuzz_rlbk_forced;
111 | inside_handler = 0;
112 | }
113 |
114 | /// Catch all hardware signals with our handler
115 | ///
116 | void setup_handler() {
117 | // Establish an alternate stack for the handler
118 | signal_stack_descr.ss_sp = &signal_stack;
119 | signal_stack_descr.ss_size = SIGSTKSZ;
120 | signal_stack_descr.ss_flags = 0;
121 |
122 | if (sigaltstack(&signal_stack_descr, NULL) == -1) {
123 | perror("sigaltstack");
124 | _exit(1);
125 | }
126 |
127 | // Configure the signal handler
128 | struct sigaction action;
129 | action.sa_sigaction = specfuzz_handler;
130 | sigemptyset(&action.sa_mask); // do not mask any signals while handling
131 |
132 |     // pass signal info, use an alternate stack, and let the handler catch its own signals
133 | action.sa_flags = SA_SIGINFO | SA_ONSTACK | SA_NODEFER;
134 |
135 | // Register the handler
136 | if (sigaction(SIGSEGV, &action, NULL) == -1 ||
137 | sigaction(SIGBUS, &action, NULL) == -1 ||
138 | sigaction(SIGILL, &action, NULL) == -1 ||
139 | sigaction(SIGTRAP, &action, NULL) == -1 ||
140 | sigaction(SIGFPE, &action, NULL) == -1) {
141 | perror("sigaction");
142 | _exit(1);
143 | }
144 | }
145 |
146 | /// Prints runtime statistics
147 | ///
148 | #define print_stat(MSG, VAR, TOTAL) fprintf(stderr, MSG, VAR, (VAR * 100) / TOTAL)
149 | void specfuzz_dump_stats() {
150 | uint64_t total = stat_max_depth + stat_corrupted_code_pointer + stat_forced_external_call
151 | + stat_forced_serializing_instruction + stat_forced_indirect_call + stat_asan_overflow
152 | + stat_signal_overflow + stat_signal_misc;
153 | fprintf(stderr, "[SF] Statistics:\n");
154 | print_stat(" Max speculation depth reached: %lu (%lu%%)\n", stat_max_depth, total);
155 | print_stat(" of them, with max nesting %lu (%lu%%)\n", stat_max_nesting, total);
156 | print_stat(" External function call: %lu (%lu%%)\n", stat_forced_external_call, total);
157 | print_stat(" Indirect function call: %lu (%lu%%)\n", stat_forced_indirect_call, total);
158 | print_stat(" Serializing: %lu (%lu%%)\n", stat_forced_serializing_instruction, total);
159 | print_stat(" Bounds violation (ASan): %lu (%lu%%)\n", stat_asan_overflow, total);
160 | print_stat(" Bounds violation (signal): %lu (%lu%%)\n", stat_signal_overflow, total);
161 | print_stat(" Corrupted code pointer: %lu (%lu%%)\n", stat_corrupted_code_pointer, total);
162 | print_stat(" Other signals: %lu (%lu%%)\n", stat_signal_misc, total);
163 | fprintf(stderr, " Simulation disables: %lu\n", stat_simulation_disables);
164 | fprintf(stderr, " Skipped CMP due to disabled simulation: %lu\n", stat_skiped_due_to_disabled);
165 | }
166 |
167 | /// The initialization function. Called before main
168 | ///
169 | __attribute__((preserve_most))
170 | void specfuzz_init() {
171 | // _IOFBF behaves funky. IDK why. Gave up on it for the time being
172 | setvbuf(stderr, output, _IOLBF, OUTPUT_SIZE);
173 | fprintf(stderr, "[SF] Starting\n");
174 | setup_handler();
175 | #if ENABLE_STATS == 1
176 | atexit(specfuzz_dump_stats);
177 | #endif
178 | #if ENABLE_COVERAGE == 1
179 | specfuzz_cov_init();
180 | #endif
181 |
182 | }
183 |
--------------------------------------------------------------------------------
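The key trick in specfuzz_handler() above is the last step: it rewrites REG_RSP and REG_RIP in the saved ucontext so that, on return from the signal, execution resumes in specfuzz_rlbk_forced on the runtime's disjoint frame instead of re-executing the faulting instruction. The standalone program below is a minimal sketch of just that mechanism (x86-64 Linux only; it is not part of the sources and all names are made up).

// redirect.c -- minimal illustration of recovering from a fault by rewriting the ucontext.
#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <ucontext.h>

// A disjoint, 16-byte aligned stack for the recovery routine, mirroring how the
// SpecFuzz runtime switches to specfuzz_rtl_frame before rolling back.
static char recovery_stack[64 * 1024] __attribute__((aligned(16)));

static void recover(void) {
    puts("recovered from the injected fault");
    exit(0);
}

static void handler(int signo, siginfo_t *info, void *ucontext) {
    (void) signo;
    (void) info;
    ucontext_t *ctx = ucontext;
    // Resume in recover() on the disjoint stack instead of retrying the faulting
    // store (RSP % 16 == 8 at function entry, as if a call had pushed a return address).
    ctx->uc_mcontext.gregs[REG_RSP] = (greg_t) (recovery_stack + sizeof(recovery_stack) - 8);
    ctx->uc_mcontext.gregs[REG_RIP] = (greg_t) &recover;
}

int main(void) {
    struct sigaction action = {0};
    action.sa_sigaction = handler;
    action.sa_flags = SA_SIGINFO;
    sigemptyset(&action.sa_mask);
    sigaction(SIGSEGV, &action, NULL);

    *(volatile int *) 0 = 42;  // deliberately fault
    return 1;                  // never reached
}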
/src/specfuzz_rtl.S:
--------------------------------------------------------------------------------
1 | //===-------- specfuzz_rtl.S ------------------------------------------------===//
2 | //
3 | // Copyright: This file is distributed under the GPL version 3 License.
4 | // See LICENSE for details.
5 | //
6 | //===------------------------------------------------------------------------===//
7 | /// \file
8 | ///
9 | /// A runtime library that implements the more heavy-weight functionality of
10 | /// the SpecFuzz pass: checkpoint+rollback and reporting of the detected
11 | /// vulnerabilities.
12 | /// Also, some helper functions.
13 | ///
14 | /// In the file, the following abbreviations are used:
15 | /// * flags: May clobber EFLAGS
16 | /// * stack: May modify data on stack
17 | /// * spec: May be executed speculatively
18 | //===------------------------------------------------------------------------===//
19 | .file "specfuzz_rtl.S"
20 |
21 | .extern printf
22 | .extern fprintf
23 | .extern stderr
24 | .extern specfuzz_cov_trace_pc
25 | .extern specfuzz_cov_vuln
26 |
27 | .set CHECKPOINT_STACK_SIZE, (4096 * 25)
28 | .set SPECULATION_WINDOW, 250
29 | #ifndef MAX_NESTING_LEVEL
30 | .set MAX_NESTING_LEVEL, 1
31 | #endif
32 |
33 | .macro STAT_INCREMENT var
34 | #if ENABLE_STATS == 1
35 | pushfq
36 | incq \var
37 | popfq
38 | #endif
39 | .endm
40 |
41 | .macro ASSERT lh cond rh error_callback
42 | #if ENABLE_SANITY_CHECKS == 1
43 | cmpq \rh , \lh
44 | \cond \error_callback
45 | #endif
46 | .endm
47 |
48 |
49 | //===------------------------------------------------------------------------===//
50 | // Global variables
51 | //===------------------------------------------------------------------------===//
52 | .data
53 |
54 | // For better cache locality, we try to keep all small variables on one or two cache lines
55 | // and all checkpointed values on the same page
56 | .align 4096
57 |
58 | // Instruction counter: counts instructions executed during a simulation
59 | .globl instruction_counter
60 | instruction_counter: .quad 0
61 |
62 | // Flag indicating if we are currently in a simulation
63 | .globl nesting_level
64 | nesting_level: .quad 0
65 |
66 | // [Prioritized simulation] The maximum order of simulation for the current branch
67 | max_nesting_level: .quad 1
68 |
69 | // Flag that globally disables simulation. Set, for example, when calling non-instrumented
70 | // functions
71 | .globl disable_speculation
72 | disable_speculation: .quad 0
73 |
74 | // The stack pointer value before we called the runtime
75 | .globl current_rsp
76 | current_rsp: .quad 0
77 |
78 | // A variable for passing the results of specfuzz_cov_trace_pc to the rtl; We need it because
79 | // of a bug in LLVM that corrupts the return value of the function even if the
80 | // preserve_most attribute is set
81 | .globl branch_execution_count
82 | branch_execution_count: .quad 0
83 |
84 | // Temporary storage for the registers used in SpecFuzz instrumentation and in the rtl
85 | .globl tmp_gpr1
86 | .globl tmp_gpr2
87 | tmp_gpr1: .quad 0
88 | tmp_gpr2: .quad 0
89 | tmp_eflags: .quad 0
90 | return_address: .quad 0
91 | simulation_start_address: .quad 0
92 |
93 | // HERE, TOTAL LENGTH: 88 bytes
94 |
95 | // Never mind this one; the cannary variable is used only for debugging
96 | .globl cannary
97 | cannary: .quad 0
98 |
99 | // A stack of state checkpoints
100 | // Every frame contains all data necessary for rolling back to a checkpoint.
101 | .align 64
102 | .globl checkpoint_stack_bottom
103 | .globl checkpoint_stack
104 | .globl checkpoint_sp
105 | .globl store_log_bp
106 | checkpoint_stack_bottom: .zero CHECKPOINT_STACK_SIZE
107 | checkpoint_stack: .quad 0
108 | checkpoint_sp: .quad checkpoint_stack
109 | store_log_bp: .quad 0 // base pointer of the Store Log
110 |
111 | // Layout of a checkpoint
112 | .set CHECKPOINT_STACK_REG_OFFSET, (512 + 8) // 8 is padding
113 | .set CHECKPOINT_STACK_CTYPE_OFFSET, (CHECKPOINT_STACK_REG_OFFSET + 120)
114 | .set CHECKPOINT_STACK_DISABLE_OFFSET, (CHECKPOINT_STACK_CTYPE_OFFSET + 8)
115 | .set CHECKPOINT_STACK_NESTING_OFFSET, (CHECKPOINT_STACK_DISABLE_OFFSET + 8)
116 | .set CHECKPOINT_STACK_COUNTER_OFFSET, (CHECKPOINT_STACK_NESTING_OFFSET + 8)
117 | .set CHECKPOINT_STACK_SP_OFFSET, (CHECKPOINT_STACK_COUNTER_OFFSET + 8)
118 | .set CHECKPOINT_STACK_PC_OFFSET, (CHECKPOINT_STACK_SP_OFFSET + 8)
119 | .set CHECKPOINT_STACK_FLAGS_OFFSET, (CHECKPOINT_STACK_PC_OFFSET + 8)
120 | .set CHECKPOINT_STACK_BP_OFFSET, (CHECKPOINT_STACK_FLAGS_OFFSET + 8)
121 |
122 |
123 | // Indirect call type stack
124 | // Before every indirect call, we push the callee type on it
125 | .globl specfuzz_call_type_stack_bottom
126 | .globl specfuzz_call_type_stack
127 | .globl specfuzz_call_type_stack_sp
128 | specfuzz_call_type_stack_bottom: .zero 4080 // allocate 1 page for the stack
129 | specfuzz_call_type_stack: .quad 0 // top of the stack
130 | specfuzz_call_type_stack_sp: .quad specfuzz_call_type_stack // stack pointer
131 |
132 |
133 | // A disjoint stack frame used by the runtime functions
134 | // We use it to avoid accidental clobbering of the application stack
135 | .align 4096
136 | .globl specfuzz_rtl_frame
137 | .globl specfuzz_rtl_frame_bottom
138 | specfuzz_rtl_frame_bottom: .zero 4088
139 | specfuzz_rtl_frame: .quad 0
140 |
141 | // A disjoint stack frame for ASan functions
142 | .globl asan_rtl_frame
143 | .globl asan_rtl_frame_bottom
144 | asan_rtl_frame_bottom: .zero 4088
145 | asan_rtl_frame: .quad 0
146 |
147 | // Error messages
148 | error_checkpoint_stack_overflow: .string "[SF] Error: overflow of Checkpoint Stack\n"
149 | error_branch_table_overflow: .string "[SF] Error: too many Branch Table collisions\n"
150 | asan_detected_real_overflow: .string "[SF] Error: That was a real, non-speculative overflow\n"
151 | error_corrupted_nesting: .string "[SF] Error: nesting_level is corrupted (negative)\n"
152 | error_not_speculative: .string "[SF] Error: Corrupted state outside simulation\n"
153 |
154 | // Detected speculative faults
155 | // Format: [SF], Fault Type, Instruction, Address, Offset, Mispredicted Branches
156 | asan_detected_overflow_base: .string "[SF], 1, 0x%llx, 0x%lx, %d, 0x%lx"
157 | asan_detected_overflow_next: .string ", 0x%lx"
158 |
159 | detected_corrupted_code_pointer: .string "[SF], 2, 0x%llx, 0x%lx, %d, 0x%lx\n"
160 |
161 | // Runtime statistics
162 | .globl stat_max_depth
163 | .globl stat_forced_external_call
164 | .globl stat_forced_indirect_call
165 | .globl stat_forced_serializing_instruction
166 | .globl stat_forced_patched
167 | .globl stat_asan_overflow
168 | .globl stat_signal_overflow
169 | .globl stat_corrupted_code_pointer
170 | .globl stat_signal_misc
171 | .globl stat_max_nesting
172 | .globl stat_simulation_disables
173 | .globl stat_skiped_due_to_disabled
174 | stat_max_depth: .quad 0
175 | stat_forced_external_call: .quad 0
176 | stat_forced_indirect_call: .quad 0
177 | stat_forced_serializing_instruction: .quad 0
178 | stat_forced_patched: .quad 0
179 | stat_asan_overflow: .quad 0
180 | stat_signal_overflow: .quad 0
181 | stat_corrupted_code_pointer: .quad 0
182 | stat_signal_misc: .quad 0
183 | stat_max_nesting: .quad 0
184 | stat_simulation_disables: .quad 0
185 | stat_skiped_due_to_disabled: .quad 0
186 |
187 | #if PRINT_ROLLABACK_STATS != 0
188 | first_mispredicted_branch: .quad 0
189 | simulation_id: .quad 0
190 | debug_rollback_depth: .string "[SF] rlbk: 0x%llx %lld %lld %lld\n"
191 | #endif
192 |
193 | //===------------------------------------------------------------------------===//
194 | // Checkpoint and rollback
195 | //===------------------------------------------------------------------------===//
196 | .text
197 |
198 | /// specfuzz_chkp: Make a checkpoint
199 | /// Stores:
200 | /// * current values in CPU registers
201 | /// * EFLAGS
202 | /// * rollback address
203 | /// * stack pointer
204 | ///
205 | /// CLOB: spec
206 | .globl specfuzz_chkp
207 | .type specfuzz_chkp, @function
208 | specfuzz_chkp:
209 | push %r15
210 | movq %r15, tmp_gpr1
211 |
212 | // EFLAGS will get corrupted soon, so preserve it in tmp_eflags
213 | pushfq
214 | xorq %r15, %r15
215 | movq (%rsp), %r15
216 | movq %r15, tmp_eflags
217 |
218 | // do not start a simulation if it is globally disabled
219 | cmpq $0, disable_speculation
220 | jg specfuzz_chkp.disabled_simulation
221 |
222 | // check if it's time to rollback
223 | call specfuzz_rlbk_if_done
224 | ASSERT nesting_level jl $0 specfuzz_exit_corrupted_nesting_level
225 |
226 | // do not start a new simulation if we've reached the max nesting depth
227 | #if ENABLE_PRIORITEZED_SIMULATION == 1
228 | movq max_nesting_level, %r15
229 | cmpq %r15, nesting_level
230 | #else
231 | cmpq $MAX_NESTING_LEVEL, nesting_level
232 | #endif
233 | jge specfuzz_chkp.no_simulation
234 |
235 | specfuzz_chkp.start_simulation:
236 | // Save the return address
237 | movq 16(%rsp), %r15
238 | mov %r15, return_address
239 |
240 | // Entering a new simulation:
241 | // (i.e., we're not within a nested simulation)
242 | cmpq $0, nesting_level
243 | jne .L8
244 | // Fixup stack_sp if it was modified outside speculation
245 | movq $checkpoint_stack, checkpoint_sp
246 |
247 | // Initialize the instruction countdown
248 | movq $SPECULATION_WINDOW, instruction_counter
249 |
250 | #if PRINT_ROLLABACK_STATS != 0
251 | movq %r15, first_mispredicted_branch
252 | incq simulation_id
253 | #endif
254 |
255 | #if ENABLE_COVERAGE == 1
256 | pushq %rdi
257 | pushq %r11
258 | movq %r15, %rdi
259 | callq specfuzz_cov_trace_pc
260 | #if ENABLE_PRIORITEZED_SIMULATION == 1
261 | # every Nth run executes up to order log4(N) + 1
262 | cmpq $0, branch_execution_count
263 | je .L10
264 | tzcntq branch_execution_count, %r11 // Increases every power of 4
265 | shrq $1, %r11
266 | addq $1, %r11 // Default order: 1
267 | movq %r11, max_nesting_level
268 | jmp .L11
269 | .L10:
270 | movq $1, max_nesting_level
271 | .L11:
272 | #endif // ENABLE_PRIORITEZED_SIMULATION
273 | popq %r11
274 | popq %rdi
275 | #endif // ENABLE_COVERAGE
276 | .L8:
277 |
278 | // Get the current stack frame
279 | movq checkpoint_sp, %rsp
280 | ASSERT %rsp jle $checkpoint_stack_bottom specfuzz_exit_state_overflow
281 |
282 | #if ENABLE_SEQUENTIAL_SIMULATION == 1
283 |     // Mark that we are one level deeper into nesting
284 | addq $1, nesting_level
285 | #endif
286 |
287 | // Take a checkpoint:
288 | // - Preserve the previous base pointer of the Store Log, for nested rollbacks
289 | pushq store_log_bp
290 |
291 | // - The original value of eflags
292 | pushq tmp_eflags
293 |
294 | // - The address where we will continue execution after simulating misprediction
295 | pushq return_address
296 |
297 | // - Store stack pointer
298 | pushq current_rsp
299 |
300 | // - Metadata
301 | pushq instruction_counter
302 | pushq nesting_level
303 | pushq disable_speculation
304 | pushq specfuzz_call_type_stack_sp
305 |
306 | // - Store registers
307 | pushq %rax
308 | pushq %rbx
309 | pushq %rcx
310 | pushq %rdx
311 | pushq %rsi
312 | pushq %rdi
313 | pushq %rbp
314 | pushq %r8
315 | pushq %r9
316 | pushq %r10
317 | pushq %r11
318 | pushq %r12
319 | pushq %r13
320 | pushq %r14
321 | pushq tmp_gpr1
322 |
323 | // - FPU and SIMD states
324 | subq $8, %rsp // alignment
325 | subq $512, %rsp
326 | fxsave64 (%rsp)
327 |
328 | movq %rsp, store_log_bp
329 | movq %rsp, checkpoint_sp
330 |
331 | #if ENABLE_SEQUENTIAL_SIMULATION != 1
332 | // Mark that we got one level deeper into nesting
333 | addq $1, nesting_level
334 | #endif
335 |
336 | // - Checkpoint is finished
337 | // Now, prepare for a simulation
338 |
339 | // To trigger the simulation, we have to skip a few instructions and return into the
340 | // simulated mispredicted branch (see SpecFuzzPass.cpp for details)
341 | movq return_address, %r15
342 | addq $0xa, %r15 // 0xa is the size of this skipped instruction
343 | movq %r15, simulation_start_address
344 |
345 | // Switch back to the RTL stack frame and restore corrupted register values
346 | movq $specfuzz_rtl_frame, %rsp
347 | subq $24, %rsp
348 | popfq
349 | popq %r15
350 |
351 | // Switch to the application stack
352 | // Note: Normally, the pass takes care of it. However, since we're not returning into
353 | // the next instruction, we'll also skip the restoration to the application stack.
354 | // Thus, we have to do it here
355 | movq current_rsp, %rsp
356 |
357 | // Return
358 | jmpq *simulation_start_address
359 |
360 | specfuzz_chkp.no_simulation:
361 | popfq
362 | popq %r15
363 | ret
364 |
365 | specfuzz_chkp.disabled_simulation:
366 | ASSERT checkpoint_sp jle $checkpoint_stack_bottom specfuzz_exit_state_overflow
367 | movq $checkpoint_stack, checkpoint_sp // preventing overflows
368 |
369 | STAT_INCREMENT stat_skiped_due_to_disabled
370 |
371 | popfq
372 | popq %r15
373 | ret
374 |
375 |
376 | /// specfuzz_rlbk_if_done: Rollback if we've reached the maximum simulation depth
377 | ///
378 | /// CLOB: stack spec
379 | .globl specfuzz_rlbk_if_done
380 | .type specfuzz_rlbk_if_done, @function
381 | specfuzz_rlbk_if_done:
382 | pushfq
383 |
384 | // check if we're in a simulation
385 | cmpq $0, nesting_level
386 | je specfuzz_rlbk_if_done.return
387 |
388 | // check if we've passed the speculation window
389 | cmpq $0, instruction_counter
390 | jg specfuzz_rlbk_if_done.return
391 |
392 | #if ENABLE_STATS == 1
393 | STAT_INCREMENT stat_max_depth
394 | cmpq $MAX_NESTING_LEVEL, nesting_level
395 | jne .L4
396 | STAT_INCREMENT stat_max_nesting
397 | .L4:
398 | #endif
399 |
400 | callq specfuzz_rlbk
401 |
402 | specfuzz_rlbk_if_done.return:
403 | popfq
404 | ret
405 |
406 |
407 | /// specfuzz_rlbk_forced: Unconditionally rollback the simulation
408 | ///
409 | /// CLOB: stack spec
410 | .globl specfuzz_rlbk_forced
411 | .type specfuzz_rlbk_forced, @function
412 | specfuzz_rlbk_forced:
413 | pushfq
414 |
415 | // check if we're in a simulation
416 | cmpq $0, nesting_level
417 | je specfuzz_rlbk_forced.return
418 |
419 | callq specfuzz_rlbk
420 |
421 | specfuzz_rlbk_forced.return:
422 | ASSERT checkpoint_sp jle $checkpoint_stack_bottom specfuzz_exit_state_overflow
423 | movq $checkpoint_stack, checkpoint_sp // preventing overflows
424 | popfq
425 | ret
426 |
427 |
428 | /// specfuzz_rlbk_*: Wrappers for the rollback function
429 | /// Calculate statistics on what causes simulation aborts
430 | ///
431 | /// CLOB: eflags
432 | .globl specfuzz_rlbk_external_call
433 | .type specfuzz_rlbk_external_call, @function
434 | specfuzz_rlbk_external_call:
435 | STAT_INCREMENT stat_forced_external_call
436 | jmp specfuzz_rlbk_forced
437 |
438 | .globl specfuzz_rlbk_indirect_call
439 | .type specfuzz_rlbk_indirect_call, @function
440 | specfuzz_rlbk_indirect_call:
441 | STAT_INCREMENT stat_forced_indirect_call
442 | jmp specfuzz_rlbk_forced
443 |
444 | .globl specfuzz_rlbk_serializing
445 | .type specfuzz_rlbk_serializing, @function
446 | specfuzz_rlbk_serializing:
447 | STAT_INCREMENT stat_forced_serializing_instruction
448 | jmp specfuzz_rlbk_forced
449 |
450 | .globl specfuzz_rlbk_patched
451 | .type specfuzz_rlbk_patched, @function
452 | specfuzz_rlbk_patched:
453 | STAT_INCREMENT stat_forced_patched
454 | jmp specfuzz_rlbk_forced
455 |
456 |
457 | /// specfuzz_rlbk: The rollback routine
458 | /// Never to be called by anything outside the RTL
459 | ///
460 | /// Note that we don't bother preserving the previous register values as they will
461 | /// be later overwritten anyway
462 | ///
463 | /// CLOB: flags stack spec
464 | .type specfuzz_rlbk, @function
465 | specfuzz_rlbk:
466 | #if PRINT_ROLLABACK_STATS != 0
467 | movq simulation_id, %r9
468 | movq nesting_level, %r8
469 | movq instruction_counter, %rcx
470 | movq first_mispredicted_branch, %rdx
471 | mov $debug_rollback_depth, %esi
472 | mov stderr, %rdi
473 | mov $0, %eax
474 | call _IO_fprintf
475 | #endif
476 |
477 | // Check that we're not overflowing
478 | movq checkpoint_sp, %rsp
479 | ASSERT %rsp jle $checkpoint_stack_bottom specfuzz_exit_state_overflow
480 |
481 | // Rewind the Store Log:
482 | // - First, a special case: a segfault might have been triggered right after
483 | // the checkpoint, if the page is labeled as read-only
484 | // Thus, attempting to restore the value will cause another segfault
485 | // In this case, ignore the broken entry: checkpoint_sp++
486 | cmp store_log_bp, %rsp
487 | je .L2
488 | movq (%rsp), %rbx
489 | movq 8(%rsp), %rcx
490 | cmp %rbx, (%rcx)
491 | jne .L1
492 | addq $16, %rsp
493 |
494 | // - now, the actual rewind
495 | .L1: cmp store_log_bp, %rsp
496 | je .L2
497 | popq %rbx // value
498 | popq %rcx // address
499 | movq %rbx, (%rcx)
500 | jmp .L1
501 | .L2:
502 |
503 | // Restore FPU and SIMD states
504 | fxrstor64 (%rsp)
505 | addq $512, %rsp
506 | addq $8, %rsp // alignment
507 |
508 | // Restore the values in the GPRs
509 | popq %r15
510 | popq %r14
511 | popq %r13
512 | popq %r12
513 | popq %r11
514 | popq %r10
515 | popq %r9
516 | popq %r8
517 | popq %rbp
518 | popq %rdi
519 | popq %rsi
520 | popq %rdx
521 | popq %rcx
522 | popq %rbx
523 | popq %rax
524 |
525 | // Metadata
526 | popq specfuzz_call_type_stack_sp
527 | popq disable_speculation
528 | popq nesting_level
529 | popq instruction_counter
530 |
531 | // Stack Pointer
532 | popq current_rsp
533 |
534 |     // Retrieve the return address that was stored at checkpoint time
535 | popq return_address
536 |
537 | // EFlags
538 | popq tmp_eflags
539 |
540 | // Base pointer of the previous Store Log
541 | popq store_log_bp
542 |
543 | // Update the stack pointer of the Checkpoint Stack
544 | movq %rsp, checkpoint_sp
545 | ASSERT %rsp jg $checkpoint_stack specfuzz_exit_state_overflow
546 |
547 | #if ENABLE_SEQUENTIAL_SIMULATION == 1
548 | // When we have reached nesting level 1, we are exiting the simulation
549 | cmpq $1, nesting_level
550 | jne .L7
551 | movq $0, nesting_level
552 | .L7:
553 | #endif
554 |
555 | // Switch to the disjoint RTL frame and push the checkpointed return address
556 | movq $specfuzz_rtl_frame, %rsp
557 | pushq return_address
558 |
559 | // Restore the original value of eflags
560 | pushq tmp_eflags
561 | popfq
562 |
563 | ret // Finish the simulation
564 |
565 |
566 | //===------------------------------------------------------------------------===//
567 | // Reporting
568 | //===------------------------------------------------------------------------===//
569 |
570 | /// specfuzz_report: A callback invoked by ASan when it detects a bounds violation
571 | ///
572 | /// rdi: accessed address
573 | /// rsi: location of the offending instruction
574 | /// CLOB: eflags spec
575 | .globl specfuzz_report
576 | .type specfuzz_report, @function
577 | specfuzz_report:
578 | push %rdx
579 | push %rcx
580 | push %r8
581 | push %r9
582 | push %r10
583 | push %r11
584 | push %rax
585 | push %rbx
586 |
587 | // save the PC for a later use
588 | movq %rsi, tmp_gpr1
589 |
590 | #if ENABLE_PRINT == 1
591 | #if ENABLE_PRINT_OFFSET == 1
592 | // get the corresponding address in ASan's shadow memory
593 | mov %rdi, %rcx
594 | shr $0x3, %rcx
595 | addq $0x7fff8000, %rcx
596 |
597 | // TODO: refactor me!
598 | .macro test_offset var
599 | mov $\var, %rbx
600 | movzbl (%rcx, %rbx),%edx
601 | test %dl,%dl
602 | je specfuzz_report.offset_found
603 | .endm
604 |
605 | test_offset -2
606 | test_offset -4
607 | test_offset -8
608 | test_offset -16
609 | test_offset -32
610 | test_offset 2
611 | test_offset 4
612 | test_offset 8
613 | test_offset 16
614 | test_offset 32
615 |
616 | jmp specfuzz_report.offset_not_found
617 |
618 | specfuzz_report.offset_found:
619 | movq %rbx, %r8 // offset
620 | shlq $2, %r8 // the shadow memory is encoded byte-to-bit
621 | movq $0, %rcx // accessed address
622 | jmp specfuzz_report.offset_done
623 |
624 | specfuzz_report.offset_not_found:
625 | movq $0, %r8
626 | movq %rdi, %rcx // accessed address
627 | jmp specfuzz_report.offset_done
628 |
629 | specfuzz_report.offset_done:
630 |
631 | #else
632 | movq $0, %r8 // offset
633 | movq %rdi, %rcx // accessed address
634 | #endif // ENABLE_PRINT_OFFSET
635 |
636 | // report the detected violation
637 | movq store_log_bp, %r9
638 | movq CHECKPOINT_STACK_PC_OFFSET(%r9), %r9 // r9 = address of the latest speculated branch
639 | movq %rsi, %rdx // rdx = offending instruction
640 | mov $asan_detected_overflow_base, %esi
641 | mov stderr, %rdi
642 | mov $0, %eax
643 | call _IO_fprintf
644 |
645 | // iterate over all frames in Checkpoint Stack and print addresses of speculated branches
646 | movq store_log_bp, %rbx
647 | .L5:
648 | movq CHECKPOINT_STACK_BP_OFFSET(%rbx), %rbx // get previous frame
649 | test %rbx, %rbx // no frames anymore? we're done
650 | je .L6
651 |
652 | movq CHECKPOINT_STACK_PC_OFFSET(%rbx), %rdx // rdx = address of the speculated branch
653 | mov $asan_detected_overflow_next, %esi
654 | mov stderr, %rdi
655 | mov $0, %eax
656 | call _IO_fprintf
657 | jmp .L5
658 | .L6:
659 |
660 | // print new line
661 | movq stderr, %rsi
662 | movl $10, %edi
663 | call _IO_putc
664 | #endif // ENABLE_PRINT
665 |
666 | #if ENABLE_COVERAGE == 1
667 | // report the vulnerability to the fuzzer
668 | movq tmp_gpr1, %rdi
669 | call specfuzz_cov_vuln
670 | #endif
671 |
672 | STAT_INCREMENT stat_asan_overflow
673 | ASSERT nesting_level je $0 specfuzz_exit_asan_overflow
674 |
675 | pop %rbx
676 | pop %rax
677 | pop %r11
678 | pop %r10
679 | pop %r9
680 | pop %r8
681 | pop %rcx
682 | pop %rdx
683 | ret
684 |
685 | /// specfuzz_report_corrupted_code_pointer:
686 | ///
687 | /// Here, we are free to use any registers as we will later proceed with a rollback
688 | /// The stack is also available because we are in a disjoint frame
689 | ///
690 | /// rdi: address
691 | /// rsi: location of the offending instruction
692 | /// CLOB: eflags registers spec
693 | .globl specfuzz_report_corrupted_code_pointer
694 | .type specfuzz_report_corrupted_code_pointer, @function
695 | specfuzz_report_corrupted_code_pointer:
696 | ASSERT nesting_level je $0 specfuzz_exit_unknown_corruption
697 | #if ENABLE_PRINT == 1
698 | movq store_log_bp, %r9
699 | movq CHECKPOINT_STACK_PC_OFFSET(%r9), %r9 // latest speculated branch
700 | movq $0, %r8 // offset
701 | movq %rdi, %rcx // accessed address
702 | movq %rsi, %rdx // offending instruction
703 | mov $detected_corrupted_code_pointer, %esi
704 | mov stderr, %rdi
705 | mov $0, %eax
706 | call _IO_fprintf
707 | #endif
708 | callq specfuzz_rlbk_forced
709 |
710 |
711 | //===------------------------------------------------------------------------===//
712 | // Misc.
713 | //===------------------------------------------------------------------------===//
714 |
715 | /// specfuzz_check_code_pointer: Checks if the pointer that we're about to dereference is within
716 | /// the .text section
717 | ///
718 | /// rdi: accessed address
719 | /// CLOB: spec stack
720 | .globl specfuzz_check_code_pointer
721 | .type specfuzz_check_code_pointer, @function
722 | specfuzz_check_code_pointer:
723 | pushfq
724 | // TODO: this implementation is very simplistic and will often lead to false positives
725 | // We need to come up with a better approach to verify a pointer
726 | // For now, if it causes any trouble, just disable it at compile time
727 | cmpq $__executable_start, %rdi
728 | jl specfuzz_check_code_pointer.corrupted
729 | cmpq $_etext, %rdi
730 | jl specfuzz_check_code_pointer.ok
731 | cmpq $_end, %rdi # bss and data
732 | jl specfuzz_check_code_pointer.corrupted
733 |
734 | specfuzz_check_code_pointer.unknown:
735 | // We are above BSS, which means we are about to either enter dynamically linked
736 | // code (most likely uninstrumented) or execute some random data.
737 | // We do not report this case, because it could be desired behavior (dynamic libs),
738 | // but in both cases we need to roll back
739 | cmpq $0, nesting_level
740 | je 1f
741 | callq specfuzz_rlbk
742 | 1: popfq
743 | ret
744 |
745 | specfuzz_check_code_pointer.ok:
746 | popfq
747 | ret
748 |
749 | specfuzz_check_code_pointer.corrupted:
750 | STAT_INCREMENT stat_corrupted_code_pointer
751 | movq 8(%rsp), %rsi
752 | addq $8, %rsi
753 | callq specfuzz_report_corrupted_code_pointer
754 |
755 |
756 | /// specfuzz_cov_trace_pc_wrapper: Pass the callee address to specfuzz_cov_trace_pc
757 | ///
758 | .globl specfuzz_cov_trace_pc_wrapper
759 | .type specfuzz_cov_trace_pc_wrapper, @function
760 | specfuzz_cov_trace_pc_wrapper:
761 | #if ENABLE_COVERAGE == 1
762 | pushq %rdi
763 | pushq %r11
764 | pushfq
765 | movq 24(%rsp), %rdi
766 | callq specfuzz_cov_trace_pc
767 | popfq
768 | popq %r11
769 | popq %rdi
770 | #endif
771 | ret
772 |
773 |
774 | /// specfuzz_exit_*: Exit with an error message
775 | ///
776 | specfuzz_exit_unknown_corruption:
777 | movl $error_not_speculative, %edi
778 | jmp specfuzz_exit
779 |
780 | specfuzz_exit_state_overflow:
781 | movl $error_checkpoint_stack_overflow, %edi
782 | jmp specfuzz_exit
783 |
784 | specfuzz_exit_asan_overflow:
785 | movl $asan_detected_real_overflow, %edi
786 | jmp specfuzz_exit
787 |
788 | specfuzz_exit_corrupted_nesting_level:
789 | movl $error_corrupted_nesting, %edi
790 | jmp specfuzz_exit
791 |
792 | .type specfuzz_exit, @function
793 | specfuzz_exit:
794 | movl $0, %eax
795 | call printf
796 | movl $42, %edi
797 | call exit
798 |
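799 | /// Informal note (not authoritative; derived from specfuzz_rlbk above): a rollback
800 | /// unwinds one checkpoint frame from the Checkpoint Stack by popping, in order,
801 | /// the Store Log (value/address pairs), the 512-byte FXSAVE area plus 8 bytes of
802 | /// alignment, the GPRs, the simulation metadata (call-type stack pointer,
803 | /// disable_speculation, nesting_level, instruction_counter), the saved stack
804 | /// pointer, the checkpointed return address, EFLAGS, and the previous store_log_bp.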
--------------------------------------------------------------------------------
/src/specfuzz_rtl.h:
--------------------------------------------------------------------------------
1 | //===-------- specfuzz_rtl.h ------------------------------------------------===//
2 | //
3 | // Copyright: This file is distributed under the GPL version 3 License.
4 | // See LICENSE for details.
5 | //
6 | //===------------------------------------------------------------------------===//
7 | /// \file
8 | ///
9 | //===------------------------------------------------------------------------===//
10 | #ifndef SPECFUZZ_RTL_H
11 | #define SPECFUZZ_RTL_H
12 | #include <stdint.h>
13 |
14 | // global variables declared in specfuzz_rtl.S
15 | extern uint64_t nesting_level;
16 | extern int64_t disable_speculation;
17 | extern uint64_t *store_log_bp;
18 | extern uint64_t branch_execution_count;
19 |
20 | extern uint64_t specfuzz_rtl_frame;
21 | extern uint64_t specfuzz_rtl_frame_bottom;
22 |
23 | extern uint64_t asan_rtl_frame;
24 | extern uint64_t asan_rtl_frame_bottom;
25 |
26 | extern uint64_t *checkpoint_sp;
27 | extern uint64_t checkpoint_stack;
28 | extern uint64_t checkpoint_stack_bottom;
29 |
30 | extern uint64_t stat_max_depth;
31 | extern uint64_t stat_forced_external_call;
32 | extern uint64_t stat_forced_indirect_call;
33 | extern uint64_t stat_forced_serializing_instruction;
34 | extern uint64_t stat_max_nesting;
35 | extern uint64_t stat_asan_overflow;
36 | extern uint64_t stat_signal_overflow;
37 | extern uint64_t stat_corrupted_code_pointer;
38 | extern uint64_t stat_signal_misc;
39 | extern uint64_t stat_simulation_disables;
40 | extern uint64_t stat_skiped_due_to_disabled;
41 |
42 | extern void specfuzz_rlbk_forced(void);
43 |
44 | // Coverage
45 | void specfuzz_cov_init();
46 | __attribute__((weak)) __attribute__((preserve_most))
47 | void specfuzz_cov_trace_pc(uintptr_t pc);
48 | __attribute__((weak))
49 | void specfuzz_cov_vuln(uintptr_t pc);
50 | __attribute__((weak)) __attribute__((preserve_most))
51 | struct map_entry_t *get_hash_map_entry(uintptr_t pc);
52 |
53 |
54 | #endif //SPECFUZZ_RTL_H
55 |
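56 | // Illustrative sketch (not part of the runtime): a C consumer of this header,
57 | // e.g. a custom statistics module, could read the counters directly:
58 | //
59 | //   #include <stdio.h>
60 | //   #include "specfuzz_rtl.h"
61 | //   void dump_stats(void) {
62 | //       fprintf(stderr, "speculative ASan overflows: %lu\n", stat_asan_overflow);
63 | //   }
64 | //
65 | // dump_stats() is a hypothetical helper used only for illustration.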
--------------------------------------------------------------------------------
/tests/Makefile:
--------------------------------------------------------------------------------
1 | CC ?= clang
2 | CFLAGS ?= -O3 -ggdb
3 | RTLIB ?= -lspecfuzz
4 |
5 | .SUFFIXES: # disable built-in rules
6 | .PHONY: all
7 |
8 | TESTS := dummy rtl_chkp rtl_chkp_rlbk acceptance-basic acceptance-mmul
9 |
10 |
11 | all:
12 | echo "no default"
13 |
14 | %: %.S libtest.a
15 | $(CC) $(CFLAGS) $< -o $@ $(RTLIB) -ltest -L. -fsanitize=address
16 |
17 | %: %.c libtest.a
18 | $(CC) $(CFLAGS) $< -o $@ $(RTLIB)
19 |
20 | libtest.a: common/process_state.S
21 | # here, we must use a normal, non-SpecFuzz compiler to avoid the instrumentation
22 | gcc $(CFLAGS) common/process_state.S -c -o process_state.o
23 | ar rc $@ process_state.o
24 | rm process_state.o
25 |
26 | clean:
27 | rm -f *.o $(TESTS)
28 |
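29 | # Typical invocations (mirroring tests/run.bats):
30 | #   CC=clang-sf make acceptance-basic    # C tests are built with the SpecFuzz wrapper
31 | #   CC=clang make rtl_chkp               # assembly RTL tests use a plain clang driver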
--------------------------------------------------------------------------------
/tests/acceptance-basic.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 |
4 | int array_before[] = {3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3};
5 | int array[] = {3, 3, 3, 3, 3, 3, 3, 3, 3, 3};
6 | int array_next[] = {3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3};
7 | int temp;
8 |
9 | int main(int argc, char **argv) {
10 | int index = atoi(argv[1]);
11 | if (index < 10) {
12 | temp &= array[index];
13 | } else {
14 | temp = 0;
15 | }
16 | printf("r = %d\n", temp);
17 | return 0;
18 | }
19 |
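20 | // Invoked by tests/run.bats, for example:
21 | //   ASAN_OPTIONS=allow_user_segv_handler=1:detect_leaks=0 ./acceptance-basic 100
22 | // An out-of-bounds index is expected to be read speculatively when the
23 | // `index < 10` check is mispredicted; SpecFuzz reports this as an "[SF], ..." line.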
--------------------------------------------------------------------------------
/tests/acceptance-mmul.c:
--------------------------------------------------------------------------------
1 | //===------------------------------------------------------------------------===//
2 | /// \file
3 | /// A tiny acceptance test to ensure that the instrumentation does not cause
4 | /// major state corruption
5 | ///
6 | /// Based on simple matrix multiplication
7 | //===------------------------------------------------------------------------===//
8 |
9 | #include <stdio.h>
10 |
11 | int matA[2][2] = {{0, 1}, {2, 3}};
12 | int matB[2][2] = {{4, 5}, {6, 7}};
13 | int matC[2][2]; // 6 7 26 31
14 |
15 | __attribute__((noinline))
16 | void multiply(int i, int j, int N) {
17 | int k;
18 | for (k = 0; k < N; k++) {
19 | matC[i][j] += matA[i][k] * matB[k][j];
20 | }
21 | }
22 |
23 | __attribute__((noinline))
24 | int sum() {
25 | int total = 0;
26 | for (int i = 0; i < 2; i++) {
27 | for (int j = 0; j < 2; j++) {
28 | total += matC[i][j];
29 | }
30 | }
31 | return total;
32 | }
33 |
34 | int main() {
35 | int i, j;
36 |
37 | for (i = 0; i < 2; i++) {
38 | for (j = 0; j < 2; j++) {
39 | matC[i][j] = 0;
40 | multiply(i, j, 2);
41 | }
42 | }
43 | printf("%d\n", sum());
44 | return 0;
45 | }
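46 |
47 | // Expected output: 70, i.e. the sum of matC = 6 + 7 + 26 + 31 (see tests/run.bats).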
--------------------------------------------------------------------------------
/tests/analyzer_unit.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import sys
3 |
4 | sys.path.append('..')
5 | from postprocessing import analyzer
6 |
7 |
8 | class TestCollection(unittest.TestCase):
9 | def setUp(self) -> None:
10 | fault = analyzer.Fault(1)
11 | fault.branch_sequences = {
12 | (1, 2, 3, 6), (1, 2, 3), (1, 2, 4), (2, 3, 1), (1, 1, 2, 3), (1, 2, 3, 4),
13 | (5,), (5, 5), }
14 |
15 | self.results = analyzer.CollectedResults()
16 | self.results.faults = {1: fault}
17 |
18 | def test_minimize(self):
19 | self.results.minimize_sequences()
20 | self.assertEqual(self.results.faults[1].branch_sequences, {(1, 2, 3), (1, 2, 4), (5,)})
21 |
22 |
23 | if __name__ == '__main__':
24 | unittest.main()
25 |
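26 | # Run from the tests/ directory, as tests/run.bats does:
27 | #   python3 -m unittest -v analyzer_unit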
--------------------------------------------------------------------------------
/tests/common/header.S:
--------------------------------------------------------------------------------
1 | .extern set_state
2 | .extern store_stack_state
3 | .extern store_metadata
4 | .extern check_state
5 | .extern specfuzz_chkp
6 | .extern current_rsp
7 | .extern log_test_start
8 | .extern log_test_end
9 |
10 | .macro LOG msg
11 | movl $\msg, %edi
12 | xorl %eax, %eax
13 | callq printf
14 | .endm
15 |
16 | .macro RESET_META
17 | movq $0, disable_speculation
18 | movq $0, nesting_level
19 | movq $specfuzz_call_type_stack, specfuzz_call_type_stack_sp
20 | movq $0, instruction_counter
21 | .endm
22 |
23 | .macro PREPARE_CORRUPTION_TEST id value
24 | movq $\id, %rsi
25 | LOG log_test_start
26 | movq $\value, %rdi
27 | callq set_state
28 | callq store_stack_state
29 | callq store_metadata
30 | .endm
31 |
32 | .macro EXECUTE_CORRUPTION_TEST value include_meta
33 | movq $\value, %rdi
34 | callq check_state
35 | cmp $0, %rax
36 | je 1f
37 | ret
38 | 1:
39 |
40 | .if \include_meta
41 | callq check_metadata
42 | .endif
43 | RESET_META
44 | LOG log_test_end
45 | .endm
46 |
47 | .macro CALL_RTL_FUNCTION fn_name
48 | movq %rsp, current_rsp
49 | leaq specfuzz_rtl_frame, %rsp
50 | callq \fn_name
51 | movq current_rsp, %rsp
52 | .endm
53 |
54 | .data
55 | log_test_start: .asciz "Running test No. %d: "
56 | log_test_end: .asciz "ok\n"
57 |
--------------------------------------------------------------------------------
/tests/common/process_state.S:
--------------------------------------------------------------------------------
1 | .data
2 |
3 | .extern printf
4 |
5 | .set EFLAGS_VALUE, 0x256
6 |
7 | .globl previous_rsp
8 | .globl previous_rbp
9 | previous_rsp: .quad 0
10 | previous_rbp: .quad 0
11 |
12 | .globl previous_instruction_counter
13 | .globl previous_nesting_level
14 | .globl previous_disable_speculation
15 | .globl previous_specfuzz_call_type_stack_sp
16 | previous_instruction_counter: .quad 0
17 | previous_nesting_level: .quad 0
18 | previous_disable_speculation: .quad 0
19 | previous_specfuzz_call_type_stack_sp: .quad 0
20 |
21 | .globl fx_frame
22 | .align 64
23 | fx_frame: .zero 512
24 |
25 | corruption_id: .quad 0
26 | error_corruption: .string "Corrupted state with id %d. See process_state.S:check_state() for details\n"
27 |
28 | // -----------------------------------------
29 | .text
30 | /// set_state: Sets all GPRs (as well as the FPU/SIMD state and EFLAGS) to the same value
31 | /// rdi: the value
32 | ///
33 | .globl set_state
34 | .type set_state, @function
35 | set_state:
36 | // - FPU and SIMD states
37 | movq $0, %rax
38 | .L1: cmpq $512, %rax
39 | je .L2
40 | movq %rdi, fx_frame(%rax)
41 | addq $8, %rax
42 | jmp .L1
43 | .L2:
44 | fxrstor64 fx_frame
45 |
46 | // GPRs
47 | movq %rdi, %rax
48 | movq %rdi, %rbx
49 | movq %rdi, %rcx
50 | movq %rdi, %rdx
51 | movq %rdi, %rsi
52 | movq %rdi, %r8
53 | movq %rdi, %r9
54 | movq %rdi, %r10
55 | movq %rdi, %r11
56 | movq %rdi, %r12
57 | movq %rdi, %r13
58 | movq %rdi, %r14
59 | movq %rdi, %r15
60 |
61 | // Flags
62 | pushq $EFLAGS_VALUE
63 | popfq
64 | ret
65 |
66 | .globl store_stack_state
67 | .type store_stack_state, @function
68 | store_stack_state:
69 | movq %rsp, previous_rsp
70 | movq %rbp, previous_rbp
71 | ret
72 |
73 | .globl store_metadata
74 | .type store_metadata, @function
75 | store_metadata:
76 | pushq %rax
77 | movq instruction_counter, %rax
78 | movq %rax, previous_instruction_counter
79 | movq nesting_level, %rax
80 | movq %rax, previous_nesting_level
81 | movq disable_speculation, %rax
82 | movq %rax, previous_disable_speculation
83 | movq specfuzz_call_type_stack_sp, %rax
84 | movq %rax, previous_specfuzz_call_type_stack_sp
85 | popq %rax
86 | ret
87 |
88 |
89 | /// check_state: Checks that EFLAGS, the stack pointers, and all GPR values match the expected values
90 | /// rdi: the value
91 | ///
92 | .globl check_state
93 | .type check_state, @function
94 | check_state:
95 | .macro CHECK_VALUE id, register, value
96 | movq \id, corruption_id
97 | cmp \register, \value
98 | jne check_state.fail
99 | .endm
100 |
101 | // Flags
102 | pushfq
103 | cmpq $EFLAGS_VALUE, (%rsp)
104 | je .L5
105 | popfq
106 | movq $0, corruption_id
107 | jmp check_state.fail
108 | .L5: popfq
109 |
110 | // stack
111 | CHECK_VALUE $1, %rsp, previous_rsp
112 | CHECK_VALUE $2, %rbp, previous_rbp
113 |
114 | // GPRs
115 | CHECK_VALUE $3, %rdi, %rax
116 | CHECK_VALUE $4, %rdi, %rbx
117 | CHECK_VALUE $5, %rdi, %rcx
118 | CHECK_VALUE $6, %rdi, %rdx
119 | CHECK_VALUE $7, %rdi, %rsi
120 | CHECK_VALUE $8, %rdi, %r8
121 | CHECK_VALUE $9, %rdi, %r9
122 | CHECK_VALUE $10, %rdi, %r10
123 | CHECK_VALUE $11, %rdi, %r11
124 | CHECK_VALUE $12, %rdi, %r12
125 | CHECK_VALUE $13, %rdi, %r13
126 | CHECK_VALUE $14, %rdi, %r14
127 | CHECK_VALUE $15, %rdi, %r15
128 |
129 | // FPU
130 | // TODO: this code does not work - sometimes fxsave64 introduces random corruptions
131 | // needs further investigation
132 | //fxsave64 fx_frame
133 | //movq $0, %rax
134 | //.L3: addq $8, %rax // skip the first element, it's always corrupted
135 | //cmpq $512, %rax
136 | //je .L4
137 | // cmpq $24, %rax // 3rd element also always gets corrupted
138 | // je .L3
139 | // CHECK_VALUE $16, %rdi, fx_frame(%rax)
140 | // jmp .L3
141 | //.L4:
142 |
143 | check_state.success:
144 | movq $0, %rax
145 | ret
146 |
147 | check_state.fail:
148 | pushq $0x246 # just in case, overwrite EFLAGS to avoid problems with printf
149 | popfq
150 |
151 | movq corruption_id, %rsi
152 | xor %rdi, %rdi
153 | movl $error_corruption, %edi
154 | movq $0, %rax
155 | call printf
156 |
157 | movq $1, %rax
158 | ret
159 |
160 | /// Check integrity of global variables
161 | ///
162 | .globl check_metadata
163 | .type check_metadata, @function
164 | check_metadata:
165 | .macro CHECK_GLOBAL id, value, prev_value
166 | movq \id, corruption_id
167 | movq \prev_value, %rax
168 | cmp %rax, \value
169 | jne check_metadata.fail
170 | .endm
171 |
172 | CHECK_GLOBAL $16, previous_instruction_counter, instruction_counter
173 | CHECK_GLOBAL $17, previous_nesting_level, nesting_level
174 | CHECK_GLOBAL $18, previous_disable_speculation, disable_speculation
175 | CHECK_GLOBAL $19, previous_specfuzz_call_type_stack_sp, specfuzz_call_type_stack_sp
176 | ret
177 |
178 | check_metadata.fail:
179 | pushq $0x246 # just in case, overwrite EFLAGS to avoid problems with printf
180 | popfq
181 |
182 | movq corruption_id, %rsi
183 | xor %rdi, %rdi
184 | movl $error_corruption, %edi
185 | movq $0, %rax
186 | call printf
187 |
188 | movq $1, %rax
189 | ret
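190 |
191 | /// Intended usage (see tests/common/header.S and the rtl_* tests): a test first
192 | /// calls set_state, store_stack_state, and store_metadata, then invokes the RTL
193 | /// routine under test, and finally calls check_state / check_metadata to verify
194 | /// that no register or metadata corruption was introduced.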
--------------------------------------------------------------------------------
/tests/dummy.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 |
4 | int main(int argc, const char *argv[]) {
5 | printf("Hello World!\n");
6 | return 0;
7 | }
--------------------------------------------------------------------------------
/tests/rtl_chkp.S:
--------------------------------------------------------------------------------
1 | .include "common/header.S"
2 |
3 | .macro TEST id
4 | PREPARE_CORRUPTION_TEST \id 42
5 | CALL_RTL_FUNCTION specfuzz_chkp
6 | jmp 1f // imitate instrumentation
7 | 1:
8 | EXECUTE_CORRUPTION_TEST 42 0
9 | .endm
10 |
11 | .macro TEST_WITH_META id
12 | PREPARE_CORRUPTION_TEST \id 42
13 | CALL_RTL_FUNCTION specfuzz_chkp
14 | jmp 1f // imitate instrumentation
15 | 1:
16 | EXECUTE_CORRUPTION_TEST 42 1
17 | .endm
18 |
19 | .text
20 | .globl main
21 | .p2align 4, 0x90
22 | .type main, @function
23 | main:
24 | RESET_META
25 |
26 | # default behavior
27 | TEST 1
28 |
29 | # configuration variants
30 | movq $1, disable_speculation
31 | TEST_WITH_META 2
32 |
33 | movq $1, nesting_level
34 | movq $10, instruction_counter # needs to be set; otherwise, the RTL will execute a rollback
35 | TEST 3
36 |
37 | movq $2, nesting_level # larger than max_nesting_level
38 | movq $10, instruction_counter
39 | TEST 4
40 |
41 | movq $0, %rax
42 | ret
43 |
--------------------------------------------------------------------------------
/tests/rtl_chkp_rlbk.S:
--------------------------------------------------------------------------------
1 | .include "common/header.S"
2 |
3 | .data
4 | error_unreachable: .string "Unreachable location\n"
5 |
6 | .text
7 | .globl main
8 | .type main, @function
9 | main:
10 | callq specfuzz_init
11 | RESET_META
12 |
13 | # test executed rollback
14 | movq $250, instruction_counter
15 | PREPARE_CORRUPTION_TEST 1 42
16 | CALL_RTL_FUNCTION specfuzz_chkp
17 | jmp 2f // normal
18 | jmp 1f // simulation
19 |
20 | 1:
21 | // introduce some state corruption
22 | movq $41, %rdi
23 | callq set_state
24 |
25 | // rollback
26 | movq $-1, instruction_counter
27 | CALL_RTL_FUNCTION specfuzz_rlbk_if_done
28 | jmp .unreachable
29 |
30 | 2:
31 | EXECUTE_CORRUPTION_TEST 42 1
32 |
33 | # test skipped rollback
34 | movq $1, disable_speculation
35 | PREPARE_CORRUPTION_TEST 2 42
36 | CALL_RTL_FUNCTION specfuzz_chkp
37 | jmp 2f // normal
38 | jmp 1f // simulation
39 |
40 | 1:
41 | jmp .unreachable
42 |
43 | 2:
44 | movq $41, %rdi
45 | callq set_state
46 |
47 | // rollback
48 | movq $-1, instruction_counter
49 | movq $-1, previous_instruction_counter
50 | CALL_RTL_FUNCTION specfuzz_rlbk_if_done
51 | EXECUTE_CORRUPTION_TEST 41 1
52 |
53 | movq $0, %rax
54 | ret
55 |
56 | .unreachable:
57 | xor %rdi, %rdi
58 | movl $error_unreachable, %edi
59 | movq $0, %rax
60 | call printf
61 | movq $1, %rax
62 | ret
--------------------------------------------------------------------------------
/tests/rtl_report.S:
--------------------------------------------------------------------------------
1 | .include "common/header.S"
2 |
3 | .macro CORRUPTION_TEST_FOR_REPORT value
4 | pushq $0x256
5 | popfq
6 | movq $\value, %rsi
7 |
8 | movq $\value, %rdi
9 | callq check_state
10 | cmp $0, %rax
11 | je 1f
12 | ret
13 | 1:
14 | LOG log_test_end
15 | .endm
16 |
17 | .macro TEST id
18 | PREPARE_CORRUPTION_TEST \id 41
19 | CALL_RTL_FUNCTION specfuzz_report
20 | CORRUPTION_TEST_FOR_REPORT 41
21 | .endm
22 |
23 | .data
24 | error_unreachable: .string "Unreachable location\n"
25 |
26 | .text
27 | .globl main
28 | .p2align 4, 0x90
29 | .type main, @function
30 | main:
31 | callq specfuzz_init
32 | RESET_META
33 | movq $42, %rdi
34 | callq set_state
35 | callq store_stack_state
36 | CALL_RTL_FUNCTION specfuzz_chkp
37 | jmp .L1 // normal
38 | jmp .L2 // simulation
39 |
40 | .L1:
41 | EXECUTE_CORRUPTION_TEST 42 0
42 |
43 | movq $0, %rax
44 | ret
45 |
46 | .L2:
47 | // report
48 | TEST 1
49 | TEST 2 // make sure we can call the report repeatedly
50 |
51 | // rollback
52 | movq $-1, instruction_counter
53 | CALL_RTL_FUNCTION specfuzz_rlbk_if_done
54 | jmp .unreachable
55 |
56 |
57 | .unreachable:
58 | xor %rdi, %rdi
59 | movl $error_unreachable, %edi
60 | movq $0, %rax
61 | call printf
62 | movq $1, %rax
63 | ret
64 |
65 |
--------------------------------------------------------------------------------
/tests/rtl_rlbk.S:
--------------------------------------------------------------------------------
1 | .include "common/header.S"
2 |
3 | .macro TEST id fn_name
4 | PREPARE_CORRUPTION_TEST \id 42
5 | CALL_RTL_FUNCTION \fn_name
6 | EXECUTE_CORRUPTION_TEST 42 1
7 | .endm
8 |
9 | .text
10 | .globl main
11 | .p2align 4, 0x90
12 | .type main, @function
13 | main:
14 | RESET_META
15 | movq $0, nesting_level
16 | TEST 1 specfuzz_rlbk_if_done
17 |
18 | movq $1, nesting_level
19 | movq $10, instruction_counter
20 | TEST 2 specfuzz_rlbk_if_done
21 |
22 | movq $0, nesting_level
23 | TEST 3 specfuzz_rlbk_forced
24 |
25 | movq $0, nesting_level
26 | TEST 4 specfuzz_rlbk_external_call
27 |
28 | movq $0, nesting_level
29 | TEST 5 specfuzz_rlbk_indirect_call
30 |
31 | movq $0, nesting_level
32 | TEST 6 specfuzz_rlbk_serializing
33 |
34 | movq $0, nesting_level
35 | TEST 7 specfuzz_rlbk_patched
36 |
37 | movq $0, %rax
38 | ret
39 |
--------------------------------------------------------------------------------
/tests/run.bats:
--------------------------------------------------------------------------------
1 | #!/usr/bin/bats
2 |
3 | export CLANG_DIR=$(llvm-7.0.1-config --bindir)
4 | export RTFLAGS="-lspecfuzz -L$CLANG_DIR/../lib"
5 |
6 | msg () {
7 | echo "[BATS] " $*
8 | }
9 |
10 | setup () {
11 | make clean
12 | }
13 |
14 | teardown() {
15 | make clean
16 | }
17 |
18 | function asm_test {
19 | CC=clang make ${NAME}
20 | run bash -c "./${NAME} 2>&1"
21 | if [ "$status" -ne 0 ]; then
22 | echo "status: $status"
23 | printf "output: $output\n"
24 | echo " output end"
25 | fi
26 | }
27 |
28 | @test "[$BATS_TEST_NUMBER] Acceptance: The pass is enabled and compiles correctly" {
29 | NAME=dummy
30 | CC=clang-sf make ${NAME}
31 | run bash -c "./${NAME}"
32 | [ "$status" -eq 0 ]
33 | [ "$output" = "[SF] Starting
34 | Hello World!" ]
35 | }
36 |
37 | @test "[$BATS_TEST_NUMBER] Acceptance: Detection of a speculative overflow with ASan" {
38 | NAME=acceptance-basic
39 | CC=clang-sf make ${NAME}
40 | run bash -c "ASAN_OPTIONS=allow_user_segv_handler=1:detect_leaks=0 ./${NAME} 100"
41 | [ "$status" -eq 0 ]
42 | [[ "$output" == *"[SF], 1,"* ]]
43 | }
44 |
45 | @test "[$BATS_TEST_NUMBER] Acceptance: Detection of a speculative overflow with signal handler" {
46 | NAME=acceptance-basic
47 | CC=clang-sf make ${NAME}
48 | run bash -c "ASAN_OPTIONS=allow_user_segv_handler=1:detect_leaks=0 ./${NAME} 1000000000"
49 | [ "$status" -eq 0 ]
50 | [[ "$output" == *"[SF], 11,"* ]]
51 | }
52 |
53 | @test "[$BATS_TEST_NUMBER] Acceptance: mmul" {
54 | NAME=acceptance-mmul
55 |
56 | CC=clang make ${NAME}
57 | run bash -c "./${NAME}"
58 | [ "$status" -eq 0 ]
59 | [[ "$output" == *"70" ]]
60 |
61 | CC=clang-sf make ${NAME}
62 | run bash -c "./${NAME}"
63 | [ "$status" -eq 0 ]
64 | [[ "$output" == *"70" ]]
65 |
66 | CC=clang-sf CFLAGS="-O1" make ${NAME}
67 | run bash -c "./${NAME}"
68 | [ "$status" -eq 0 ]
69 | [[ "$output" == *"70" ]]
70 |
71 | CC=clang-sf CFLAGS="-O2" make ${NAME}
72 | run bash -c "./${NAME}"
73 | [ "$status" -eq 0 ]
74 | [[ "$output" == *"70" ]]
75 |
76 | CC=clang-sf CFLAGS="-O3" make ${NAME}
77 | run bash -c "./${NAME}"
78 | [ "$status" -eq 0 ]
79 | [[ "$output" == *"70" ]]
80 | }
81 |
82 | @test "[$BATS_TEST_NUMBER] Runtime: Checkpointing function does not introduce corruptions" {
83 | NAME=rtl_chkp
84 | asm_test
85 | [ "$status" -eq 0 ]
86 | }
87 |
88 | @test "[$BATS_TEST_NUMBER] Runtime: Rollback function does not introduce corruptions" {
89 | NAME=rtl_rlbk
90 | asm_test
91 | [ "$status" -eq 0 ]
92 | }
93 |
94 | @test "[$BATS_TEST_NUMBER] Runtime: Rollback functions correctly" {
95 | NAME=rtl_chkp_rlbk
96 | asm_test
97 | [ "$status" -eq 0 ]
98 | }
99 |
100 | @test "[$BATS_TEST_NUMBER] Runtime: Reporting does not introduce corruptions" {
101 | NAME=rtl_report
102 | asm_test
103 | [ "$status" -eq 0 ]
104 | }
105 |
106 | @test "[$BATS_TEST_NUMBER] Runtime: Reporting functions correctly" {
107 | NAME=rtl_report
108 | asm_test
109 | [[ "${lines[1]}" == "[SF], 1, 0x29, 0x29, 0, "* ]] || [[ "${lines[1]}" == "[SF], 1, 0x29, 0x0, -8, "* ]]
110 | }
111 |
112 | @test "[$BATS_TEST_NUMBER] Wrapper: mmul compiled with a wrapper script" {
113 | NAME=acceptance-mmul
114 | CC=clang-sf CFLAGS=" --disable-asan -O3 -ggdb" make ${NAME}
115 | run bash -c "./${NAME}"
116 | [ "$status" -eq 0 ]
117 | [[ "$output" == *"70" ]]
118 | }
119 |
120 | @test "[$BATS_TEST_NUMBER] Wrapper: mmul compiled with a c++ wrapper script" {
121 | NAME=acceptance-mmul
122 | CC=clang-sf++ CFLAGS=" --disable-asan -O3 -ggdb" make ${NAME}
123 | run bash -c "./${NAME}"
124 | [ "$status" -eq 0 ]
125 | [[ "$output" == *"70" ]]
126 | }
127 |
128 | @test "[$BATS_TEST_NUMBER] Pass: Collecting functions" {
129 | NAME=dummy
130 | CC=clang-sf CFLAGS=" --collect list.txt --disable-asan -O3 -ggdb" make ${NAME}
131 | uniq list.txt | sort > t && mv t list.txt
132 | run cat list.txt
133 | [ "$output" == "main" ]
134 | rm list.txt
135 | }
136 |
137 | @test "[$BATS_TEST_NUMBER] Analyzer: Unit tests" {
138 | run bash -c "python3 -m unittest -v analyzer_unit"
139 | if [ "$status" -ne 0 ]; then
140 | printf "$output\n"
141 | fi
142 | [ "$status" -eq 0 ]
143 | }
144 |
145 |
146 | # Below are our old tests. They probably won't work anymore
147 |
148 | #@test "[$BATS_TEST_NUMBER] Analyzer: Correctly aggregates values" {
149 | # skip
150 | # touch tmp
151 | # data="[SF], 0, 0x1, 0x1, 10\n[SF], 0, 0x1, 0x2, 20\n[SF], 0, 0x2, 0x2, 10\n"
152 | # run bash -c "printf \"$data\" | analyzer coverage -c tmp -o tmp"
153 | # [ "$status" -eq 0 ]
154 | # [ "$output" == "|1 |1 |False |1 |
155 | #|2 |1,2 |False |1 |" ]
156 | # rm tmp
157 | #}
158 | #
159 | #@test "[$BATS_TEST_NUMBER] Analyzer: Correctly combines experiments" {
160 | # skip
161 | # touch tmp
162 | # experiment1="[SF] Starting\n[SF], 0, 0x1, 0x1, 10\n"
163 | # experiment2="[SF] Starting\n[SF], 1, 0x1, 0x1, 10\n[SF], 0, 0x2, 0x1, 10\n"
164 | # data="$experiment1$experiment2"
165 | # run bash -c "printf \"$data\" | analyzer coverage -c tmp -o tmp"
166 | # [ "$status" -eq 0 ]
167 | # echo "$output"
168 | # [ "$output" == "|1 |1,2 |False |2 |" ]
169 | # rm tmp
170 | #}
171 | #
172 | #@test "[$BATS_TEST_NUMBER] Analyzer: Correctly detects control" {
173 | # skip
174 | # touch tmp
175 | # experiment1="[SF] Starting\n[SF], 0, 0x1, 0x1, 10\n[SF] Starting\n[SF], 1, 0x1, 0x1, 20\n[SF], 0, 0x2, 0x1, 10\n"
176 | # data="$experiment1"
177 | # run bash -c "printf \"$data\" | analyzer coverage -c tmp -o tmp"
178 | # [ "$status" -eq 0 ]
179 | # echo "$output"
180 | # [ "$output" == "|1 |1,2 |True |2 |" ]
181 | # rm tmp
182 | #}
183 | #
184 | #@test "[$BATS_TEST_NUMBER] Analyzer: Detects errors" {
185 | # skip
186 | # touch tmp
187 | # data='[SF] Error: foo bar\n[SF], 0, a, b, 20\n[SF], 0, b, b, 10\n'
188 | # printf "$data" | analyzer coverage -c tmp -o tmp
189 | # run grep "[SF] Error: foo bar" tmp
190 | # [ "$status" -ne 0 ]
191 | # rm tmp
192 | #}
193 |
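194 | # The suite is intended to be run from this directory with bats, e.g.:
195 | #   bats run.bats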
--------------------------------------------------------------------------------