├── .gitignore ├── common.h ├── init.c ├── common.c ├── gen-insn-common.c ├── Makefile ├── README.md ├── ebpf_fuzzer.c ├── ebpf_fuzzer.h ├── insn_print.c └── gen-insn.c /.gitignore: -------------------------------------------------------------------------------- 1 | image/ 2 | *.o 3 | ebpf_fuzzer 4 | .ycm_extra_conf.py 5 | -------------------------------------------------------------------------------- /common.h: -------------------------------------------------------------------------------- 1 | #ifndef COMMON_H_AVKWXLV7 2 | #define COMMON_H_AVKWXLV7 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | 26 | typedef __s8 s8; 27 | typedef __s16 s16; 28 | typedef __s32 s32; 29 | typedef __s64 s64; 30 | typedef __u8 u8; 31 | typedef __u16 u16; 32 | typedef __u32 u32; 33 | typedef __u64 u64; 34 | 35 | extern char *bpf_reg_str[]; 36 | extern char *bpf_alu_op_str[]; 37 | extern char *bpf_jmp_op_str[]; 38 | extern char *bpf_size_str[]; 39 | 40 | #endif /* end of include guard: COMMON_H_AVKWXLV7 */ 41 | -------------------------------------------------------------------------------- /init.c: -------------------------------------------------------------------------------- 1 | #include "ebpf_fuzzer.h" 2 | 3 | extern struct ebpf_fuzz_target kern_5_8; 4 | 5 | static LIST_HEAD(fuzzer_targets); 6 | 7 | static int register_target(struct ebpf_fuzz_target *target) 8 | { 9 | struct ebpf_fuzz_target *tmp; 10 | list_for_each_entry(tmp, &fuzzer_targets, sibling) { 11 | if (!strcmp(tmp->target_name, target->target_name)) 12 | return -1; 13 | } 14 | 15 | list_add_tail(&target->sibling, &fuzzer_targets); 16 | return 0; 17 | } 18 | 19 | int init(void) 20 | { 21 | int err = 0; 22 | err = kern_5_8.init(&kern_5_8); 23 | if (err == -1) { 24 | err_dbg(0, 
"kern_5_8.init() err"); 25 | return -1; 26 | } 27 | 28 | err = register_target(&kern_5_8); 29 | if (err == -1) { 30 | err_dbg(0, "register_target err"); 31 | return -1; 32 | } 33 | 34 | return 0; 35 | } 36 | 37 | struct ebpf_fuzz_target *find_target(char *version) 38 | { 39 | struct ebpf_fuzz_target *tmp; 40 | list_for_each_entry(tmp, &fuzzer_targets, sibling) { 41 | if (!strcmp(tmp->target_name, version)) 42 | return tmp; 43 | } 44 | 45 | return NULL; 46 | } 47 | -------------------------------------------------------------------------------- /common.c: -------------------------------------------------------------------------------- 1 | #include "./common.h" 2 | 3 | char *bpf_reg_str[] = { 4 | [0] = "BPF_REG_0", 5 | [1] = "BPF_REG_1", 6 | [2] = "BPF_REG_2", 7 | [3] = "BPF_REG_3", 8 | [4] = "BPF_REG_4", 9 | [5] = "BPF_REG_5", 10 | [6] = "BPF_REG_6", 11 | [7] = "BPF_REG_7", 12 | [8] = "BPF_REG_8", 13 | [9] = "BPF_REG_9", 14 | [10] = "BPF_REG_10", 15 | }; 16 | 17 | char *bpf_alu_op_str[] = { 18 | [BPF_ADD] = "BPF_ADD", 19 | [BPF_SUB] = "BPF_SUB", 20 | [BPF_MUL] = "BPF_MUL", 21 | [BPF_DIV] = "BPF_DIV", 22 | [BPF_OR] = "BPF_OR", 23 | [BPF_AND] = "BPF_AND", 24 | [BPF_LSH] = "BPF_LSH", 25 | [BPF_RSH] = "BPF_RSH", 26 | [BPF_NEG] = "BPF_NEG", 27 | [BPF_MOD] = "BPF_MOD", 28 | [BPF_XOR] = "BPF_XOR", 29 | [BPF_MOV] = "BPF_MOV", 30 | [BPF_ARSH] = "BPF_ARSH", 31 | }; 32 | 33 | char *bpf_jmp_op_str[] = { 34 | [BPF_JA] = "BPF_JA", 35 | [BPF_JEQ] = "BPF_JEQ", 36 | [BPF_JGT] = "BPF_JGT", 37 | [BPF_JGE] = "BPF_JGE", 38 | [BPF_JSET] = "BPF_JSET", 39 | [BPF_JNE] = "BPF_JNE", 40 | [BPF_JLT] = "BPF_JLT", 41 | [BPF_JLE] = "BPF_JLE", 42 | [BPF_JSGT] = "BPF_JSGT", 43 | [BPF_JSGE] = "BPF_JSGE", 44 | [BPF_JSLT] = "BPF_JSLT", 45 | [BPF_JSLE] = "BPF_JSLE", 46 | [BPF_CALL] = "BPF_CALL", 47 | [BPF_EXIT] = "BPF_EXIT", 48 | }; 49 | 50 | char *bpf_size_str[] = { 51 | [BPF_W] = "BPF_W", 52 | [BPF_H] = "BPF_H", 53 | [BPF_B] = "BPF_B", 54 | [BPF_DW] = "BPF_DW", 55 | }; 56 | 
-------------------------------------------------------------------------------- /gen-insn-common.c: -------------------------------------------------------------------------------- 1 | #include "ebpf_fuzzer.h" 2 | 3 | int gen_non_insn(struct bpf_insn *insns, int *idx) 4 | { 5 | unsigned __idx = *idx; 6 | COPY_INSNS(insns, __idx, BPF_MOV64_IMM(BPF_REG_0, 0)); 7 | *idx = __idx; 8 | return 0; 9 | } 10 | 11 | int gen_jmp_insn_common(struct bpf_insn *insns, int *idx, int is_imm, int is_64, 12 | int reg0, int reg1, long imm_v, int op) 13 | { 14 | unsigned __idx = *idx; 15 | 16 | if (is_64 && is_imm) { 17 | COPY_INSNS(insns, __idx, BPF_JMP_IMM(op, reg0, imm_v, 1)); 18 | } else if (is_64 && (!is_imm)) { 19 | COPY_INSNS(insns, __idx, BPF_JMP_REG(op, reg0, reg1, 1)); 20 | } else if ((!is_64) && is_imm) { 21 | COPY_INSNS(insns, __idx, BPF_JMP32_IMM(op, reg0, imm_v, 1)); 22 | } else if ((!is_64) && (!is_imm)) { 23 | COPY_INSNS(insns, __idx, BPF_JMP32_REG(op, reg0, reg1, 1)); 24 | } 25 | COPY_INSNS(insns, __idx, BPF_EXIT_INSN()); 26 | 27 | *idx = __idx; 28 | return 0; 29 | } 30 | 31 | int gen_alu_insn_common(struct bpf_insn *insns, int *idx, int is_imm, int is_64, 32 | int reg0, int reg1, long imm_v, int op) 33 | { 34 | unsigned __idx = *idx; 35 | 36 | if (is_64 && is_imm) { 37 | COPY_INSNS(insns, __idx, BPF_ALU64_IMM(op, reg0, imm_v)); 38 | } else if (is_64 && (!is_imm)) { 39 | COPY_INSNS(insns, __idx, BPF_ALU64_REG(op, reg0, reg1)); 40 | } else if ((!is_64) && is_imm) { 41 | COPY_INSNS(insns, __idx, BPF_ALU32_IMM(op, reg0, imm_v)); 42 | } else if ((!is_64) && (!is_imm)) { 43 | COPY_INSNS(insns, __idx, BPF_ALU32_REG(op, reg0, reg1)); 44 | } 45 | 46 | *idx = __idx; 47 | return 0; 48 | } 49 | 50 | int gen_mov_insn_common(struct bpf_insn *insns, int *idx, int is_imm, int is_64, 51 | int reg0, int reg1, long imm_v) 52 | { 53 | unsigned __idx = *idx; 54 | 55 | if (is_64 && is_imm) { 56 | COPY_INSNS(insns, __idx, BPF_MOV64_IMM(reg0, imm_v)); 57 | } else if (is_64 && (!is_imm)) { 58 
| COPY_INSNS(insns, __idx, BPF_MOV64_REG(reg0, reg1)); 59 | } else if ((!is_64) && is_imm) { 60 | COPY_INSNS(insns, __idx, BPF_MOV32_IMM(reg0, imm_v)); 61 | } else if ((!is_64) && (!is_imm)) { 62 | COPY_INSNS(insns, __idx, BPF_MOV32_REG(reg0, reg1)); 63 | } 64 | 65 | *idx = __idx; 66 | return 0; 67 | } 68 | 69 | int gen_ld_insn_common(struct bpf_insn *insns, int *idx, int reg0, long imm_v) 70 | { 71 | unsigned __idx = *idx; 72 | 73 | COPY_INSNS(insns, __idx, BPF_LD_IMM64(reg0, imm_v)); 74 | 75 | *idx = __idx; 76 | return 0; 77 | } 78 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | SELF_CFLAGS_N = 2 2 | NEED_CLIB = 1 3 | SELF_DEBUG = 1 4 | BUILD_LIB = 0 5 | 6 | export MAKE_OPT := --no-print-directory 7 | export Q := 8 | #export Q := @ 9 | export CC = gcc 10 | export CXX = g++ 11 | export MAKE = make 12 | export RM = rm -f 13 | export INSTALL = install 14 | export CC_ECHO = " CC " 15 | export CXX_ECHO = " CXX " 16 | export LD_ECHO = " LD " 17 | export GEN_ECHO = " GEN " 18 | export CLEAN_ECHO = " CLEAN " 19 | export INSTALL_ECHO = "INSTALL" 20 | export SRC_ECHO = " <== " 21 | 22 | export ARCH = $(shell getconf LONG_BIT) 23 | export SCRIPT_DIR = $(dirname $(readlink -f "$0")) 24 | 25 | ifeq ($(Q), @) 26 | export MAKE_OPT += -s 27 | endif 28 | 29 | ifeq ($(NEED_CLIB), 1) 30 | export CLIB_PATH = /home/$(LOGNAME)/workspace/clib 31 | export CLIB_INC = $(CLIB_PATH)/include 32 | export CLIB_LIB = $(CLIB_PATH)/lib 33 | export CLIB_SO = clib$(ARCH) 34 | endif 35 | 36 | GCC_VER_MAJ := $(shell expr `gcc -dumpversion | cut -f1 -d.`) 37 | GCC_PLUGIN_BASE = /usr/lib/gcc/x86_64-linux-gnu 38 | export GCC_PLUGIN_INC = $(GCC_PLUGIN_BASE)/$(GCC_VER_MAJ)/plugin/include 39 | 40 | SELF_CFLAGS = 41 | 42 | ifeq ($(SELF_DEBUG), 1) 43 | SELF_CFLAGS += -g 44 | endif 45 | 46 | ifeq ($(BUILD_LIB), 1) 47 | SELF_CFLAGS += -shared 48 | endif 49 | 50 | SELF_CFLAGS += 
-fPIC -rdynamic 51 | SELF_CFLAGS += -Wall -O$(SELF_CFLAGS_N) 52 | # TODO: Put SELF_CFLAGS here 53 | 54 | 55 | 56 | export CFLAGS = -std=gnu11 $(SELF_CFLAGS) $(EXTRA_CFLAGS) 57 | export CXXFLAGS = -std=gnu++11 $(SELF_CFLAGS) $(EXTRA_CFLAGS) 58 | 59 | # TODO: Put rules here 60 | CC_SRCS = ebpf_fuzzer.c \ 61 | init.c \ 62 | common.c \ 63 | insn_print.c \ 64 | gen-insn-common.c \ 65 | gen-insn.c 66 | 67 | CC_OBJS = $(CC_SRCS:.c=.o) 68 | 69 | CXX_SRCS = 70 | 71 | CXX_OBJS = $(CXX_SRCS:%.cc=%.o) 72 | 73 | # _SO = $(_SRCS:%.c:%.so) 74 | OUTFILE = ebpf_fuzzer 75 | 76 | # INSTALLS = $(addprefix $(OUTDIR)/,$(OUTFILE)) 77 | INSTALLS = 78 | 79 | CFLAGS += 80 | 81 | all: $(OUTFILE) 82 | 83 | # $(Q)$(CC) $(CFLAGS) $(CC_OBJS) $(CXX_OBJS) -L$(CLIB_LIB) -l$(CLIB_SO) -o $(OUTFILE) -Wl,-rpath $(CLIB_LIB) 84 | $(OUTFILE): $(CC_OBJS) $(CXX_OBJS) 85 | $(Q)$(CC) $(CFLAGS) $(CC_OBJS) $(CXX_OBJS) -L$(CLIB_LIB) -l$(CLIB_SO) -o $(OUTFILE) -Wl,-rpath $(CLIB_LIB) 86 | 87 | # $(Q)$(CC) $(CFLAGS) -I$(CLIB_INC) -c -o $@ $< 88 | $(CC_OBJS): %.o: %.c 89 | $(Q)$(CC) $(CFLAGS) -I$(CLIB_INC) -c -o $@ $< 90 | 91 | # $(Q)$(CXX) $(CXXFLAGS) -I$(CLIB_INC) -c -o $@ $< 92 | $(CXX_OBJS): %.o: %.cc 93 | # TODO 94 | 95 | install: $(INSTALLS) 96 | 97 | clean: 98 | $(Q)$(RM) $(CC_OBJS) 99 | $(Q)$(RM) $(CXX_OBJS) 100 | $(Q)$(RM) $(OUTFILE) 101 | 102 | distclean: clean 103 | $(Q)$(RM) $(INSTALLS) 104 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # INTRODUCTION 2 | The idea comes from scannell's blog, [Fuzzing for eBPF JIT bugs in the Linux kernel](https://scannell.io/posts/ebpf-fuzzing/). 3 | 4 | It contains three parts: 5 | - [qemu fuzzlib](https://github.com/snorez/clib/blob/master/src/qemu_fuzzlib.c) 6 | - ebpf sample generator 7 | - exception handler in the linux kernel 8 | 9 | # QEMU FUZZLIB 10 | This module is mainly used to test the linux kernel. 
It uses [the modified syzkaller script](https://github.com/snorez/clib/blob/master/tools/create-image.sh) to generate a Debian buster image file and all other necessary files.
40 | - `guest_user`: the user will be used to login the guest, could be `test` or `root`. We need a normal user to trigger different code paths in the kernel. 41 | - `script_file`: the script file will be uploaded to the guest and be executed in the guest. Default: `default_guest.sh`. 42 | - `c_file`: the C source file that will be uploaded to the guest and be compiled and executed in the guest to execute the sample and catch the exception of the sample process. Default: `default_guest.c`. 43 | - `sample_fname`: the sample filename. 44 | - `fuzz_db`: the fuzzing database, not used for now. 45 | - `db_init`: the callback used to initialize the database. 46 | - `mutate`: the callback used to generate new sample. 47 | 48 | After the fuzzing environment is setup, the caller should call `qemu_fuzzlib_env_run()` to start the fuzzer. 49 | 50 | The `qemu_fuzzlib_env_run()` function generates new sample and put it into an available qemu instance to execute, until no more sample is generated or no available qemu instance found after the `idle_sec` seconds. 51 | 52 | # EBPF SAMPLE GENERATOR 53 | We need to focus on just one thing: the `mutate()` callback. This function is used to generate new sample, in this ebpf fuzzer, to generate new ebpf sample. 54 | 55 | The scannell's blog give us a perfect guidance to generate ebpf samples. I recommend you to read the blog first. 56 | 57 | In the current implementation, the sample's header and tail are known. We need to generate the sample body, which is filled by ebpf instructions. The instructions do several things: 58 | - get the two bpf map pointers. 59 | - random instructions to manipulate the `INVALID_P_REG`. implemented in `insn_body()`. 60 | - an ALU operation on `CORRUPT_REG`. 61 | - read from `CORRUPT_REG` and write the value to `STORAGE_REG`. 62 | - exit 63 | 64 | After all instructions generated, we need to print the instructions and write them to the sample file. 
65 | 66 | ### insn_body() 67 | - `gen_body0()`: set the `SPECIAL_REG` bounds. 68 | - `gen_body1()`: generate bpf instructions up to `max_body_insn`. Six types of instructions: 69 | - `INSN_GENERATOR_JMP`: `BPF_JMP`. 70 | - `INSN_GENERATOR_ALU`: `BPF_ALU`. 71 | - `INSN_GENERATOR_MOV`: `BPF_MOV`. 72 | - `INSN_GENERATOR_LD`: `BPF_LD_IMM64()`. 73 | - `INSN_GENERATOR_NON`: `BPF_REG_0` = `0`. 74 | - `INSN_GENERATOR_MAX`: the last insn, `INVALID_P_REG` = `SPECIAL_REG`. 75 | 76 | # EXCEPTION HANDLER IN THE LINUX KERNEL 77 | The first time I run the fuzzer to trigger [cve-2020-8835](https://www.thezdi.com/blog/2020/4/8/cve-2020-8835-linux-kernel-privilege-escalation-via-improper-ebpf-program-verification), the guest frozen: one of the kernel threads runs into an infinite loop. [Check this commit](https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/kernel/bpf/verifier.c?h=v5.10.70&id=569033c0825e4d90f7e824696dd334d239adc997): the verifier rewrote original instructions it recognized as dead code with 'goto PC-1'. 78 | 79 | This is a good way to detect bugs in the bpf verifier. 80 | 81 | What else? 82 | 83 | # How to run 84 | After compiling the clib and this project, use `./ebpf_fuzzer /path/to/config 0` to startup the fuzzer. 
85 | 86 | For the bzImage file, make sure the following config options are enabled: 87 | ``` 88 | CONFIG_CONFIGFS_FS=y 89 | CONFIG_SECURITYFS=y 90 | CONFIG_E1000=y 91 | CONFIG_BINFMT_MISC=y 92 | ``` 93 | 94 | When the bzImage and buster.img are ready, test the qemu first: 95 | 96 | Launch qemu: 97 | ``` 98 | /usr/bin/qemu-system-x86_64 -m 2G -smp 2 -kernel /path/to/bzImage -append 'console=ttyS0 root=/dev/sda earlyprintk=serial net.ifnames=0' -drive file=/path/to/buster.img,format=raw -net user,host=10.0.2.10,hostfwd=tcp:127.0.0.1:10021-:22 -net nic,model=e1000 -enable-kvm -nographic 99 | ``` 100 | 101 | Communicate with the guest 102 | ``` 103 | ssh -q -i /path/to/buster.id_rsa -p 10021 -o 'StrictHostKeyChecking no' test@127.0.0.1 id 104 | ``` 105 | 106 | An example of the config file: 107 | ```json 108 | [ 109 | { 110 | "version": "general", 111 | "qemu_exec_path": "/path/to/qemu-system-x86_64", 112 | "bzImage_path": "/path/to/bzImage", 113 | "osImage_path": "/path/to/buster.img", 114 | "rsa_path": "/path/to/buster.id_rsa", 115 | "idle_sec": "1800", 116 | "host_ip": "10.0.2.10", 117 | "instance_nr": "8", 118 | "instance_memsz": "1", 119 | "instance_core": "2", 120 | "env_workdir": "/path/to/fuzzer_workdir", 121 | "guest_workdir": "/tmp/", 122 | "guest_user": "test", 123 | "sample_fname": "test.c", 124 | "body1_len": "24", 125 | } 126 | ] 127 | ``` 128 | The `body1_len` is used in mutate module, it's the count of instructions to generate in `gen_body1()`. The larger you give, the lower valid sample rate you will get. Default value is 0x18. 129 | 130 | # FAQ 131 | Q: When running the fuzzer, the output is 'total: 0'?
132 | A: Try to create the buster image with `./create-image.sh --distribution buster`. Check [issue #1](https://github.com/snorez/ebpf-fuzzer/issues/1). 133 | -------------------------------------------------------------------------------- /ebpf_fuzzer.c: -------------------------------------------------------------------------------- 1 | #include "ebpf_fuzzer.h" 2 | 3 | static char *qemu_fuzzlib_user_name = "ebpf_fuzzer"; 4 | static u64 qemu_fuzzlib_userid = 0; 5 | 6 | static char *target_version = NULL; 7 | static char *qemu_exec_path = NULL; 8 | static char *bzImage_path = NULL; 9 | static char *osImage_path = NULL; 10 | static char *rsa_path = NULL; 11 | static u32 idle_sec = 0; 12 | static char *host_ip = NULL; 13 | static u32 instance_nr = 0; 14 | static u32 instance_memsz = 0; 15 | static u32 instance_core = 0; 16 | static char *env_workdir = NULL; 17 | static char *guest_workdir = NULL; 18 | static char *guest_user = NULL; 19 | static char *guest_sh_file = NULL; 20 | static char *guest_c_file = NULL; 21 | static char *sample_fname = NULL; 22 | static char *db_file = NULL; 23 | static u32 body1_len = 0x18; 24 | 25 | struct ebpf_fuzz_target *target; 26 | 27 | static int db_init(struct qemu_fuzzlib_env *env) 28 | { 29 | return 0; 30 | } 31 | 32 | static int mutate(struct qemu_fuzzlib_env *env, char *outfile) 33 | { 34 | int err = 0; 35 | size_t sz = 0; 36 | char body[BODY_LEN]; 37 | 38 | int outfd = open(outfile, O_WRONLY | O_CREAT | O_TRUNC, S_IRWXU); 39 | if (outfd == -1) { 40 | err_dbg(1, "open err"); 41 | return QEMU_FUZZLIB_MUTATE_ERR; 42 | } 43 | 44 | err = target->gen_sample_body(body, BODY_LEN, body1_len); 45 | if (err == -1) { 46 | close(outfd); 47 | return QEMU_FUZZLIB_MUTATE_DONE; 48 | } 49 | 50 | sz = strlen(target->sample_header); 51 | err = write(outfd, target->sample_header, sz); 52 | if (err != sz) { 53 | if (err == -1) 54 | err_dbg(1, "write err"); 55 | close(outfd); 56 | return QEMU_FUZZLIB_MUTATE_ERR; 57 | } 58 | 59 | sz = strlen(body); 60 
| err = write(outfd, body, sz); 61 | if (err != sz) { 62 | if (err == -1) 63 | err_dbg(1, "write err"); 64 | close(outfd); 65 | return QEMU_FUZZLIB_MUTATE_ERR; 66 | } 67 | 68 | sz = strlen(target->sample_tail); 69 | err = write(outfd, target->sample_tail, sz); 70 | if (err != sz) { 71 | if (err == -1) 72 | err_dbg(1, "write err"); 73 | close(outfd); 74 | return QEMU_FUZZLIB_MUTATE_ERR; 75 | } 76 | 77 | /* TODO: update database */ 78 | close(outfd); 79 | return QEMU_FUZZLIB_MUTATE_OK; 80 | } 81 | 82 | static void usage(char *argv[]) 83 | { 84 | fprintf(stderr, "Usage: %s config is_test\n", argv[0]); 85 | } 86 | 87 | static int validate_args(int argc, char *argv[]) 88 | { 89 | if (argc != 3) { 90 | return -1; 91 | } 92 | 93 | if (!path_exists(argv[1])) { 94 | err_dbg(0, "%s not exists", argv[1]); 95 | return -1; 96 | } 97 | 98 | return 0; 99 | } 100 | 101 | static int parse_conf_kv(char *key, char *val) 102 | { 103 | char *p = NULL; 104 | 105 | p = strdup(val); 106 | if (!p) { 107 | err_dbg(1, "strdup err"); 108 | return -1; 109 | } 110 | 111 | if (!strcmp(key, "version")) { 112 | target_version = p; 113 | } else if (!strcmp(key, "qemu_exec_path")) { 114 | qemu_exec_path = p; 115 | } else if (!strcmp(key, "bzImage_path")) { 116 | bzImage_path = p; 117 | } else if (!strcmp(key, "osImage_path")) { 118 | osImage_path = p; 119 | } else if (!strcmp(key, "rsa_path")) { 120 | rsa_path = p; 121 | } else if (!strcmp(key, "host_ip")) { 122 | host_ip = p; 123 | } else if (!strcmp(key, "instance_nr")) { 124 | instance_nr = atoi(p); 125 | free(p); 126 | } else if (!strcmp(key, "instance_memsz")) { 127 | instance_memsz = atoi(p); 128 | free(p); 129 | } else if (!strcmp(key, "instance_core")) { 130 | instance_core = atoi(p); 131 | free(p); 132 | } else if (!strcmp(key, "env_workdir")) { 133 | env_workdir = p; 134 | } else if (!strcmp(key, "guest_workdir")) { 135 | guest_workdir = p; 136 | } else if (!strcmp(key, "guest_user")) { 137 | guest_user = p; 138 | } else if (!strcmp(key, 
"guest_sh_file")) { 139 | guest_sh_file = p; 140 | } else if (!strcmp(key, "guest_c_file")) { 141 | guest_c_file = p; 142 | } else if (!strcmp(key, "sample_fname")) { 143 | sample_fname = p; 144 | } else if (!strcmp(key, "db_file")) { 145 | db_file = p; 146 | } else if (!strcmp(key, "idle_sec")) { 147 | idle_sec = atoi(p); 148 | free(p); 149 | } else if (!strcmp(key, "body1_len")) { 150 | body1_len = (u32)atoi(p); 151 | free(p); 152 | } else { 153 | err_dbg(0, "{%s:%s} not recognised", key, val); 154 | free(p); 155 | return -1; 156 | } 157 | 158 | return 0; 159 | } 160 | 161 | static int parse_conf(const char *conf) 162 | { 163 | int err = 0; 164 | struct list_head conf_head; 165 | 166 | INIT_LIST_HEAD(&conf_head); 167 | err = clib_json_load(conf, &conf_head); 168 | if (err == -1) { 169 | err_dbg(0, "clib_json_load err"); 170 | return -1; 171 | } 172 | 173 | struct clib_json *tmp; 174 | tmp = list_first_entry_or_null(&conf_head, struct clib_json, sibling); 175 | if (!tmp) { 176 | err_dbg(0, "no json entry"); 177 | goto err_out; 178 | } 179 | 180 | struct clib_json_kv *cur; 181 | list_for_each_entry(cur, &tmp->kvs, sibling) { 182 | if (cur->val_type != CJVT_STRING) { 183 | err_dbg(0, "json format err"); 184 | goto err_out; 185 | } 186 | 187 | char *bs_key = cur->key; 188 | char *bs_val = cur->value.value; 189 | 190 | err = parse_conf_kv(bs_key, bs_val); 191 | if (err == -1) { 192 | err_dbg(0, "parse_conf_kv err"); 193 | goto err_out; 194 | } 195 | } 196 | 197 | clib_json_cleanup(&conf_head); 198 | return 0; 199 | 200 | err_out: 201 | clib_json_cleanup(&conf_head); 202 | return -1; 203 | } 204 | 205 | static void cleanup(void) 206 | { 207 | free(target_version); 208 | free(qemu_exec_path); 209 | free(bzImage_path); 210 | free(osImage_path); 211 | free(rsa_path); 212 | free(host_ip); 213 | free(env_workdir); 214 | free(guest_workdir); 215 | free(guest_user); 216 | free(guest_sh_file); 217 | free(guest_c_file); 218 | free(sample_fname); 219 | free(db_file); 220 | } 221 
| 222 | static void do_test(void) 223 | { 224 | char *outfile = "/tmp/test_sample.c"; 225 | (void)mutate(NULL, outfile); 226 | } 227 | 228 | int main(int argc, char *argv[]) 229 | { 230 | int err = 0; 231 | char *conf = NULL; 232 | int is_test = 0; 233 | struct qemu_fuzzlib_env *env; 234 | 235 | enable_dbg_mode(); 236 | 237 | err = init(); 238 | if (err == -1) { 239 | err_dbg(0, "init err"); 240 | return -1; 241 | } 242 | 243 | err = validate_args(argc, argv); 244 | if (err == -1) { 245 | usage(argv); 246 | return -1; 247 | } 248 | 249 | conf = argv[1]; 250 | is_test = atoi(argv[2]); 251 | 252 | err = parse_conf(conf); 253 | if (err == -1) { 254 | err_dbg(0, "parse_conf err"); 255 | return -1; 256 | } 257 | 258 | target = find_target(target_version); 259 | if (!target) { 260 | err_dbg(0, "Target %s not found\n"); 261 | return -1; 262 | } 263 | 264 | if (!is_test) { 265 | fprintf(stderr, "qemu_fuzzlib_env_setup ..."); 266 | env = qemu_fuzzlib_env_setup(qemu_fuzzlib_user_name, 267 | qemu_fuzzlib_userid, 268 | qemu_exec_path, bzImage_path, 269 | osImage_path, rsa_path, host_ip, 270 | instance_nr, idle_sec, 271 | instance_memsz, 272 | instance_core, env_workdir, 273 | guest_workdir, guest_user, 274 | guest_sh_file, guest_c_file, 275 | sample_fname, db_file, db_init, 276 | mutate); 277 | if (env) { 278 | fprintf(stderr, "done\n"); 279 | qemu_fuzzlib_env_run(env); 280 | qemu_fuzzlib_env_destroy(env); 281 | } else { 282 | fprintf(stderr, "failed\n"); 283 | } 284 | } else { 285 | do_test(); 286 | } 287 | 288 | cleanup(); 289 | return 0; 290 | } 291 | -------------------------------------------------------------------------------- /ebpf_fuzzer.h: -------------------------------------------------------------------------------- 1 | #ifndef EBPF_FUZZER_H_LEVT96MB 2 | #define EBPF_FUZZER_H_LEVT96MB 3 | 4 | #include 5 | #include "./common.h" 6 | 7 | #define BODY_LEN 0x10000 8 | 9 | struct ebpf_fuzz_target { 10 | struct list_head sibling; 11 | char *target_name; 12 | char 
*sample_header; 13 | char *sample_tail; 14 | 15 | int (*init)(struct ebpf_fuzz_target *target); 16 | int (*gen_sample_body)(char *b, size_t len, 17 | u32 body1_max); 18 | }; 19 | 20 | extern int insn_print_common(char *buf, size_t buflen, struct bpf_insn *insn, 21 | size_t cnt); 22 | extern int init(void); 23 | extern struct ebpf_fuzz_target *find_target(char *version); 24 | 25 | #ifndef BPF_JMP32 26 | #define BPF_JMP32 0x06 27 | #endif 28 | 29 | /* ArgX, context and stack frame pointer register positions. Note, 30 | * Arg1, Arg2, Arg3, etc are used as argument mappings of function 31 | * calls in BPF_CALL instruction. 32 | */ 33 | #define BPF_REG_ARG1 BPF_REG_1 34 | #define BPF_REG_ARG2 BPF_REG_2 35 | #define BPF_REG_ARG3 BPF_REG_3 36 | #define BPF_REG_ARG4 BPF_REG_4 37 | #define BPF_REG_ARG5 BPF_REG_5 38 | #define BPF_REG_CTX BPF_REG_6 39 | #define BPF_REG_FP BPF_REG_10 40 | 41 | /* Additional register mappings for converted user programs. */ 42 | #define BPF_REG_A BPF_REG_0 43 | #define BPF_REG_X BPF_REG_7 44 | #define BPF_REG_TMP BPF_REG_2 /* scratch reg */ 45 | #define BPF_REG_D BPF_REG_8 /* data, callee-saved */ 46 | #define BPF_REG_H BPF_REG_9 /* hlen, callee-saved */ 47 | 48 | /* Kernel hidden auxiliary/helper register. */ 49 | #define BPF_REG_AX MAX_BPF_REG 50 | #define MAX_BPF_EXT_REG (MAX_BPF_REG + 1) 51 | #define MAX_BPF_JIT_REG MAX_BPF_EXT_REG 52 | 53 | /* unused opcode to mark special call to bpf_tail_call() helper */ 54 | #define BPF_TAIL_CALL 0xf0 55 | 56 | /* unused opcode to mark call to interpreter with arguments */ 57 | #define BPF_CALL_ARGS 0xe0 58 | 59 | /* As per nm, we expose JITed images as text (code) section for 60 | * kallsyms. That way, tools like perf can find it to match 61 | * addresses. 62 | */ 63 | #define BPF_SYM_ELF_TYPE 't' 64 | 65 | /* BPF program can access up to 512 bytes of stack space. */ 66 | #define MAX_BPF_STACK 512 67 | 68 | /* Helper macros for filter block array initializers. 
*/ 69 | 70 | /* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */ 71 | 72 | #define BPF_ALU64_REG(OP, DST, SRC) \ 73 | ((struct bpf_insn) { \ 74 | .code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \ 75 | .dst_reg = DST, \ 76 | .src_reg = SRC, \ 77 | .off = 0, \ 78 | .imm = 0 }) 79 | 80 | #define BPF_ALU32_REG(OP, DST, SRC) \ 81 | ((struct bpf_insn) { \ 82 | .code = BPF_ALU | BPF_OP(OP) | BPF_X, \ 83 | .dst_reg = DST, \ 84 | .src_reg = SRC, \ 85 | .off = 0, \ 86 | .imm = 0 }) 87 | 88 | /* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */ 89 | 90 | #define BPF_ALU64_IMM(OP, DST, IMM) \ 91 | ((struct bpf_insn) { \ 92 | .code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \ 93 | .dst_reg = DST, \ 94 | .src_reg = 0, \ 95 | .off = 0, \ 96 | .imm = IMM }) 97 | 98 | #define BPF_ALU32_IMM(OP, DST, IMM) \ 99 | ((struct bpf_insn) { \ 100 | .code = BPF_ALU | BPF_OP(OP) | BPF_K, \ 101 | .dst_reg = DST, \ 102 | .src_reg = 0, \ 103 | .off = 0, \ 104 | .imm = IMM }) 105 | 106 | /* Endianess conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */ 107 | 108 | #define BPF_ENDIAN(TYPE, DST, LEN) \ 109 | ((struct bpf_insn) { \ 110 | .code = BPF_ALU | BPF_END | BPF_SRC(TYPE), \ 111 | .dst_reg = DST, \ 112 | .src_reg = 0, \ 113 | .off = 0, \ 114 | .imm = LEN }) 115 | 116 | /* Short form of mov, dst_reg = src_reg */ 117 | 118 | #define BPF_MOV64_REG(DST, SRC) \ 119 | ((struct bpf_insn) { \ 120 | .code = BPF_ALU64 | BPF_MOV | BPF_X, \ 121 | .dst_reg = DST, \ 122 | .src_reg = SRC, \ 123 | .off = 0, \ 124 | .imm = 0 }) 125 | 126 | #define BPF_MOV32_REG(DST, SRC) \ 127 | ((struct bpf_insn) { \ 128 | .code = BPF_ALU | BPF_MOV | BPF_X, \ 129 | .dst_reg = DST, \ 130 | .src_reg = SRC, \ 131 | .off = 0, \ 132 | .imm = 0 }) 133 | 134 | /* Short form of mov, dst_reg = imm32 */ 135 | 136 | #define BPF_MOV64_IMM(DST, IMM) \ 137 | ((struct bpf_insn) { \ 138 | .code = BPF_ALU64 | BPF_MOV | BPF_K, \ 139 | .dst_reg = DST, \ 140 | .src_reg = 0, \ 141 | .off = 0, \ 142 | .imm = IMM }) 143 | 144 | #define 
BPF_MOV32_IMM(DST, IMM) \ 145 | ((struct bpf_insn) { \ 146 | .code = BPF_ALU | BPF_MOV | BPF_K, \ 147 | .dst_reg = DST, \ 148 | .src_reg = 0, \ 149 | .off = 0, \ 150 | .imm = IMM }) 151 | 152 | /* Special form of mov32, used for doing explicit zero extension on dst. */ 153 | #define BPF_ZEXT_REG(DST) \ 154 | ((struct bpf_insn) { \ 155 | .code = BPF_ALU | BPF_MOV | BPF_X, \ 156 | .dst_reg = DST, \ 157 | .src_reg = DST, \ 158 | .off = 0, \ 159 | .imm = 1 }) 160 | 161 | /* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */ 162 | #define BPF_LD_IMM64(DST, IMM) \ 163 | BPF_LD_IMM64_RAW(DST, 0, IMM) 164 | 165 | #define BPF_LD_IMM64_RAW(DST, SRC, IMM) \ 166 | ((struct bpf_insn) { \ 167 | .code = BPF_LD | BPF_DW | BPF_IMM, \ 168 | .dst_reg = DST, \ 169 | .src_reg = SRC, \ 170 | .off = 0, \ 171 | .imm = (__u32) (IMM) }), \ 172 | ((struct bpf_insn) { \ 173 | .code = 0, /* zero is reserved opcode */ \ 174 | .dst_reg = 0, \ 175 | .src_reg = 0, \ 176 | .off = 0, \ 177 | .imm = ((__u64) (IMM)) >> 32 }) 178 | 179 | /* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */ 180 | #define BPF_LD_MAP_FD(DST, MAP_FD) \ 181 | BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD) 182 | 183 | /* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */ 184 | 185 | #define BPF_MOV64_RAW(TYPE, DST, SRC, IMM) \ 186 | ((struct bpf_insn) { \ 187 | .code = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE), \ 188 | .dst_reg = DST, \ 189 | .src_reg = SRC, \ 190 | .off = 0, \ 191 | .imm = IMM }) 192 | 193 | #define BPF_MOV32_RAW(TYPE, DST, SRC, IMM) \ 194 | ((struct bpf_insn) { \ 195 | .code = BPF_ALU | BPF_MOV | BPF_SRC(TYPE), \ 196 | .dst_reg = DST, \ 197 | .src_reg = SRC, \ 198 | .off = 0, \ 199 | .imm = IMM }) 200 | 201 | /* Direct packet access, R0 = *(uint *) (skb->data + imm32) */ 202 | 203 | #define BPF_LD_ABS(SIZE, IMM) \ 204 | ((struct bpf_insn) { \ 205 | .code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \ 206 | .dst_reg = 0, \ 207 | .src_reg = 0, \ 208 | .off 
= 0, \ 209 | .imm = IMM }) 210 | 211 | /* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */ 212 | 213 | #define BPF_LD_IND(SIZE, SRC, IMM) \ 214 | ((struct bpf_insn) { \ 215 | .code = BPF_LD | BPF_SIZE(SIZE) | BPF_IND, \ 216 | .dst_reg = 0, \ 217 | .src_reg = SRC, \ 218 | .off = 0, \ 219 | .imm = IMM }) 220 | 221 | /* Memory load, dst_reg = *(uint *) (src_reg + off16) */ 222 | 223 | #define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \ 224 | ((struct bpf_insn) { \ 225 | .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \ 226 | .dst_reg = DST, \ 227 | .src_reg = SRC, \ 228 | .off = OFF, \ 229 | .imm = 0 }) 230 | 231 | /* Memory store, *(uint *) (dst_reg + off16) = src_reg */ 232 | 233 | #define BPF_STX_MEM(SIZE, DST, SRC, OFF) \ 234 | ((struct bpf_insn) { \ 235 | .code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \ 236 | .dst_reg = DST, \ 237 | .src_reg = SRC, \ 238 | .off = OFF, \ 239 | .imm = 0 }) 240 | 241 | /* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */ 242 | 243 | #define BPF_STX_XADD(SIZE, DST, SRC, OFF) \ 244 | ((struct bpf_insn) { \ 245 | .code = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD, \ 246 | .dst_reg = DST, \ 247 | .src_reg = SRC, \ 248 | .off = OFF, \ 249 | .imm = 0 }) 250 | 251 | /* Memory store, *(uint *) (dst_reg + off16) = imm32 */ 252 | 253 | #define BPF_ST_MEM(SIZE, DST, OFF, IMM) \ 254 | ((struct bpf_insn) { \ 255 | .code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \ 256 | .dst_reg = DST, \ 257 | .src_reg = 0, \ 258 | .off = OFF, \ 259 | .imm = IMM }) 260 | 261 | /* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */ 262 | 263 | #define BPF_JMP_REG(OP, DST, SRC, OFF) \ 264 | ((struct bpf_insn) { \ 265 | .code = BPF_JMP | BPF_OP(OP) | BPF_X, \ 266 | .dst_reg = DST, \ 267 | .src_reg = SRC, \ 268 | .off = OFF, \ 269 | .imm = 0 }) 270 | 271 | /* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */ 272 | 273 | #define BPF_JMP_IMM(OP, DST, IMM, OFF) \ 274 | ((struct bpf_insn) { \ 275 | .code = 
BPF_JMP | BPF_OP(OP) | BPF_K, \ 276 | .dst_reg = DST, \ 277 | .src_reg = 0, \ 278 | .off = OFF, \ 279 | .imm = IMM }) 280 | 281 | /* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */ 282 | 283 | #define BPF_JMP32_REG(OP, DST, SRC, OFF) \ 284 | ((struct bpf_insn) { \ 285 | .code = BPF_JMP32 | BPF_OP(OP) | BPF_X, \ 286 | .dst_reg = DST, \ 287 | .src_reg = SRC, \ 288 | .off = OFF, \ 289 | .imm = 0 }) 290 | 291 | /* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */ 292 | 293 | #define BPF_JMP32_IMM(OP, DST, IMM, OFF) \ 294 | ((struct bpf_insn) { \ 295 | .code = BPF_JMP32 | BPF_OP(OP) | BPF_K, \ 296 | .dst_reg = DST, \ 297 | .src_reg = 0, \ 298 | .off = OFF, \ 299 | .imm = IMM }) 300 | 301 | /* Unconditional jumps, goto pc + off16 */ 302 | 303 | #define BPF_JMP_A(OFF) \ 304 | ((struct bpf_insn) { \ 305 | .code = BPF_JMP | BPF_JA, \ 306 | .dst_reg = 0, \ 307 | .src_reg = 0, \ 308 | .off = OFF, \ 309 | .imm = 0 }) 310 | 311 | /* Relative call */ 312 | 313 | #define BPF_CALL_REL(TGT) \ 314 | ((struct bpf_insn) { \ 315 | .code = BPF_JMP | BPF_CALL, \ 316 | .dst_reg = 0, \ 317 | .src_reg = BPF_PSEUDO_CALL, \ 318 | .off = 0, \ 319 | .imm = TGT }) 320 | 321 | #define BPF_EMIT_CALL(FUNC) \ 322 | ((struct bpf_insn) { \ 323 | .code = BPF_JMP | BPF_CALL, \ 324 | .dst_reg = 0, \ 325 | .src_reg = 0, \ 326 | .off = 0, \ 327 | .imm = ((FUNC) - __bpf_call_base) }) 328 | 329 | /* Raw code statement block */ 330 | 331 | #define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \ 332 | ((struct bpf_insn) { \ 333 | .code = CODE, \ 334 | .dst_reg = DST, \ 335 | .src_reg = SRC, \ 336 | .off = OFF, \ 337 | .imm = IMM }) 338 | 339 | /* Program exit */ 340 | 341 | #define BPF_EXIT_INSN() \ 342 | ((struct bpf_insn) { \ 343 | .code = BPF_JMP | BPF_EXIT, \ 344 | .dst_reg = 0, \ 345 | .src_reg = 0, \ 346 | .off = 0, \ 347 | .imm = 0 }) 348 | 349 | #define LISTENER_PORT (1337) 350 | #define LISTENER_BACKLOG (0x30) 351 | #define STORAGE_MAP_SIZE (8192) 352 | #define 
FUZZ_MAP_SIZE (8192) 353 | 354 | #define ARRAY_CNT(arr) (sizeof(arr) / sizeof(arr[0])) 355 | 356 | #define CORRUPT_FD_CONST 10 357 | #define STORAGE_FD_CONST 11 358 | #define CORRUPT_REG BPF_REG_9 359 | #define STORAGE_REG BPF_REG_8 360 | #define SPECIAL_REG BPF_REG_7 361 | #define INVALID_P_REG BPF_REG_6 362 | #define LEAKED_V_REG BPF_REG_5 363 | #define UMAX_REG BPF_REG_4 364 | #define EXTRA0_REG BPF_REG_3 365 | #define EXTRA1_REG BPF_REG_2 366 | #define EXTRA2_REG BPF_REG_1 367 | #define MAGIC_VAL1 0x4142434445464748 368 | #define MAGIC_VAL2 0x494a4b4c4d4e4f40 369 | 370 | #define EBPF_INSN_GET_MAP_ADDR(fd, reg) \ 371 | BPF_MOV64_IMM(BPF_REG_0, 0), \ 372 | BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), \ 373 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), \ 374 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), \ 375 | BPF_LD_MAP_FD(BPF_REG_1, fd), \ 376 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), \ 377 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), \ 378 | BPF_EXIT_INSN(), \ 379 | BPF_MOV64_REG((reg), BPF_REG_0), \ 380 | BPF_MOV64_IMM(BPF_REG_0, 0) 381 | 382 | /* TODO */ 383 | struct reg_usage { 384 | int id; 385 | int in_use: 1; 386 | int exclusive: 1; 387 | }; 388 | 389 | static inline int insn_add_one(struct bpf_insn *insns, int *idx, int max, 390 | struct bpf_insn insn) 391 | { 392 | int start = *idx; 393 | int end = start + 1; 394 | if (end > max) { 395 | return -1; 396 | } 397 | 398 | insns[start] = insn; 399 | *idx = end; 400 | return 0; 401 | } 402 | 403 | static inline int insn_add(struct bpf_insn *insns, int *idx, int max, 404 | struct bpf_insn *src_insn, int src_insn_count) 405 | { 406 | int err = 0; 407 | 408 | for (int i = 0; i < src_insn_count; i++) { 409 | err = insn_add_one(insns, idx, max, src_insn[i]); 410 | if (err < 0) 411 | return -1; 412 | } 413 | 414 | return 0; 415 | } 416 | 417 | static inline int insn_get_map_ptr(struct bpf_insn *insns, int *idx, int max, 418 | int fd, int reg) 419 | { 420 | struct bpf_insn map_p[] = { 421 | 
EBPF_INSN_GET_MAP_ADDR(fd, reg), 422 | }; 423 | 424 | return insn_add(insns, idx, max, map_p, ARRAY_CNT(map_p)); 425 | } 426 | 427 | #define COPY_INSNS(dst, IDX, src) \ 428 | ({\ 429 | int err = 0;\ 430 | struct bpf_insn ____insns[] = {src};\ 431 | err = insn_add(dst, (int *)&IDX, BPF_MAXINSNS, ____insns, ARRAY_CNT(____insns));\ 432 | if (err < 0) \ 433 | return -1;\ 434 | }) 435 | 436 | extern int gen_non_insn(struct bpf_insn *insns, int *idx); 437 | extern int gen_jmp_insn_common(struct bpf_insn *insns, int *idx, int is_imm, 438 | int is_64, int reg0, int reg1, long imm_v, int op); 439 | extern int gen_alu_insn_common(struct bpf_insn *insns, int *idx, int is_imm, 440 | int is_64, int reg0, int reg1, long imm_v, int op); 441 | extern int gen_mov_insn_common(struct bpf_insn *insns, int *idx, int is_imm, 442 | int is_64, int reg0, int reg1, long imm_v); 443 | extern int gen_ld_insn_common(struct bpf_insn *insn, int *idx, int reg0, long imm_v); 444 | 445 | #endif /* end of include guard: EBPF_FUZZER_H_LEVT96MB */ 446 | -------------------------------------------------------------------------------- /insn_print.c: -------------------------------------------------------------------------------- 1 | #include "common.h" 2 | 3 | static char *print_buf; 4 | static size_t print_buflen; 5 | static size_t print_bufidx; 6 | 7 | #define PRINT_TAIL(buf, IDX, cnt) \ 8 | ({\ 9 | if (print_bufidx + strlen(buf) > print_buflen)\ 10 | return -1;\ 11 | memcpy(print_buf + print_bufidx, buf, strlen(buf));\ 12 | print_bufidx += strlen(buf);\ 13 | *idx = IDX + cnt;\ 14 | return 0;\ 15 | }) 16 | 17 | /* BPF_MOV64_REG */ 18 | static int do_mov64_reg_print(struct bpf_insn *insn, size_t cnt, 19 | size_t *idx) 20 | { 21 | char buf[0x100]; 22 | char *i = "BPF_MOV64_REG"; 23 | size_t _idx = *idx; 24 | struct bpf_insn _insn0 = insn[_idx]; 25 | snprintf(buf, 0x100, "%s(%s, %s),\n", i, 26 | bpf_reg_str[_insn0.dst_reg], 27 | bpf_reg_str[_insn0.src_reg]); 28 | 29 | PRINT_TAIL(buf, _idx, 1); 30 | } 31 
| 32 | static int do_mov64_imm_print(struct bpf_insn *insn, size_t cnt, 33 | size_t *idx) 34 | { 35 | char buf[0x100]; 36 | char *i = "BPF_MOV64_IMM"; 37 | size_t _idx = *idx; 38 | struct bpf_insn _insn0 = insn[_idx]; 39 | snprintf(buf, 0x100, "%s(%s, 0x%x),\n", i, 40 | bpf_reg_str[_insn0.dst_reg], 41 | _insn0.imm); 42 | 43 | PRINT_TAIL(buf, _idx, 1); 44 | } 45 | 46 | static int do_alu64_reg_print(struct bpf_insn *insn, size_t cnt, 47 | size_t *idx) 48 | { 49 | char buf[0x100]; 50 | char *i = "BPF_ALU64_REG"; 51 | size_t _idx = *idx; 52 | struct bpf_insn _insn0 = insn[_idx]; 53 | snprintf(buf, 0x100, "%s(%s, %s, %s),\n", i, 54 | bpf_alu_op_str[BPF_OP(_insn0.code)], 55 | bpf_reg_str[_insn0.dst_reg], 56 | bpf_reg_str[_insn0.src_reg]); 57 | 58 | PRINT_TAIL(buf, _idx, 1); 59 | } 60 | 61 | static int do_alu64_imm_print(struct bpf_insn *insn, size_t cnt, 62 | size_t *idx) 63 | { 64 | char buf[0x100]; 65 | char *i = "BPF_ALU64_IMM"; 66 | size_t _idx = *idx; 67 | struct bpf_insn _insn0 = insn[_idx]; 68 | snprintf(buf, 0x100, "%s(%s, %s, 0x%x),\n", i, 69 | bpf_alu_op_str[BPF_OP(_insn0.code)], 70 | bpf_reg_str[_insn0.dst_reg], 71 | _insn0.imm); 72 | 73 | PRINT_TAIL(buf, _idx, 1); 74 | } 75 | 76 | static int do_alu64_print(struct bpf_insn *insn, size_t cnt, size_t *idx) 77 | { 78 | u8 code = insn[*idx].code; 79 | if (BPF_OP(code) == BPF_MOV) { 80 | if (BPF_SRC(code) == BPF_X) { 81 | return do_mov64_reg_print(insn, cnt, idx); 82 | } else if (BPF_SRC(code) == BPF_K) { 83 | return do_mov64_imm_print(insn, cnt, idx); 84 | } else { 85 | fprintf(stderr, "%d\n", __LINE__); 86 | return -1; 87 | } 88 | } else if (BPF_SRC(code) == BPF_X) { 89 | return do_alu64_reg_print(insn, cnt, idx); 90 | } else if (BPF_SRC(code) == BPF_K) { 91 | return do_alu64_imm_print(insn, cnt, idx); 92 | } else { 93 | fprintf(stderr, "%d\n", __LINE__); 94 | return -1; 95 | } 96 | } 97 | 98 | static int do_end_print(struct bpf_insn *insn, size_t cnt, size_t *idx) 99 | { 100 | char buf[0x100]; 101 | char *i = 
"BPF_ENDIAN"; 102 | size_t _idx = *idx; 103 | struct bpf_insn _insn0 = insn[_idx]; 104 | snprintf(buf, 0x100, "%s(%s, %s, 0x%x),\n", i, 105 | bpf_alu_op_str[BPF_SRC(_insn0.code)], 106 | bpf_reg_str[_insn0.dst_reg], 107 | _insn0.imm); 108 | 109 | PRINT_TAIL(buf, _idx, 1); 110 | } 111 | 112 | static int do_mov32_reg_print(struct bpf_insn *insn, size_t cnt, 113 | size_t *idx) 114 | { 115 | char buf[0x100]; 116 | char *i = "BPF_MOV32_REG"; 117 | size_t _idx = *idx; 118 | struct bpf_insn _insn0 = insn[_idx]; 119 | snprintf(buf, 0x100, "%s(%s, %s),\n", i, 120 | bpf_reg_str[_insn0.dst_reg], 121 | bpf_reg_str[_insn0.src_reg]); 122 | 123 | PRINT_TAIL(buf, _idx, 1); 124 | } 125 | 126 | static int do_mov32_imm_print(struct bpf_insn *insn, size_t cnt, 127 | size_t *idx) 128 | { 129 | char buf[0x100]; 130 | char *i = "BPF_MOV32_IMM"; 131 | size_t _idx = *idx; 132 | struct bpf_insn _insn0 = insn[_idx]; 133 | snprintf(buf, 0x100, "%s(%s, 0x%x),\n", i, 134 | bpf_reg_str[_insn0.dst_reg], 135 | _insn0.imm); 136 | 137 | PRINT_TAIL(buf, _idx, 1); 138 | } 139 | 140 | static int do_alu32_reg_print(struct bpf_insn *insn, size_t cnt, 141 | size_t *idx) 142 | { 143 | char buf[0x100]; 144 | char *i = "BPF_ALU32_REG"; 145 | size_t _idx = *idx; 146 | struct bpf_insn _insn0 = insn[_idx]; 147 | snprintf(buf, 0x100, "%s(%s, %s, %s),\n", i, 148 | bpf_alu_op_str[BPF_OP(_insn0.code)], 149 | bpf_reg_str[_insn0.dst_reg], 150 | bpf_reg_str[_insn0.src_reg]); 151 | 152 | PRINT_TAIL(buf, _idx, 1); 153 | } 154 | 155 | static int do_alu32_imm_print(struct bpf_insn *insn, size_t cnt, 156 | size_t *idx) 157 | { 158 | char buf[0x100]; 159 | char *i = "BPF_ALU32_IMM"; 160 | size_t _idx = *idx; 161 | struct bpf_insn _insn0 = insn[_idx]; 162 | snprintf(buf, 0x100, "%s(%s, %s, 0x%x),\n", i, 163 | bpf_alu_op_str[BPF_OP(_insn0.code)], 164 | bpf_reg_str[_insn0.dst_reg], 165 | _insn0.imm); 166 | 167 | PRINT_TAIL(buf, _idx, 1); 168 | } 169 | 170 | static int do_alu32_print(struct bpf_insn *insn, size_t cnt, size_t 
*idx) 171 | { 172 | u8 code = insn[*idx].code; 173 | if (BPF_OP(code) == BPF_END) { 174 | return do_end_print(insn, cnt, idx); 175 | } else if (BPF_OP(code) == BPF_MOV) { 176 | if (BPF_SRC(code) == BPF_X) { 177 | return do_mov32_reg_print(insn, cnt, idx); 178 | } else if (BPF_SRC(code) == BPF_K) { 179 | return do_mov32_imm_print(insn, cnt, idx); 180 | } else { 181 | fprintf(stderr, "%d\n", __LINE__); 182 | return -1; 183 | } 184 | } else if (BPF_SRC(code) == BPF_X) { 185 | return do_alu32_reg_print(insn, cnt, idx); 186 | } else if (BPF_SRC(code) == BPF_K) { 187 | return do_alu32_imm_print(insn, cnt, idx); 188 | } else { 189 | fprintf(stderr, "%d\n", __LINE__); 190 | return -1; 191 | } 192 | } 193 | 194 | static int do_ld_map_fd_print(struct bpf_insn *insn, size_t cnt, 195 | size_t *idx) 196 | { 197 | /* XXX: no check */ 198 | char buf[0x100]; 199 | char *i = "BPF_LD_MAP_FD"; 200 | size_t _idx = *idx; 201 | struct bpf_insn _insn0 = insn[_idx]; 202 | struct bpf_insn _insn1 = insn[_idx+1]; 203 | snprintf(buf, 0x100, "%s(%s, 0x%llx),\n", i, 204 | bpf_reg_str[_insn0.dst_reg], 205 | (u64)_insn1.imm | (u64)_insn0.imm); 206 | 207 | PRINT_TAIL(buf, _idx, 2); 208 | } 209 | 210 | static int do_ld_imm64_print(struct bpf_insn *insn, size_t cnt, 211 | size_t *idx) 212 | { 213 | char buf[0x100]; 214 | char *i = "BPF_LD_IMM64"; 215 | size_t _idx = *idx; 216 | struct bpf_insn _insn0 = insn[_idx]; 217 | struct bpf_insn _insn1 = insn[_idx+1]; 218 | snprintf(buf, 0x100, "%s(%s, 0x%llx),\n", i, 219 | bpf_reg_str[_insn0.dst_reg], 220 | (((u64)_insn1.imm) << 32) | (u64)_insn0.imm); 221 | 222 | PRINT_TAIL(buf, _idx, 2); 223 | } 224 | 225 | static int do_ld_imm64_raw_print(struct bpf_insn *insn, 226 | size_t cnt, size_t *idx) 227 | { 228 | if (insn[*idx].src_reg == BPF_PSEUDO_MAP_FD) { 229 | return do_ld_map_fd_print(insn, cnt, idx); 230 | } else if (insn[*idx].src_reg == 0) { 231 | return do_ld_imm64_print(insn, cnt, idx); 232 | } else { 233 | fprintf(stderr, "%d\n", __LINE__); 234 | 
return -1; 235 | } 236 | } 237 | 238 | static int do_ld_abs_print(struct bpf_insn *insn, size_t cnt, 239 | size_t *idx) 240 | { 241 | char buf[0x100]; 242 | char *i = "BPF_LD_ABS"; 243 | size_t _idx = *idx; 244 | struct bpf_insn _insn0 = insn[_idx]; 245 | snprintf(buf, 0x100, "%s(%s, 0x%x),\n", i, 246 | bpf_size_str[BPF_SIZE(_insn0.code)], 247 | _insn0.imm); 248 | 249 | PRINT_TAIL(buf, _idx, 1); 250 | } 251 | 252 | static int do_ld_ind_print(struct bpf_insn *insn, size_t cnt, 253 | size_t *idx) 254 | { 255 | char buf[0x100]; 256 | char *i = "BPF_LD_IND"; 257 | size_t _idx = *idx; 258 | struct bpf_insn _insn0 = insn[_idx]; 259 | snprintf(buf, 0x100, "%s(%s, %s, 0x%x),\n", i, 260 | bpf_size_str[BPF_SIZE(_insn0.code)], 261 | bpf_reg_str[_insn0.src_reg], 262 | _insn0.imm); 263 | 264 | PRINT_TAIL(buf, _idx, 1); 265 | } 266 | 267 | static int do_ld_print(struct bpf_insn *insn, size_t cnt, size_t *idx) 268 | { 269 | u8 code = insn[*idx].code; 270 | if ((BPF_SIZE(code) == BPF_DW) && (BPF_MODE(code) == BPF_IMM)) { 271 | return do_ld_imm64_raw_print(insn, cnt, idx); 272 | } else if (BPF_MODE(code) == BPF_ABS) { 273 | return do_ld_abs_print(insn, cnt, idx); 274 | } else if (BPF_MODE(code) == BPF_IND) { 275 | return do_ld_ind_print(insn, cnt, idx); 276 | } else { 277 | fprintf(stderr, "%d\n", __LINE__); 278 | return -1; 279 | } 280 | } 281 | 282 | static int do_ldx_mem_print(struct bpf_insn *insn, size_t cnt, 283 | size_t *idx) 284 | { 285 | char buf[0x100]; 286 | char *i = "BPF_LDX_MEM"; 287 | size_t _idx = *idx; 288 | struct bpf_insn _insn0 = insn[_idx]; 289 | snprintf(buf, 0x100, "%s(%s, %s, %s, %hd),\n", i, 290 | bpf_size_str[BPF_SIZE(_insn0.code)], 291 | bpf_reg_str[_insn0.dst_reg], 292 | bpf_reg_str[_insn0.src_reg], 293 | _insn0.off); 294 | 295 | PRINT_TAIL(buf, _idx, 1); 296 | } 297 | 298 | static int do_ldx_print(struct bpf_insn *insn, size_t cnt, size_t *idx) 299 | { 300 | u8 code = insn[*idx].code; 301 | if (BPF_MODE(code) == BPF_MEM) { 302 | return 
do_ldx_mem_print(insn, cnt, idx); 303 | } else { 304 | fprintf(stderr, "%d\n", __LINE__); 305 | return -1; 306 | } 307 | } 308 | 309 | static int do_stx_mem_print(struct bpf_insn *insn, size_t cnt, 310 | size_t *idx) 311 | { 312 | char buf[0x100]; 313 | char *i = "BPF_STX_MEM"; 314 | size_t _idx = *idx; 315 | struct bpf_insn _insn0 = insn[_idx]; 316 | snprintf(buf, 0x100, "%s(%s, %s, %s, %hd),\n", i, 317 | bpf_size_str[BPF_SIZE(_insn0.code)], 318 | bpf_reg_str[_insn0.dst_reg], 319 | bpf_reg_str[_insn0.src_reg], 320 | _insn0.off); 321 | 322 | PRINT_TAIL(buf, _idx, 1); 323 | } 324 | 325 | static int do_stx_xadd_print(struct bpf_insn *insn, size_t cnt, 326 | size_t *idx) 327 | { 328 | char buf[0x100]; 329 | char *i = "BPF_STX_XADD"; 330 | size_t _idx = *idx; 331 | struct bpf_insn _insn0 = insn[_idx]; 332 | snprintf(buf, 0x100, "%s(%s, %s, %s, %hd),\n", i, 333 | bpf_size_str[BPF_SIZE(_insn0.code)], 334 | bpf_reg_str[_insn0.dst_reg], 335 | bpf_reg_str[_insn0.src_reg], 336 | _insn0.off); 337 | 338 | PRINT_TAIL(buf, _idx, 1); 339 | } 340 | 341 | static int do_stx_print(struct bpf_insn *insn, size_t cnt, size_t *idx) 342 | { 343 | u8 code = insn[*idx].code; 344 | if (BPF_MODE(code) == BPF_MEM) { 345 | return do_stx_mem_print(insn, cnt, idx); 346 | } else if (BPF_MODE(code) == BPF_XADD) { 347 | return do_stx_xadd_print(insn, cnt, idx); 348 | } else { 349 | fprintf(stderr, "%d\n", __LINE__); 350 | return -1; 351 | } 352 | } 353 | 354 | static int do_st_mem_print(struct bpf_insn *insn, size_t cnt, 355 | size_t *idx) 356 | { 357 | char buf[0x100]; 358 | char *i = "BPF_ST_MEM"; 359 | size_t _idx = *idx; 360 | struct bpf_insn _insn0 = insn[_idx]; 361 | snprintf(buf, 0x100, "%s(%s, %s, %hd, 0x%x),\n", i, 362 | bpf_size_str[BPF_SIZE(_insn0.code)], 363 | bpf_reg_str[_insn0.dst_reg], 364 | _insn0.off, 365 | _insn0.imm); 366 | 367 | PRINT_TAIL(buf, _idx, 1); 368 | } 369 | 370 | static int do_st_print(struct bpf_insn *insn, size_t cnt, size_t *idx) 371 | { 372 | u8 code = 
insn[*idx].code; 373 | if (BPF_MODE(code) == BPF_MEM) { 374 | return do_st_mem_print(insn, cnt, idx); 375 | } else { 376 | fprintf(stderr, "%d\n", __LINE__); 377 | return -1; 378 | } 379 | } 380 | 381 | static int do_jmp_reg_print(struct bpf_insn *insn, size_t cnt, 382 | size_t *idx) 383 | { 384 | char buf[0x100]; 385 | char *i = "BPF_JMP_REG"; 386 | size_t _idx = *idx; 387 | struct bpf_insn _insn0 = insn[_idx]; 388 | snprintf(buf, 0x100, "%s(%s, %s, %s, %hd),\n", i, 389 | bpf_jmp_op_str[BPF_OP(_insn0.code)], 390 | bpf_reg_str[_insn0.dst_reg], 391 | bpf_reg_str[_insn0.src_reg], 392 | _insn0.off); 393 | 394 | PRINT_TAIL(buf, _idx, 1); 395 | } 396 | 397 | static int do_jmp_imm_print(struct bpf_insn *insn, size_t cnt, 398 | size_t *idx) 399 | { 400 | char buf[0x100]; 401 | char *i = "BPF_JMP_IMM"; 402 | size_t _idx = *idx; 403 | struct bpf_insn _insn0 = insn[_idx]; 404 | snprintf(buf, 0x100, "%s(%s, %s, 0x%x, %hd),\n", i, 405 | bpf_jmp_op_str[BPF_OP(_insn0.code)], 406 | bpf_reg_str[_insn0.dst_reg], 407 | _insn0.imm, 408 | _insn0.off); 409 | 410 | PRINT_TAIL(buf, _idx, 1); 411 | } 412 | 413 | static int do_jmp_a_print(struct bpf_insn *insn, size_t cnt, 414 | size_t *idx) 415 | { 416 | char buf[0x100]; 417 | char *i = "BPF_JMP_A"; 418 | size_t _idx = *idx; 419 | struct bpf_insn _insn0 = insn[_idx]; 420 | snprintf(buf, 0x100, "%s(%hd),\n", i, 421 | _insn0.off); 422 | 423 | PRINT_TAIL(buf, _idx, 1); 424 | } 425 | 426 | static int do_call_rel_print(struct bpf_insn *insn, size_t cnt, 427 | size_t *idx) 428 | { 429 | char buf[0x100]; 430 | char *i = "BPF_CALL_REL"; 431 | size_t _idx = *idx; 432 | struct bpf_insn _insn0 = insn[_idx]; 433 | snprintf(buf, 0x100, "%s(0x%x),\n", i, 434 | _insn0.imm); 435 | 436 | PRINT_TAIL(buf, _idx, 1); 437 | } 438 | 439 | static int do_emit_call_print(struct bpf_insn *insn, size_t cnt, 440 | size_t *idx) 441 | { 442 | char buf[0x100]; 443 | char *i = "BPF_EMIT_CALL"; 444 | size_t _idx = *idx; 445 | struct bpf_insn _insn0 = insn[_idx]; 446 | 
snprintf(buf, 0x100, "%s(0x%x),\n", i, 447 | _insn0.imm); 448 | 449 | PRINT_TAIL(buf, _idx, 1); 450 | } 451 | 452 | static int do_call_print(struct bpf_insn *insn, size_t cnt, 453 | size_t *idx) 454 | { 455 | if (insn[*idx].src_reg == BPF_PSEUDO_CALL) { 456 | return do_call_rel_print(insn, cnt, idx); 457 | } else if (insn[*idx].src_reg == 0) { 458 | return do_emit_call_print(insn, cnt, idx); 459 | } else { 460 | fprintf(stderr, "%d\n", __LINE__); 461 | return -1; 462 | } 463 | } 464 | 465 | static int do_exit_print(struct bpf_insn *insn, size_t cnt, size_t *idx) 466 | { 467 | char buf[0x100]; 468 | char *i = "BPF_EXIT_INSN"; 469 | size_t _idx = *idx; 470 | snprintf(buf, 0x100, "%s(),\n", i); 471 | 472 | PRINT_TAIL(buf, _idx, 1); 473 | } 474 | 475 | static int do_jmp64_print(struct bpf_insn *insn, size_t cnt, size_t *idx) 476 | { 477 | u8 code = insn[*idx].code; 478 | if (BPF_OP(code) == BPF_CALL) { 479 | return do_call_print(insn, cnt, idx); 480 | } else if (BPF_OP(code) == BPF_EXIT) { 481 | return do_exit_print(insn, cnt, idx); 482 | } else if (BPF_SRC(code) == BPF_X) { 483 | return do_jmp_reg_print(insn, cnt, idx); 484 | } else if (BPF_SRC(code) == BPF_K) { 485 | return do_jmp_imm_print(insn, cnt, idx); 486 | } else if (BPF_OP(code) == BPF_JA) { 487 | return do_jmp_a_print(insn, cnt, idx); 488 | } else { 489 | fprintf(stderr, "%d\n", __LINE__); 490 | return -1; 491 | } 492 | } 493 | 494 | static int do_jmp32_reg_print(struct bpf_insn *insn, size_t cnt, 495 | size_t *idx) 496 | { 497 | char buf[0x100]; 498 | char *i = "BPF_JMP32_REG"; 499 | size_t _idx = *idx; 500 | struct bpf_insn _insn0 = insn[_idx]; 501 | snprintf(buf, 0x100, "%s(%s, %s, %s, %hd),\n", i, 502 | bpf_jmp_op_str[BPF_OP(_insn0.code)], 503 | bpf_reg_str[_insn0.dst_reg], 504 | bpf_reg_str[_insn0.src_reg], 505 | _insn0.off); 506 | 507 | PRINT_TAIL(buf, _idx, 1); 508 | } 509 | 510 | static int do_jmp32_imm_print(struct bpf_insn *insn, size_t cnt, 511 | size_t *idx) 512 | { 513 | char buf[0x100]; 514 | 
char *i = "BPF_JMP32_IMM"; 515 | size_t _idx = *idx; 516 | struct bpf_insn _insn0 = insn[_idx]; 517 | snprintf(buf, 0x100, "%s(%s, %s, 0x%x, %hd),\n", i, 518 | bpf_jmp_op_str[BPF_OP(_insn0.code)], 519 | bpf_reg_str[_insn0.dst_reg], 520 | _insn0.imm, 521 | _insn0.off); 522 | 523 | PRINT_TAIL(buf, _idx, 1); 524 | } 525 | 526 | static int do_jmp32_print(struct bpf_insn *insn, size_t cnt, size_t *idx) 527 | { 528 | u8 code = insn[*idx].code; 529 | if (BPF_SRC(code) == BPF_X) { 530 | return do_jmp32_reg_print(insn, cnt, idx); 531 | } else if (BPF_SRC(code) == BPF_K) { 532 | return do_jmp32_imm_print(insn, cnt, idx); 533 | } else { 534 | fprintf(stderr, "%d\n", __LINE__); 535 | return -1; 536 | } 537 | } 538 | 539 | static int do_insn_print(struct bpf_insn *insn, size_t cnt) 540 | { 541 | size_t idx = 0; 542 | int err = 0; 543 | u8 code; 544 | 545 | while (idx < cnt) { 546 | code = insn[idx].code; 547 | if (BPF_CLASS(code) == BPF_ALU64) { 548 | err = do_alu64_print(insn, cnt, &idx); 549 | } else if (BPF_CLASS(code) == BPF_ALU) { 550 | err = do_alu32_print(insn, cnt, &idx); 551 | } else if (BPF_CLASS(code) == BPF_LD) { 552 | err = do_ld_print(insn, cnt, &idx); 553 | } else if (BPF_CLASS(code) == BPF_LDX) { 554 | err = do_ldx_print(insn, cnt, &idx); 555 | } else if (BPF_CLASS(code) == BPF_STX) { 556 | err = do_stx_print(insn, cnt, &idx); 557 | } else if (BPF_CLASS(code) == BPF_ST) { 558 | err = do_st_print(insn, cnt, &idx); 559 | } else if (BPF_CLASS(code) == BPF_JMP) { 560 | err = do_jmp64_print(insn, cnt, &idx); 561 | } else if (BPF_CLASS(code) == BPF_JMP32) { 562 | err = do_jmp32_print(insn, cnt, &idx); 563 | } else { 564 | fprintf(stderr, "%d\n", __LINE__); 565 | err = -1; 566 | } 567 | 568 | if (err == -1) 569 | break; 570 | } 571 | 572 | return err; 573 | } 574 | 575 | int insn_print_common(char *buf, size_t buflen, struct bpf_insn *insn, 576 | size_t cnt) 577 | { 578 | /* choose the right handler to print the insn */ 579 | print_buf = buf; 580 | print_buflen = 
buflen; 581 | print_bufidx = 0; 582 | 583 | return do_insn_print(insn, cnt); 584 | } 585 | -------------------------------------------------------------------------------- /gen-insn.c: -------------------------------------------------------------------------------- 1 | #include "ebpf_fuzzer.h" 2 | 3 | static int invalid_reg_used = 0; 4 | static int extra0_reg_used = 0; 5 | static __maybe_unused struct reg_usage regs[MAX_BPF_REG]; /* TODO */ 6 | 7 | enum jmp_ops { 8 | JMP_OPS_MIN, 9 | JMP_OPS_JNE, 10 | JMP_OPS_JLT, 11 | JMP_OPS_JLE, 12 | JMP_OPS_JSGT, 13 | JMP_OPS_JSGE, 14 | JMP_OPS_JSLT, 15 | JMP_OPS_JSLE, 16 | JMP_OPS_JA, 17 | JMP_OPS_JEQ, 18 | JMP_OPS_JGT, 19 | JMP_OPS_JGE, 20 | JMP_OPS_JSET, 21 | JMP_OPS_MAX, 22 | }; 23 | 24 | int jmp_ops_codes[] = { 25 | [JMP_OPS_JNE] = BPF_JNE, 26 | [JMP_OPS_JLT] = BPF_JLT, 27 | [JMP_OPS_JLE] = BPF_JLE, 28 | [JMP_OPS_JSGT] = BPF_JSGT, 29 | [JMP_OPS_JSGE] = BPF_JSGE, 30 | [JMP_OPS_JSLT] = BPF_JSLT, 31 | [JMP_OPS_JSLE] = BPF_JSLE, 32 | [JMP_OPS_JA] = BPF_JA, 33 | [JMP_OPS_JEQ] = BPF_JEQ, 34 | [JMP_OPS_JGT] = BPF_JGT, 35 | [JMP_OPS_JGE] = BPF_JGE, 36 | [JMP_OPS_JSET] = BPF_JSET, 37 | }; 38 | 39 | enum alu_ops { 40 | ALU_OPS_MIN, 41 | ALU_OPS_ADD, 42 | ALU_OPS_SUB, 43 | ALU_OPS_MUL, 44 | ALU_OPS_DIV, 45 | ALU_OPS_OR, 46 | ALU_OPS_AND, 47 | ALU_OPS_LSH, 48 | ALU_OPS_RSH, 49 | ALU_OPS_NEG, 50 | ALU_OPS_MOD, 51 | ALU_OPS_XOR, 52 | ALU_OPS_MAX, 53 | }; 54 | 55 | int alu_ops_codes[] = { 56 | [ALU_OPS_ADD] = BPF_ADD, 57 | [ALU_OPS_SUB] = BPF_SUB, 58 | [ALU_OPS_MUL] = BPF_MUL, 59 | [ALU_OPS_DIV] = BPF_DIV, 60 | [ALU_OPS_OR] = BPF_OR, 61 | [ALU_OPS_AND] = BPF_AND, 62 | [ALU_OPS_LSH] = BPF_LSH, 63 | [ALU_OPS_RSH] = BPF_RSH, 64 | [ALU_OPS_NEG] = BPF_NEG, 65 | [ALU_OPS_MOD] = BPF_MOD, 66 | [ALU_OPS_XOR] = BPF_XOR, 67 | }; 68 | 69 | enum insn_generator_idx { 70 | INSN_GENERATOR_MIN, 71 | INSN_GENERATOR_JMP, 72 | INSN_GENERATOR_ALU, 73 | INSN_GENERATOR_MOV, 74 | INSN_GENERATOR_LD, 75 | INSN_GENERATOR_NON, 76 | INSN_GENERATOR_MAX, 77 | }; 78 | 
79 | static inline int rand_ops(int *arr, int min, int max) 80 | { 81 | int idx = rand_range(min, max); 82 | return arr[idx]; 83 | } 84 | 85 | static inline int rand_jmp_ops(void) 86 | { 87 | return rand_ops(jmp_ops_codes, JMP_OPS_MIN+1, JMP_OPS_MAX); 88 | } 89 | 90 | static inline int rand_alu_ops(void) 91 | { 92 | return rand_ops(alu_ops_codes, ALU_OPS_MIN+1, ALU_OPS_MAX); 93 | } 94 | 95 | typedef int (*insn_generator)(struct bpf_insn *insns, int *idx); 96 | static int gen_jmp_insn(struct bpf_insn *insns, int *idx); 97 | static int gen_alu_insn(struct bpf_insn *insns, int *idx); 98 | static int gen_mov_insn(struct bpf_insn *insns, int *idx); 99 | static int gen_ld_insn(struct bpf_insn *insns, int *idx); 100 | static int gen_last_insn(struct bpf_insn *insns, int *idx); 101 | static insn_generator generators[] = { 102 | [INSN_GENERATOR_JMP] = gen_jmp_insn, 103 | [INSN_GENERATOR_ALU] = gen_alu_insn, 104 | [INSN_GENERATOR_MOV] = gen_mov_insn, 105 | [INSN_GENERATOR_LD] = gen_ld_insn, 106 | [INSN_GENERATOR_NON] = gen_non_insn, 107 | [INSN_GENERATOR_MAX] = gen_last_insn, 108 | }; 109 | 110 | static int gen_body0_min_bound(struct bpf_insn *insns, int *idx, long minv, 111 | int _signed, int _64bit) 112 | { 113 | int __idx = *idx; 114 | int reg = SPECIAL_REG; 115 | 116 | COPY_INSNS(insns, __idx, BPF_MOV64_IMM(BPF_REG_0, 0)); 117 | if (_64bit) { 118 | if (_signed) 119 | COPY_INSNS(insns, __idx, BPF_JMP_IMM(BPF_JSGT, reg, minv, 1)); 120 | else 121 | COPY_INSNS(insns, __idx, BPF_JMP_IMM(BPF_JGT, reg, minv, 1)); 122 | } else { 123 | if (_signed) 124 | COPY_INSNS(insns, __idx, BPF_JMP32_IMM(BPF_JSGT, reg, minv, 1)); 125 | else 126 | COPY_INSNS(insns, __idx, BPF_JMP32_IMM(BPF_JGT, reg, minv, 1)); 127 | } 128 | COPY_INSNS(insns, __idx, BPF_EXIT_INSN()); 129 | 130 | *idx = __idx; 131 | return 0; 132 | } 133 | 134 | static int gen_body0_max_bound(struct bpf_insn *insns, int *idx, long maxv, 135 | int _signed, int _64bit) 136 | { 137 | int __idx = *idx; 138 | int reg = SPECIAL_REG; 
139 | 140 | COPY_INSNS(insns, __idx, BPF_MOV64_IMM(BPF_REG_0, 0)); 141 | if (s_rand32() % 2) 142 | COPY_INSNS(insns, __idx, BPF_MOV64_IMM(UMAX_REG, maxv)); 143 | else 144 | COPY_INSNS(insns, __idx, BPF_MOV32_IMM(UMAX_REG, maxv)); 145 | 146 | if (_64bit) { 147 | if (_signed) 148 | COPY_INSNS(insns, __idx, BPF_JMP_REG(BPF_JSLT, reg, UMAX_REG, 1)); 149 | else 150 | COPY_INSNS(insns, __idx, BPF_JMP_REG(BPF_JLT, reg, UMAX_REG, 1)); 151 | } else { 152 | if (_signed) 153 | COPY_INSNS(insns, __idx, BPF_JMP32_REG(BPF_JSLT, reg, UMAX_REG, 1)); 154 | else 155 | COPY_INSNS(insns, __idx, BPF_JMP32_REG(BPF_JLT, reg, UMAX_REG, 1)); 156 | } 157 | COPY_INSNS(insns, __idx, BPF_EXIT_INSN()); 158 | 159 | *idx = __idx; 160 | return 0; 161 | } 162 | 163 | static long gen_min_val(void) 164 | { 165 | long v; 166 | v = rand_range(-FUZZ_MAP_SIZE, FUZZ_MAP_SIZE+1); 167 | 168 | return v; 169 | } 170 | 171 | static long gen_max_val(void) 172 | { 173 | long v; 174 | static unsigned range = 0x100; 175 | static unsigned max_bits = 64; 176 | int bits = rand_range(0, max_bits); 177 | int range_v = rand_range(-range, range+1); 178 | v = (1ULL< max_body_insn) { 571 | err = generators[INSN_GENERATOR_MAX](insns, &idx); 572 | if (err < 0) 573 | return -1; 574 | break; 575 | } 576 | } 577 | 578 | *cnt = idx; 579 | return 0; 580 | } 581 | 582 | static int insn_body(struct bpf_insn *insns, int *idx, int max, 583 | unsigned long special_val, u32 body1_max) 584 | { 585 | int err = 0; 586 | 587 | struct bpf_insn header[BPF_MAXINSNS]; 588 | struct bpf_insn body[BPF_MAXINSNS]; 589 | int header_cnt = 0; 590 | int body_cnt = 0; 591 | 592 | err = gen_body0(header, &header_cnt, special_val); 593 | if (err < 0) { 594 | return -1; 595 | } 596 | 597 | err = gen_body1(body, &body_cnt, body1_max); 598 | if (err < 0) { 599 | return -1; 600 | } 601 | 602 | unsigned insn_cnt = header_cnt + body_cnt; 603 | struct bpf_insn this_insns[insn_cnt]; 604 | for (unsigned i = 0; i < header_cnt; i++) { 605 | this_insns[i] = 
header[i]; 606 | } 607 | for (unsigned i = 0; i < body_cnt; i++) { 608 | this_insns[i+header_cnt] = body[i]; 609 | } 610 | 611 | err = insn_add(insns, idx, max, this_insns, insn_cnt); 612 | if (err < 0) { 613 | return -1; 614 | } 615 | 616 | return 0; 617 | } 618 | 619 | static int insn_alu_map_ptr(struct bpf_insn *insns, int *idx, int max) 620 | { 621 | int err = 0; 622 | 623 | struct bpf_insn this_insns[] = { 624 | BPF_ALU64_REG(BPF_SUB, CORRUPT_REG, INVALID_P_REG), 625 | }; 626 | 627 | err = insn_add(insns, idx, max, this_insns, ARRAY_CNT(this_insns)); 628 | if (err < 0) { 629 | return -1; 630 | } 631 | 632 | return 0; 633 | } 634 | 635 | static int insn_write_mem(struct bpf_insn *insns, int *idx, int max) 636 | { 637 | int err = 0; 638 | struct bpf_insn this_insns[] = { 639 | BPF_LDX_MEM(BPF_DW, LEAKED_V_REG, CORRUPT_REG, 0), 640 | BPF_STX_MEM(BPF_DW, STORAGE_REG, LEAKED_V_REG, 8), 641 | BPF_MOV64_IMM(BPF_REG_0, 1), 642 | }; 643 | 644 | err = insn_add(insns, idx, max, this_insns, ARRAY_CNT(this_insns)); 645 | if (err < 0) { 646 | return -1; 647 | } 648 | 649 | return 0; 650 | } 651 | 652 | static int insn_exit(struct bpf_insn *insns, int *idx, int max) 653 | { 654 | struct bpf_insn this_insns[] = { 655 | BPF_EXIT_INSN(), 656 | }; 657 | 658 | return insn_add(insns, idx, max, this_insns, ARRAY_CNT(this_insns)); 659 | } 660 | 661 | static char *sample_header = "" 662 | "#include \n" 663 | "#include \n" 664 | "#include \n" 665 | "#include \n" 666 | "#include \n" 667 | "#include \n" 668 | "#include \n" 669 | "#include \n" 670 | "#include \n" 671 | "#include \n" 672 | "#include \n" 673 | "#include \n" 674 | "#include \n" 675 | "#include \n" 676 | "#include \n" 677 | "#include \n" 678 | "#include \n" 679 | "#include \n" 680 | "#include \n" 681 | "#include \n" 682 | "#include \n" 683 | "\n" 684 | "enum qemu_fuzzlib_inst_res {\n" 685 | " QEMU_FUZZLIB_INST_INVALID = -1,\n" 686 | " QEMU_FUZZLIB_INST_NOT_TESTED = 0,\n" 687 | " QEMU_FUZZLIB_INST_VALID,\n" 688 | " 
QEMU_FUZZLIB_INST_BOOM,\n" 689 | "};\n" 690 | "\n" 691 | "typedef __s8 s8;\n" 692 | "typedef __s16 s16;\n" 693 | "typedef __s32 s32;\n" 694 | "typedef __s64 s64;\n" 695 | "typedef __u8 u8;\n" 696 | "typedef __u16 u16;\n" 697 | "typedef __u32 u32;\n" 698 | "typedef __u64 u64;\n" 699 | "\n" 700 | "struct xmsg {\n" 701 | " unsigned long special_value;\n" 702 | " unsigned long insn_cnt;\n" 703 | " struct bpf_insn insns[BPF_MAXINSNS];\n" 704 | "};\n" 705 | "\n" 706 | "#ifndef BPF_JMP32\n" 707 | "#define BPF_JMP32 0x06\n" 708 | "#endif\n" 709 | "\n" 710 | "/* ArgX, context and stack frame pointer register positions. Note,\n" 711 | " * Arg1, Arg2, Arg3, etc are used as argument mappings of function\n" 712 | " * calls in BPF_CALL instruction.\n" 713 | " */\n" 714 | "#define BPF_REG_ARG1 BPF_REG_1\n" 715 | "#define BPF_REG_ARG2 BPF_REG_2\n" 716 | "#define BPF_REG_ARG3 BPF_REG_3\n" 717 | "#define BPF_REG_ARG4 BPF_REG_4\n" 718 | "#define BPF_REG_ARG5 BPF_REG_5\n" 719 | "#define BPF_REG_CTX BPF_REG_6\n" 720 | "#define BPF_REG_FP BPF_REG_10\n" 721 | "\n" 722 | "/* Additional register mappings for converted user programs. */\n" 723 | "#define BPF_REG_A BPF_REG_0\n" 724 | "#define BPF_REG_X BPF_REG_7\n" 725 | "#define BPF_REG_TMP BPF_REG_2 /* scratch reg */\n" 726 | "#define BPF_REG_D BPF_REG_8 /* data, callee-saved */\n" 727 | "#define BPF_REG_H BPF_REG_9 /* hlen, callee-saved */\n" 728 | "\n" 729 | "/* Kernel hidden auxiliary/helper register. */\n" 730 | "#define BPF_REG_AX MAX_BPF_REG\n" 731 | "#define MAX_BPF_EXT_REG (MAX_BPF_REG + 1)\n" 732 | "#define MAX_BPF_JIT_REG MAX_BPF_EXT_REG\n" 733 | "\n" 734 | "/* unused opcode to mark special call to bpf_tail_call() helper */\n" 735 | "#define BPF_TAIL_CALL 0xf0\n" 736 | "\n" 737 | "/* unused opcode to mark call to interpreter with arguments */\n" 738 | "#define BPF_CALL_ARGS 0xe0\n" 739 | "\n" 740 | "/* As per nm, we expose JITed images as text (code) section for\n" 741 | " * kallsyms. 
That way, tools like perf can find it to match\n" 742 | " * addresses.\n" 743 | " */\n" 744 | "#define BPF_SYM_ELF_TYPE 't'\n" 745 | "\n" 746 | "/* BPF program can access up to 512 bytes of stack space. */\n" 747 | "#define MAX_BPF_STACK 512\n" 748 | "\n" 749 | "/* Helper macros for filter block array initializers. */\n" 750 | "\n" 751 | "/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */\n" 752 | "\n" 753 | "#define BPF_ALU64_REG(OP, DST, SRC) \\\n" 754 | " ((struct bpf_insn) { \\\n" 755 | " .code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \\\n" 756 | " .dst_reg = DST, \\\n" 757 | " .src_reg = SRC, \\\n" 758 | " .off = 0, \\\n" 759 | " .imm = 0 })\n" 760 | "\n" 761 | "#define BPF_ALU32_REG(OP, DST, SRC) \\\n" 762 | " ((struct bpf_insn) { \\\n" 763 | " .code = BPF_ALU | BPF_OP(OP) | BPF_X, \\\n" 764 | " .dst_reg = DST, \\\n" 765 | " .src_reg = SRC, \\\n" 766 | " .off = 0, \\\n" 767 | " .imm = 0 })\n" 768 | "\n" 769 | "/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */\n" 770 | "\n" 771 | "#define BPF_ALU64_IMM(OP, DST, IMM) \\\n" 772 | " ((struct bpf_insn) { \\\n" 773 | " .code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \\\n" 774 | " .dst_reg = DST, \\\n" 775 | " .src_reg = 0, \\\n" 776 | " .off = 0, \\\n" 777 | " .imm = IMM })\n" 778 | "\n" 779 | "#define BPF_ALU32_IMM(OP, DST, IMM) \\\n" 780 | " ((struct bpf_insn) { \\\n" 781 | " .code = BPF_ALU | BPF_OP(OP) | BPF_K, \\\n" 782 | " .dst_reg = DST, \\\n" 783 | " .src_reg = 0, \\\n" 784 | " .off = 0, \\\n" 785 | " .imm = IMM })\n" 786 | "\n" 787 | "/* Endianess conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */\n" 788 | "\n" 789 | "#define BPF_ENDIAN(TYPE, DST, LEN) \\\n" 790 | " ((struct bpf_insn) { \\\n" 791 | " .code = BPF_ALU | BPF_END | BPF_SRC(TYPE), \\\n" 792 | " .dst_reg = DST, \\\n" 793 | " .src_reg = 0, \\\n" 794 | " .off = 0, \\\n" 795 | " .imm = LEN })\n" 796 | "\n" 797 | "/* Short form of mov, dst_reg = src_reg */\n" 798 | "\n" 799 | "#define BPF_MOV64_REG(DST, SRC) \\\n" 800 | " ((struct bpf_insn) 
{ \\\n" 801 | " .code = BPF_ALU64 | BPF_MOV | BPF_X, \\\n" 802 | " .dst_reg = DST, \\\n" 803 | " .src_reg = SRC, \\\n" 804 | " .off = 0, \\\n" 805 | " .imm = 0 })\n" 806 | "\n" 807 | "#define BPF_MOV32_REG(DST, SRC) \\\n" 808 | " ((struct bpf_insn) { \\\n" 809 | " .code = BPF_ALU | BPF_MOV | BPF_X, \\\n" 810 | " .dst_reg = DST, \\\n" 811 | " .src_reg = SRC, \\\n" 812 | " .off = 0, \\\n" 813 | " .imm = 0 })\n" 814 | "\n" 815 | "/* Short form of mov, dst_reg = imm32 */\n" 816 | "\n" 817 | "#define BPF_MOV64_IMM(DST, IMM) \\\n" 818 | " ((struct bpf_insn) { \\\n" 819 | " .code = BPF_ALU64 | BPF_MOV | BPF_K, \\\n" 820 | " .dst_reg = DST, \\\n" 821 | " .src_reg = 0, \\\n" 822 | " .off = 0, \\\n" 823 | " .imm = IMM })\n" 824 | "\n" 825 | "#define BPF_MOV32_IMM(DST, IMM) \\\n" 826 | " ((struct bpf_insn) { \\\n" 827 | " .code = BPF_ALU | BPF_MOV | BPF_K, \\\n" 828 | " .dst_reg = DST, \\\n" 829 | " .src_reg = 0, \\\n" 830 | " .off = 0, \\\n" 831 | " .imm = IMM })\n" 832 | "\n" 833 | "/* Special form of mov32, used for doing explicit zero extension on dst. 
*/\n" 834 | "#define BPF_ZEXT_REG(DST) \\\n" 835 | " ((struct bpf_insn) { \\\n" 836 | " .code = BPF_ALU | BPF_MOV | BPF_X, \\\n" 837 | " .dst_reg = DST, \\\n" 838 | " .src_reg = DST, \\\n" 839 | " .off = 0, \\\n" 840 | " .imm = 1 })\n" 841 | "\n" 842 | "/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */\n" 843 | "#define BPF_LD_IMM64(DST, IMM) \\\n" 844 | " BPF_LD_IMM64_RAW(DST, 0, IMM)\n" 845 | "\n" 846 | "#define BPF_LD_IMM64_RAW(DST, SRC, IMM) \\\n" 847 | " ((struct bpf_insn) { \\\n" 848 | " .code = BPF_LD | BPF_DW | BPF_IMM, \\\n" 849 | " .dst_reg = DST, \\\n" 850 | " .src_reg = SRC, \\\n" 851 | " .off = 0, \\\n" 852 | " .imm = (__u32) (IMM) }), \\\n" 853 | " ((struct bpf_insn) { \\\n" 854 | " .code = 0, /* zero is reserved opcode */ \\\n" 855 | " .dst_reg = 0, \\\n" 856 | " .src_reg = 0, \\\n" 857 | " .off = 0, \\\n" 858 | " .imm = ((__u64) (IMM)) >> 32 })\n" 859 | "\n" 860 | "/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */\n" 861 | "#define BPF_LD_MAP_FD(DST, MAP_FD) \\\n" 862 | " BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)\n" 863 | "\n" 864 | "/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */\n" 865 | "\n" 866 | "#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM) \\\n" 867 | " ((struct bpf_insn) { \\\n" 868 | " .code = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE), \\\n" 869 | " .dst_reg = DST, \\\n" 870 | " .src_reg = SRC, \\\n" 871 | " .off = 0, \\\n" 872 | " .imm = IMM })\n" 873 | "\n" 874 | "#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM) \\\n" 875 | " ((struct bpf_insn) { \\\n" 876 | " .code = BPF_ALU | BPF_MOV | BPF_SRC(TYPE), \\\n" 877 | " .dst_reg = DST, \\\n" 878 | " .src_reg = SRC, \\\n" 879 | " .off = 0, \\\n" 880 | " .imm = IMM })\n" 881 | "\n" 882 | "/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */\n" 883 | "\n" 884 | "#define BPF_LD_ABS(SIZE, IMM) \\\n" 885 | " ((struct bpf_insn) { \\\n" 886 | " .code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \\\n" 887 | " .dst_reg = 0, \\\n" 
888 | " .src_reg = 0, \\\n" 889 | " .off = 0, \\\n" 890 | " .imm = IMM })\n" 891 | "\n" 892 | "/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */\n" 893 | "\n" 894 | "#define BPF_LD_IND(SIZE, SRC, IMM) \\\n" 895 | " ((struct bpf_insn) { \\\n" 896 | " .code = BPF_LD | BPF_SIZE(SIZE) | BPF_IND, \\\n" 897 | " .dst_reg = 0, \\\n" 898 | " .src_reg = SRC, \\\n" 899 | " .off = 0, \\\n" 900 | " .imm = IMM })\n" 901 | "\n" 902 | "/* Memory load, dst_reg = *(uint *) (src_reg + off16) */\n" 903 | "\n" 904 | "#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \\\n" 905 | " ((struct bpf_insn) { \\\n" 906 | " .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \\\n" 907 | " .dst_reg = DST, \\\n" 908 | " .src_reg = SRC, \\\n" 909 | " .off = OFF, \\\n" 910 | " .imm = 0 })\n" 911 | "\n" 912 | "/* Memory store, *(uint *) (dst_reg + off16) = src_reg */\n" 913 | "\n" 914 | "#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \\\n" 915 | " ((struct bpf_insn) { \\\n" 916 | " .code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \\\n" 917 | " .dst_reg = DST, \\\n" 918 | " .src_reg = SRC, \\\n" 919 | " .off = OFF, \\\n" 920 | " .imm = 0 })\n" 921 | "\n" 922 | "/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */\n" 923 | "\n" 924 | "#define BPF_STX_XADD(SIZE, DST, SRC, OFF) \\\n" 925 | " ((struct bpf_insn) { \\\n" 926 | " .code = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD, \\\n" 927 | " .dst_reg = DST, \\\n" 928 | " .src_reg = SRC, \\\n" 929 | " .off = OFF, \\\n" 930 | " .imm = 0 })\n" 931 | "\n" 932 | "/* Memory store, *(uint *) (dst_reg + off16) = imm32 */\n" 933 | "\n" 934 | "#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \\\n" 935 | " ((struct bpf_insn) { \\\n" 936 | " .code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \\\n" 937 | " .dst_reg = DST, \\\n" 938 | " .src_reg = 0, \\\n" 939 | " .off = OFF, \\\n" 940 | " .imm = IMM })\n" 941 | "\n" 942 | "/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */\n" 943 | "\n" 944 | "#define BPF_JMP_REG(OP, DST, SRC, OFF) \\\n" 945 | " 
((struct bpf_insn) { \\\n" 946 | " .code = BPF_JMP | BPF_OP(OP) | BPF_X, \\\n" 947 | " .dst_reg = DST, \\\n" 948 | " .src_reg = SRC, \\\n" 949 | " .off = OFF, \\\n" 950 | " .imm = 0 })\n" 951 | "\n" 952 | "/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */\n" 953 | "\n" 954 | "#define BPF_JMP_IMM(OP, DST, IMM, OFF) \\\n" 955 | " ((struct bpf_insn) { \\\n" 956 | " .code = BPF_JMP | BPF_OP(OP) | BPF_K, \\\n" 957 | " .dst_reg = DST, \\\n" 958 | " .src_reg = 0, \\\n" 959 | " .off = OFF, \\\n" 960 | " .imm = IMM })\n" 961 | "\n" 962 | "/* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */\n" 963 | "\n" 964 | "#define BPF_JMP32_REG(OP, DST, SRC, OFF) \\\n" 965 | " ((struct bpf_insn) { \\\n" 966 | " .code = BPF_JMP32 | BPF_OP(OP) | BPF_X, \\\n" 967 | " .dst_reg = DST, \\\n" 968 | " .src_reg = SRC, \\\n" 969 | " .off = OFF, \\\n" 970 | " .imm = 0 })\n" 971 | "\n" 972 | "/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */\n" 973 | "\n" 974 | "#define BPF_JMP32_IMM(OP, DST, IMM, OFF) \\\n" 975 | " ((struct bpf_insn) { \\\n" 976 | " .code = BPF_JMP32 | BPF_OP(OP) | BPF_K, \\\n" 977 | " .dst_reg = DST, \\\n" 978 | " .src_reg = 0, \\\n" 979 | " .off = OFF, \\\n" 980 | " .imm = IMM })\n" 981 | "\n" 982 | "/* Unconditional jumps, goto pc + off16 */\n" 983 | "\n" 984 | "#define BPF_JMP_A(OFF) \\\n" 985 | " ((struct bpf_insn) { \\\n" 986 | " .code = BPF_JMP | BPF_JA, \\\n" 987 | " .dst_reg = 0, \\\n" 988 | " .src_reg = 0, \\\n" 989 | " .off = OFF, \\\n" 990 | " .imm = 0 })\n" 991 | "\n" 992 | "/* Relative call */\n" 993 | "\n" 994 | "#define BPF_CALL_REL(TGT) \\\n" 995 | " ((struct bpf_insn) { \\\n" 996 | " .code = BPF_JMP | BPF_CALL, \\\n" 997 | " .dst_reg = 0, \\\n" 998 | " .src_reg = BPF_PSEUDO_CALL, \\\n" 999 | " .off = 0, \\\n" 1000 | " .imm = TGT })\n" 1001 | "\n" 1002 | "#define __bpf_call_base 0\n" 1003 | "#define BPF_EMIT_CALL(FUNC) \\\n" 1004 | " ((struct bpf_insn) { \\\n" 1005 | " .code = BPF_JMP | 
BPF_CALL, \\\n" 1006 | " .dst_reg = 0, \\\n" 1007 | " .src_reg = 0, \\\n" 1008 | " .off = 0, \\\n" 1009 | " .imm = ((FUNC) - __bpf_call_base) })\n" 1010 | "\n" 1011 | "/* Raw code statement block */\n" 1012 | "\n" 1013 | "#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \\\n" 1014 | " ((struct bpf_insn) { \\\n" 1015 | " .code = CODE, \\\n" 1016 | " .dst_reg = DST, \\\n" 1017 | " .src_reg = SRC, \\\n" 1018 | " .off = OFF, \\\n" 1019 | " .imm = IMM })\n" 1020 | "\n" 1021 | "/* Program exit */\n" 1022 | "\n" 1023 | "#define BPF_EXIT_INSN() \\\n" 1024 | " ((struct bpf_insn) { \\\n" 1025 | " .code = BPF_JMP | BPF_EXIT, \\\n" 1026 | " .dst_reg = 0, \\\n" 1027 | " .src_reg = 0, \\\n" 1028 | " .off = 0, \\\n" 1029 | " .imm = 0 })\n" 1030 | "\n" 1031 | "#define LISTENER_PORT (1337)\n" 1032 | "#define LISTENER_BACKLOG (0x30)\n" 1033 | "#define STORAGE_MAP_SIZE (8192)\n" 1034 | "#define FUZZ_MAP_SIZE (8192)\n" 1035 | "\n" 1036 | "#define ARRAY_CNT(arr) (sizeof(arr) / sizeof(arr[0]))\n" 1037 | "\n" 1038 | "#define CORRUPT_FD_CONST 10\n" 1039 | "#define STORAGE_FD_CONST 11\n" 1040 | "#define CORRUPT_REG BPF_REG_9\n" 1041 | "#define STORAGE_REG BPF_REG_8\n" 1042 | "#define SPECIAL_REG BPF_REG_7\n" 1043 | "#define INVALID_P_REG BPF_REG_6\n" 1044 | "#define LEAKED_V_REG BPF_REG_5\n" 1045 | "#define UMAX_REG BPF_REG_4\n" 1046 | "#define EXTRA0_REG BPF_REG_3\n" 1047 | "#define EXTRA1_REG BPF_REG_2\n" 1048 | "#define EXTRA2_REG BPF_REG_1\n" 1049 | "#define MAGIC_VAL1 0x4142434445464748\n" 1050 | "#define MAGIC_VAL2 0x494a4b4c4d4e4f40\n" 1051 | "\n" 1052 | "static int bpf(unsigned int cmd, union bpf_attr *attr, size_t size)\n" 1053 | "{\n" 1054 | " return syscall(SYS_bpf, cmd, attr, size);\n" 1055 | "}\n" 1056 | "\n" 1057 | "static int update_storage_map(int fd, unsigned long special_val)\n" 1058 | "{\n" 1059 | " uint64_t key = 0;\n" 1060 | " unsigned long buf[STORAGE_MAP_SIZE / sizeof(long)];\n" 1061 | " buf[0] = special_val;\n" 1062 | " for (int i = 1; i < (STORAGE_MAP_SIZE / 
sizeof(long)); i++) {\n" 1063 | " buf[i] = MAGIC_VAL2;\n" 1064 | " }\n" 1065 | " union bpf_attr attr = {\n" 1066 | " .map_fd = fd,\n" 1067 | " .key = (uint64_t)&key,\n" 1068 | " .value = (uint64_t)&buf,\n" 1069 | " };\n" 1070 | "\n" 1071 | " return bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));\n" 1072 | "}\n" 1073 | "\n" 1074 | "static int update_corrupt_map(int fd)\n" 1075 | "{\n" 1076 | " uint64_t key = 0;\n" 1077 | " unsigned long buf[STORAGE_MAP_SIZE / sizeof(long)];\n" 1078 | " for (int i = 0; i < (STORAGE_MAP_SIZE / sizeof(long)); i++) {\n" 1079 | " buf[i] = MAGIC_VAL1;\n" 1080 | " }\n" 1081 | " union bpf_attr attr = {\n" 1082 | " .map_fd = fd,\n" 1083 | " .key = (uint64_t)&key,\n" 1084 | " .value = (uint64_t)&buf,\n" 1085 | " };\n" 1086 | "\n" 1087 | " return bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));\n" 1088 | "}\n" 1089 | "\n" 1090 | "static int init_maps(int *corrupt_map_fd, int *storage_map_fd)\n" 1091 | "{\n" 1092 | " union bpf_attr corrupt_map = {\n" 1093 | " .map_type = BPF_MAP_TYPE_ARRAY,\n" 1094 | " .key_size = 4,\n" 1095 | " .value_size = STORAGE_MAP_SIZE,\n" 1096 | " .max_entries = 1,\n" 1097 | " };\n" 1098 | " strcpy(corrupt_map.map_name, \"corrupt_map\");\n" 1099 | " *corrupt_map_fd = (int)bpf(BPF_MAP_CREATE, &corrupt_map,\n" 1100 | " sizeof(corrupt_map));\n" 1101 | " if (*corrupt_map_fd < 0)\n" 1102 | " return -1;\n" 1103 | "\n" 1104 | " if (update_corrupt_map(*corrupt_map_fd) < 0)\n" 1105 | " return -1;\n" 1106 | "\n" 1107 | " union bpf_attr storage_map = {\n" 1108 | " .map_type = BPF_MAP_TYPE_ARRAY,\n" 1109 | " .key_size = 4,\n" 1110 | " .value_size = STORAGE_MAP_SIZE,\n" 1111 | " .max_entries = 1,\n" 1112 | " };\n" 1113 | " strcpy(corrupt_map.map_name, \"storage_map\");\n" 1114 | " *storage_map_fd = (int)bpf(BPF_MAP_CREATE, &storage_map,\n" 1115 | " sizeof(storage_map));\n" 1116 | " if (*storage_map_fd < 0)\n" 1117 | " return -1;\n" 1118 | "\n" 1119 | " if (update_storage_map(*storage_map_fd, 0) < 0)\n" 1120 | " return -1;\n" 1121 | 
"\n" 1122 | " return 0;\n" 1123 | "}\n" 1124 | "\n" 1125 | "static int read_map(int fd, void *buf, size_t size)\n" 1126 | "{\n" 1127 | " assert(size <= (STORAGE_MAP_SIZE));\n" 1128 | "\n" 1129 | " unsigned long lk[STORAGE_MAP_SIZE / sizeof(long)];\n" 1130 | " memset(lk, 0, sizeof(lk));\n" 1131 | " uint64_t key = 0;\n" 1132 | " union bpf_attr lookup_map = {\n" 1133 | " .map_fd = fd,\n" 1134 | " .key = (uint64_t)&key,\n" 1135 | " .value = (uint64_t)&lk,\n" 1136 | " };\n" 1137 | "\n" 1138 | " int err = bpf(BPF_MAP_LOOKUP_ELEM, &lookup_map, sizeof(lookup_map));\n" 1139 | " if (err < 0) {\n" 1140 | " return -1;\n" 1141 | " }\n" 1142 | "\n" 1143 | " memcpy(buf, lk, size);\n" 1144 | "\n" 1145 | " return 0;\n" 1146 | "}\n" 1147 | "\n" 1148 | "static int setup_listener_sock(int port, int backlog)\n" 1149 | "{\n" 1150 | " int sock_fd = socket(AF_INET,\n" 1151 | " SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC,\n" 1152 | " 0);\n" 1153 | " if (sock_fd < 0) {\n" 1154 | " return sock_fd;\n" 1155 | " }\n" 1156 | "\n" 1157 | " struct sockaddr_in servaddr;\n" 1158 | " servaddr.sin_family = AF_INET;\n" 1159 | " servaddr.sin_port = htons(port);\n" 1160 | " servaddr.sin_addr.s_addr = htonl(INADDR_ANY);\n" 1161 | "\n" 1162 | " int err = bind(sock_fd, (struct sockaddr *)&servaddr, sizeof(servaddr));\n" 1163 | " if (err < 0) {\n" 1164 | " close(sock_fd);\n" 1165 | " return err;\n" 1166 | " }\n" 1167 | "\n" 1168 | " err = listen(sock_fd, backlog);\n" 1169 | " if (err < 0) {\n" 1170 | " close(sock_fd);\n" 1171 | " return err;\n" 1172 | " }\n" 1173 | "\n" 1174 | " return sock_fd;\n" 1175 | "}\n" 1176 | "\n" 1177 | "static int setup_send_sock(void)\n" 1178 | "{\n" 1179 | " return socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0);\n" 1180 | "}\n" 1181 | "\n" 1182 | "#define LOG_BUF_SIZE 65536\n" 1183 | "static char bpf_log_buf[LOG_BUF_SIZE];\n" 1184 | "\n" 1185 | "static int load_prog(struct bpf_insn *insns, size_t insn_count)\n" 1186 | "{\n" 1187 | " union bpf_attr prog = {};\n" 1188 | " 
prog.license = (uint64_t)\"GPL\";\n" 1189 | " strcpy(prog.prog_name, \"ebpf_fuzzer\");\n" 1190 | " prog.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;\n" 1191 | " prog.insn_cnt = insn_count;\n" 1192 | " prog.insns = (uint64_t)insns;\n" 1193 | " prog.log_buf = (uint64_t)bpf_log_buf;\n" 1194 | " prog.log_size = LOG_BUF_SIZE;\n" 1195 | " prog.log_level = 1;\n" 1196 | "\n" 1197 | " int prog_fd = bpf(BPF_PROG_LOAD, &prog, sizeof(prog));\n" 1198 | " if (prog_fd < 0) {\n" 1199 | " return -1;\n" 1200 | " }\n" 1201 | "\n" 1202 | " return prog_fd;\n" 1203 | "}\n" 1204 | "\n" 1205 | "static int exec_prog(int prog_fd, int *_err)\n" 1206 | "{\n" 1207 | " int listener_sock = setup_listener_sock(LISTENER_PORT, LISTENER_BACKLOG);\n" 1208 | " int send_sock = setup_send_sock();\n" 1209 | "\n" 1210 | " if ((listener_sock < 0) || (send_sock < 0)) {\n" 1211 | " return -1;\n" 1212 | " }\n" 1213 | "\n" 1214 | " if (setsockopt(listener_sock, SOL_SOCKET, SO_ATTACH_BPF, &prog_fd,\n" 1215 | " sizeof(prog_fd)) < 0) {\n" 1216 | " return -1;\n" 1217 | " }\n" 1218 | "\n" 1219 | " struct sockaddr_in servaddr;\n" 1220 | " servaddr.sin_family = AF_INET;\n" 1221 | " servaddr.sin_port = htons(LISTENER_PORT);\n" 1222 | " servaddr.sin_addr.s_addr = htonl(INADDR_ANY);\n" 1223 | "\n" 1224 | " int err;\n" 1225 | " err = connect(send_sock, (struct sockaddr *)&servaddr, sizeof(servaddr));\n" 1226 | " if (err < 0) {\n" 1227 | " *_err = errno;\n" 1228 | " }\n" 1229 | "\n" 1230 | " close(listener_sock);\n" 1231 | " close(send_sock);\n" 1232 | " return (err < 0) ? 
1 : 0;\n" 1233 | "}\n" 1234 | "\n" 1235 | "static int detect_oob(char *buf0, char *buf1, size_t size)\n" 1236 | "{\n" 1237 | " char *b = &buf1[8];\n" 1238 | " unsigned long *_b = (unsigned long *)buf1;\n" 1239 | " for (int i = 0; i < 8; i++) {\n" 1240 | " if ((b[i] > 0x4f) || (b[i] < 0x40)) {\n" 1241 | " fprintf(stderr, \"[1]: %lx\\n\", _b[1]);\n" 1242 | " return 1;\n" 1243 | " }\n" 1244 | " }\n" 1245 | "\n" 1246 | " fprintf(stderr, \"[2]: %lx\\n\", _b[2]);\n" 1247 | " return 0;\n" 1248 | "}\n" 1249 | "\n" 1250 | "static int repro_xmsg(int corrupt_map_fd, int storage_map_fd, struct xmsg *msg)\n" 1251 | "{\n" 1252 | " int err = 0;\n" 1253 | " char buf0[STORAGE_MAP_SIZE];\n" 1254 | " char buf1[STORAGE_MAP_SIZE];\n" 1255 | "\n" 1256 | " err = update_storage_map(storage_map_fd, msg->special_value);\n" 1257 | " if (err < 0) {\n" 1258 | " fprintf(stderr, \"update_storage_map err\\n\");\n" 1259 | " return -1;\n" 1260 | " }\n" 1261 | " fprintf(stderr, \"update_storage_map done.\\n\");\n" 1262 | "\n" 1263 | " err = read_map(storage_map_fd, buf0, STORAGE_MAP_SIZE);\n" 1264 | " if (err < 0) {\n" 1265 | " fprintf(stderr, \"read_map err\\n\");\n" 1266 | " return -1;\n" 1267 | " }\n" 1268 | "\n" 1269 | " /* load and execute prog */\n" 1270 | " int prog_fd = load_prog(msg->insns, msg->insn_cnt);\n" 1271 | " if (prog_fd < 0) {\n" 1272 | " //fprintf(stderr, \"load_prog() err\\n\");\n" 1273 | " return -1;\n" 1274 | " }\n" 1275 | " fprintf(stderr, \"%ld, %s.\\n\", strlen(bpf_log_buf), bpf_log_buf);\n" 1276 | "\n" 1277 | " int connect_err;\n" 1278 | " err = exec_prog(prog_fd, &connect_err);\n" 1279 | " if (err != 1) {\n" 1280 | " /* prog not execute successfully */\n" 1281 | " return 0;\n" 1282 | " }\n" 1283 | " fprintf(stderr, \"exec_prog done.\\n\");\n" 1284 | "\n" 1285 | " /* read the map again, check the content */\n" 1286 | " err = read_map(storage_map_fd, buf1, STORAGE_MAP_SIZE);\n" 1287 | " if (err < 0) {\n" 1288 | " fprintf(stderr, \"read_map err\\n\");\n" 1289 | " return 
-1;\n" 1290 | " }\n" 1291 | "\n" 1292 | " if (detect_oob(buf0, buf1, STORAGE_MAP_SIZE)) {\n" 1293 | " return 1;\n" 1294 | " }\n" 1295 | "\n" 1296 | " return 0;\n" 1297 | "}\n" 1298 | "\n" 1299 | "int main(int argc, char *argv[])\n" 1300 | "{\n" 1301 | " struct xmsg msg;\n" 1302 | " int corrupt_map_fd, storage_map_fd;\n" 1303 | " int err;\n" 1304 | "\n" 1305 | " err = init_maps(&corrupt_map_fd, &storage_map_fd);\n" 1306 | " if (err < 0) {\n" 1307 | " fprintf(stderr, \"init_maps err\\n\");\n" 1308 | " return QEMU_FUZZLIB_INST_NOT_TESTED;\n" 1309 | " }\n" 1310 | " dup2(corrupt_map_fd, CORRUPT_FD_CONST);\n" 1311 | " dup2(storage_map_fd, STORAGE_FD_CONST);\n" 1312 | " close(corrupt_map_fd);\n" 1313 | " close(storage_map_fd);\n" 1314 | " corrupt_map_fd = CORRUPT_FD_CONST;\n" 1315 | " storage_map_fd = STORAGE_FD_CONST;\n" 1316 | " memset(&msg, 0, sizeof(msg));\n" 1317 | "\n" 1318 | " struct bpf_insn __insns[] = {\n"; 1319 | 1320 | static char *sample_tail = "" 1321 | " msg.insn_cnt = ARRAY_CNT(__insns);\n" 1322 | " memcpy(msg.insns, __insns, msg.insn_cnt * sizeof(struct bpf_insn));\n" 1323 | "\n" 1324 | " err = repro_xmsg(corrupt_map_fd, storage_map_fd, &msg);\n" 1325 | " if (err == 1) {\n" 1326 | " fprintf(stderr, \"repro done\\n\");\n" 1327 | " return QEMU_FUZZLIB_INST_BOOM;\n" 1328 | " } else if (err == 0) {\n" 1329 | " fprintf(stderr, \"repro failed\\n\");\n" 1330 | " return QEMU_FUZZLIB_INST_VALID;\n" 1331 | " } else if (err == -1) {\n" 1332 | " fprintf(stderr, \"repro failed\\n\");\n" 1333 | " return QEMU_FUZZLIB_INST_INVALID;\n" 1334 | " }\n" 1335 | "}\n"; 1336 | 1337 | #define BODY_FORMAT "%s\t};\n\n\tmsg.special_value = 0x%lx;\n" 1338 | static int gen_sample_body(char *b, size_t len, u32 body1_max) 1339 | { 1340 | int err; 1341 | struct bpf_insn insns[BPF_MAXINSNS]; 1342 | int idx = 0; 1343 | char insn_buf[BODY_LEN]; 1344 | memset(insn_buf, 0, BODY_LEN); 1345 | 1346 | /* 1 stage, gen special val */ 1347 | unsigned long this_special = s_rand64(); 1348 | 1349 | /* 
2 stage, gen insns */ 1350 | err = insn_get_map_ptr(insns, &idx, BPF_MAXINSNS, CORRUPT_FD_CONST, 1351 | CORRUPT_REG); 1352 | if (err < 0) { 1353 | return -1; 1354 | } 1355 | 1356 | err = insn_get_map_ptr(insns, &idx, BPF_MAXINSNS, STORAGE_FD_CONST, 1357 | STORAGE_REG); 1358 | if (err < 0) { 1359 | return -1; 1360 | } 1361 | 1362 | err = insn_body(insns, &idx, BPF_MAXINSNS, this_special, body1_max); 1363 | if (err < 0) { 1364 | return -1; 1365 | } 1366 | 1367 | err = insn_alu_map_ptr(insns, &idx, BPF_MAXINSNS); 1368 | if (err < 0) { 1369 | return -1; 1370 | } 1371 | 1372 | err = insn_write_mem(insns, &idx, BPF_MAXINSNS); 1373 | if (err < 0) { 1374 | return -1; 1375 | } 1376 | 1377 | err = insn_exit(insns, &idx, BPF_MAXINSNS); 1378 | if (err < 0) { 1379 | return -1; 1380 | } 1381 | 1382 | err = insn_print_common(insn_buf, BODY_LEN, insns, idx); 1383 | if (err < 0) { 1384 | return -1; 1385 | } 1386 | 1387 | snprintf(b, len, BODY_FORMAT, insn_buf, this_special); 1388 | return 0; 1389 | } 1390 | 1391 | static int this_init(struct ebpf_fuzz_target *target) 1392 | { 1393 | target->sample_header = sample_header; 1394 | target->sample_tail = sample_tail; 1395 | return 0; 1396 | } 1397 | 1398 | struct ebpf_fuzz_target kern_5_8 = { 1399 | .target_name = "general", 1400 | .init = this_init, 1401 | .gen_sample_body = gen_sample_body, 1402 | }; 1403 | --------------------------------------------------------------------------------