├── .gitignore ├── .travis.yml ├── 99-anbox.rules ├── INSTALL.sh ├── README.md ├── UNINSTALL.sh ├── anbox.conf ├── ashmem ├── Makefile ├── ashmem.c ├── ashmem.h ├── deps.c ├── dkms.conf └── uapi │ └── ashmem.h ├── binder ├── Makefile ├── binder.c ├── binder.h ├── binder_alloc.c ├── binder_alloc.h ├── binder_internal.h ├── binder_trace.h ├── binderfs.c ├── deps.c ├── deps.h └── dkms.conf ├── debian ├── README.Debian ├── changelog ├── compat ├── control ├── copyright ├── dirs ├── dkms ├── install ├── rules ├── source │ ├── format │ └── options └── udev └── scripts ├── build-against-kernel.sh ├── build-with-docker.sh └── clean-build.sh /.gitignore: -------------------------------------------------------------------------------- 1 | *.ko 2 | *.mod 3 | *.mod.c 4 | *.o 5 | *.order 6 | *.symvers 7 | *.swp 8 | .*.cmd 9 | .tmp_versions 10 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: c 2 | os: linux 3 | sudo: false 4 | 5 | addons: 6 | apt: 7 | sources: 8 | - ubuntu-toolchain-r-test 9 | packages: 10 | - bison 11 | - flex 12 | - libelf-dev 13 | - dpkg-dev 14 | - debhelper 15 | - dkms 16 | - fakeroot 17 | - gcc-8 18 | 19 | env: 20 | - KVER=4.4 21 | - KVER=4.8 22 | - KVER=4.9 23 | - KVER=4.13 24 | - KVER=4.14 25 | - KVER=4.15 26 | - KVER=4.16 27 | - KVER=4.17 28 | - KVER=5.0 && CC=gcc-8 29 | - KVER=5.1 && CC=gcc-8 30 | - KVER=5.2 && CC=gcc-8 31 | - KVER=5.3 && CC=gcc-8 32 | - KVER=5.4 && CC=gcc-8 33 | - KVER=master && CC=gcc-8 34 | 35 | matrix: 36 | allow_failures: 37 | - env: KVER=master 38 | include: 39 | - script: 40 | - dpkg-buildpackage -us -uc 41 | env: KVER="Debian Package Building" 42 | 43 | script: 44 | - ./scripts/build-against-kernel.sh ${KVER} ${CC} 45 | -------------------------------------------------------------------------------- /99-anbox.rules: -------------------------------------------------------------------------------- 1 | KERNEL=="ashmem", MODE="0666" 2 | KERNEL=="*binder", MODE="0666", SYMLINK+="anbox-%k" 3 | -------------------------------------------------------------------------------- /INSTALL.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # First install the configuration files: 4 | sudo cp anbox.conf /etc/modules-load.d/ 5 | sudo cp 99-anbox.rules /lib/udev/rules.d/ 6 | 7 | # Then copy the module sources to /usr/src/: 8 | sudo cp -rT ashmem /usr/src/anbox-ashmem-1 9 | sudo cp -rT binder /usr/src/anbox-binder-1 10 | 11 | # Finally use dkms to build and install: 12 | sudo dkms install anbox-ashmem/1 13 | sudo dkms install anbox-binder/1 14 | 15 | # Verify by loading these modules and checking the created devices: 16 | sudo modprobe ashmem_linux 17 | sudo modprobe binder_linux 18 | lsmod | grep -e ashmem_linux -e binder_linux 19 | ls -alh /dev/binder /dev/ashmem 20 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Build Status](https://travis-ci.org/anbox/anbox-modules.svg?branch=master)](https://travis-ci.org/anbox/anbox-modules) 2 | 3 | # Anbox Kernel Modules 4 | 5 | This repository contains the kernel modules necessary to run the Anbox 6 | Android container runtime. They're split out of the original Anbox 7 | repository to make packaging in various Linux distributions easier. 8 | 9 | # Install Instruction 10 | 11 | You need to have `dkms` and linux-headers on your system. You can install them by 12 | `sudo apt install dkms` or `sudo yum install dkms` (`dkms` is available in epel repo 13 | for CentOS). 14 | 15 | Package name for linux-headers varies on different distributions, e.g. 16 | `linux-headers-generic` (Ubuntu), `linux-headers-amd64` (Debian), 17 | `kernel-devel` (CentOS, Fedora), `kernel-default-devel` (openSUSE). 18 | 19 | 20 | You can either run `./INSTALL.sh` script to automate the installation steps or follow them manually below: 21 | 22 | * First install the configuration files: 23 | 24 | ``` 25 | sudo cp anbox.conf /etc/modules-load.d/ 26 | sudo cp 99-anbox.rules /lib/udev/rules.d/ 27 | ``` 28 | 29 | * Then copy the module sources to `/usr/src/`: 30 | 31 | ``` 32 | sudo cp -rT ashmem /usr/src/anbox-ashmem-1 33 | sudo cp -rT binder /usr/src/anbox-binder-1 34 | ``` 35 | 36 | * Finally use `dkms` to build and install: 37 | 38 | ``` 39 | sudo dkms install anbox-ashmem/1 40 | sudo dkms install anbox-binder/1 41 | ``` 42 | 43 | You can verify by loading these modules and checking the created devices: 44 | 45 | ``` 46 | sudo modprobe ashmem_linux 47 | sudo modprobe binder_linux 48 | lsmod | grep -e ashmem_linux -e binder_linux 49 | ls -alh /dev/binder /dev/ashmem 50 | ``` 51 | 52 | You are expected to see output like: 53 | 54 | ``` 55 | binder_linux 114688 0 56 | ashmem_linux 16384 0 57 | crw-rw-rw- 1 root root 10, 55 Jun 19 16:30 /dev/ashmem 58 | crw-rw-rw- 1 root root 511, 0 Jun 19 16:30 /dev/binder 59 | ``` 60 | 61 | # Uninstall Instructions 62 | 63 | ou can either run `./UNINSTALL.sh` script to automate the installation steps or follow them manually below: 64 | 65 | * First use dkms to remove the modules: 66 | 67 | ``` 68 | sudo dkms remove anbox-ashmem/1 69 | sudo dkms remove anbox-binder/1 70 | ``` 71 | 72 | * Then remove the module sources from /usr/src/: 73 | 74 | ``` 75 | sudo rm -rf /usr/src/anbox-ashmem-1 76 | sudo rm -rf /usr/src/anbox-binder-1 77 | ``` 78 | 79 | * Finally remove the configuration files: 80 | 81 | ``` 82 | sudo rm -f /etc/modules-load.d/anbox.conf 83 | sudo rm -f /lib/udev/rules.d/99-anbox.rules 84 | ``` 85 | 86 | You must then restart your device. You can then verify modules were removed by trying to load the modules and checking the created devices: 87 | 88 | ``` 89 | sudo modprobe ashmem_linux 90 | sudo modprobe binder_linux 91 | lsmod | grep -e ashmem_linux -e binder_linux 92 | ls -alh /dev/binder /dev/ashmem 93 | ``` 94 | 95 | You are expected to see output like: 96 | 97 | ``` 98 | modprobe: FATAL: Module ashmem_linux not found in directory /lib/modules/6.0.2-76060002-generic 99 | modprobe: FATAL: Module binder_linux not found in directory /lib/modules/6.0.2-76060002-generic 100 | ls: cannot access '/dev/binder': No such file or directory 101 | ls: cannot access '/dev/ashmem': No such file or directory 102 | ``` 103 | 104 | # Packaging: 105 | ## Debian/Ubuntu: 106 | ``` 107 | sudo apt-get install devscripts dh-dkms -y 108 | git log --pretty=" -%an<%ae>:%aI - %s" > ./debian/changelog 109 | debuild -i -us -uc -b 110 | ls -lrt ../anbox-modules-dkms_*.deb 111 | ``` 112 | -------------------------------------------------------------------------------- /UNINSTALL.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # First use dkms to remove the modules: 4 | sudo dkms remove anbox-ashmem/1 5 | sudo dkms remove anbox-binder/1 6 | 7 | # Then remove the module sources from /usr/src/: 8 | sudo rm -rf /usr/src/anbox-ashmem-1 9 | sudo rm -rf /usr/src/anbox-binder-1 10 | 11 | # Finally remove the configuration files: 12 | sudo rm -f /etc/modules-load.d/anbox.conf 13 | sudo rm -f /lib/udev/rules.d/99-anbox.rules 14 | 15 | # Verify remove by trying to load the modules and checking the created devices: 16 | failed_checks=0 17 | if sudo modprobe ashmem_linux > /dev/null 2>&1; then 18 | failed_checks=1 19 | else 20 | failed_checks=0 21 | fi 22 | 23 | if sudo modprobe binder_linux > /dev/null 2>&1; then 24 | failed_checks=1 25 | else 26 | failed_checks=0 27 | fi 28 | 29 | if lsmod | grep -e ashmem_linux -e binder_linux > /dev/null 2>&1; then 30 | failed_checks=1 31 | else 32 | failed_checks=0 33 | fi 34 | 35 | if ls -alh /dev/binder /dev/ashmem > /dev/null 2>&1; then 36 | failed_checks=1 37 | else 38 | failed_checks=0 39 | fi 40 | 41 | if [ $failed_checks == 1 ]; then 42 | echo "Please restart your device and rerun this script to verify changes" 43 | else 44 | echo "Modules not installed" 45 | fi -------------------------------------------------------------------------------- /anbox.conf: -------------------------------------------------------------------------------- 1 | ashmem_linux 2 | binder_linux 3 | -------------------------------------------------------------------------------- /ashmem/Makefile: -------------------------------------------------------------------------------- 1 | ccflags-y += -I$(src) -Wno-error=implicit-int -Wno-int-conversion 2 | obj-m := ashmem_linux.o 3 | ashmem_linux-y := deps.o ashmem.o 4 | 5 | KERNEL_SRC ?= /lib/modules/$(shell uname -r)/build 6 | VZ= $(shell uname -r | grep vz) 7 | ifneq ($(VZ),) 8 | ccflags-y += -DVZKERNEL 9 | endif 10 | 11 | all: 12 | $(MAKE) -C $(KERNEL_SRC) V=0 M=$$PWD 13 | 14 | install: 15 | cp ashmem_linux.ko $(DESTDIR)/ 16 | 17 | clean: 18 | rm -rf deps.h *.o *.ko *.mod.c *.symvers *.order .*.cmd .tmp_versions 19 | -------------------------------------------------------------------------------- /ashmem/ashmem.c: -------------------------------------------------------------------------------- 1 | /* mm/ashmem.c 2 | * 3 | * Anonymous Shared Memory Subsystem, ashmem 4 | * 5 | * Copyright (C) 2008 Google, Inc. 6 | * 7 | * Robert Love 8 | * 9 | * This software is licensed under the terms of the GNU General Public 10 | * License version 2, as published by the Free Software Foundation, and 11 | * may be copied, distributed, and modified under those terms. 12 | * 13 | * This program is distributed in the hope that it will be useful, 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 | * GNU General Public License for more details. 17 | */ 18 | 19 | #define pr_fmt(fmt) "ashmem: " fmt 20 | 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include 28 | #include 29 | #include 30 | #include 31 | #include 32 | #include 33 | #include 34 | #include 35 | #include 36 | #include 37 | #include "ashmem.h" 38 | 39 | #define ASHMEM_NAME_PREFIX "dev/ashmem/" 40 | #define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1) 41 | #define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN) 42 | 43 | /** 44 | * struct ashmem_area - The anonymous shared memory area 45 | * @name: The optional name in /proc/pid/maps 46 | * @unpinned_list: The list of all ashmem areas 47 | * @file: The shmem-based backing file 48 | * @size: The size of the mapping, in bytes 49 | * @prot_mask: The allowed protection bits, as vm_flags 50 | * 51 | * The lifecycle of this structure is from our parent file's open() until 52 | * its release(). It is also protected by 'ashmem_mutex' 53 | * 54 | * Warning: Mappings do NOT pin this structure; It dies on close() 55 | */ 56 | struct ashmem_area { 57 | char name[ASHMEM_FULL_NAME_LEN]; 58 | struct list_head unpinned_list; 59 | struct file *file; 60 | size_t size; 61 | unsigned long prot_mask; 62 | }; 63 | 64 | /** 65 | * struct ashmem_range - A range of unpinned/evictable pages 66 | * @lru: The entry in the LRU list 67 | * @unpinned: The entry in its area's unpinned list 68 | * @asma: The associated anonymous shared memory area. 69 | * @pgstart: The starting page (inclusive) 70 | * @pgend: The ending page (inclusive) 71 | * @purged: The purge status (ASHMEM_NOT or ASHMEM_WAS_PURGED) 72 | * 73 | * The lifecycle of this structure is from unpin to pin. 74 | * It is protected by 'ashmem_mutex' 75 | */ 76 | struct ashmem_range { 77 | struct list_head lru; 78 | struct list_head unpinned; 79 | struct ashmem_area *asma; 80 | size_t pgstart; 81 | size_t pgend; 82 | unsigned int purged; 83 | }; 84 | 85 | /* LRU list of unpinned pages, protected by ashmem_mutex */ 86 | static LIST_HEAD(ashmem_lru_list); 87 | 88 | /* 89 | * long lru_count - The count of pages on our LRU list. 90 | * 91 | * This is protected by ashmem_mutex. 92 | */ 93 | static unsigned long lru_count; 94 | 95 | /* 96 | * ashmem_mutex - protects the list of and each individual ashmem_area 97 | * 98 | * Lock Ordering: ashmex_mutex -> i_mutex -> i_alloc_sem 99 | */ 100 | static DEFINE_MUTEX(ashmem_mutex); 101 | 102 | static struct kmem_cache *ashmem_area_cachep __read_mostly; 103 | static struct kmem_cache *ashmem_range_cachep __read_mostly; 104 | 105 | #define range_size(range) \ 106 | ((range)->pgend - (range)->pgstart + 1) 107 | 108 | #define range_on_lru(range) \ 109 | ((range)->purged == ASHMEM_NOT_PURGED) 110 | 111 | #define page_range_subsumes_range(range, start, end) \ 112 | (((range)->pgstart >= (start)) && ((range)->pgend <= (end))) 113 | 114 | #define page_range_subsumed_by_range(range, start, end) \ 115 | (((range)->pgstart <= (start)) && ((range)->pgend >= (end))) 116 | 117 | #define page_in_range(range, page) \ 118 | (((range)->pgstart <= (page)) && ((range)->pgend >= (page))) 119 | 120 | #define page_range_in_range(range, start, end) \ 121 | (page_in_range(range, start) || page_in_range(range, end) || \ 122 | page_range_subsumes_range(range, start, end)) 123 | 124 | #define range_before_page(range, page) \ 125 | ((range)->pgend < (page)) 126 | 127 | #define PROT_MASK (PROT_EXEC | PROT_READ | PROT_WRITE) 128 | 129 | /** 130 | * lru_add() - Adds a range of memory to the LRU list 131 | * @range: The memory range being added. 132 | * 133 | * The range is first added to the end (tail) of the LRU list. 134 | * After this, the size of the range is added to @lru_count 135 | */ 136 | static inline void lru_add(struct ashmem_range *range) 137 | { 138 | list_add_tail(&range->lru, &ashmem_lru_list); 139 | lru_count += range_size(range); 140 | } 141 | 142 | /** 143 | * lru_del() - Removes a range of memory from the LRU list 144 | * @range: The memory range being removed 145 | * 146 | * The range is first deleted from the LRU list. 147 | * After this, the size of the range is removed from @lru_count 148 | */ 149 | static inline void lru_del(struct ashmem_range *range) 150 | { 151 | list_del(&range->lru); 152 | lru_count -= range_size(range); 153 | } 154 | 155 | /** 156 | * range_alloc() - Allocates and initializes a new ashmem_range structure 157 | * @asma: The associated ashmem_area 158 | * @prev_range: The previous ashmem_range in the sorted asma->unpinned list 159 | * @purged: Initial purge status (ASMEM_NOT_PURGED or ASHMEM_WAS_PURGED) 160 | * @start: The starting page (inclusive) 161 | * @end: The ending page (inclusive) 162 | * 163 | * This function is protected by ashmem_mutex. 164 | * 165 | * Return: 0 if successful, or -ENOMEM if there is an error 166 | */ 167 | static int range_alloc(struct ashmem_area *asma, 168 | struct ashmem_range *prev_range, unsigned int purged, 169 | size_t start, size_t end) 170 | { 171 | struct ashmem_range *range; 172 | 173 | range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL); 174 | if (unlikely(!range)) 175 | return -ENOMEM; 176 | 177 | range->asma = asma; 178 | range->pgstart = start; 179 | range->pgend = end; 180 | range->purged = purged; 181 | 182 | list_add_tail(&range->unpinned, &prev_range->unpinned); 183 | 184 | if (range_on_lru(range)) 185 | lru_add(range); 186 | 187 | return 0; 188 | } 189 | 190 | /** 191 | * range_del() - Deletes and dealloctes an ashmem_range structure 192 | * @range: The associated ashmem_range that has previously been allocated 193 | */ 194 | static void range_del(struct ashmem_range *range) 195 | { 196 | list_del(&range->unpinned); 197 | if (range_on_lru(range)) 198 | lru_del(range); 199 | kmem_cache_free(ashmem_range_cachep, range); 200 | } 201 | 202 | /** 203 | * range_shrink() - Shrinks an ashmem_range 204 | * @range: The associated ashmem_range being shrunk 205 | * @start: The starting byte of the new range 206 | * @end: The ending byte of the new range 207 | * 208 | * This does not modify the data inside the existing range in any way - It 209 | * simply shrinks the boundaries of the range. 210 | * 211 | * Theoretically, with a little tweaking, this could eventually be changed 212 | * to range_resize, and expand the lru_count if the new range is larger. 213 | */ 214 | static inline void range_shrink(struct ashmem_range *range, 215 | size_t start, size_t end) 216 | { 217 | size_t pre = range_size(range); 218 | 219 | range->pgstart = start; 220 | range->pgend = end; 221 | 222 | if (range_on_lru(range)) 223 | lru_count -= pre - range_size(range); 224 | } 225 | 226 | /** 227 | * ashmem_open() - Opens an Anonymous Shared Memory structure 228 | * @inode: The backing file's index node(?) 229 | * @file: The backing file 230 | * 231 | * Please note that the ashmem_area is not returned by this function - It is 232 | * instead written to "file->private_data". 233 | * 234 | * Return: 0 if successful, or another code if unsuccessful. 235 | */ 236 | static int ashmem_open(struct inode *inode, struct file *file) 237 | { 238 | struct ashmem_area *asma; 239 | int ret; 240 | 241 | ret = generic_file_open(inode, file); 242 | if (unlikely(ret)) 243 | return ret; 244 | 245 | asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL); 246 | if (unlikely(!asma)) 247 | return -ENOMEM; 248 | 249 | INIT_LIST_HEAD(&asma->unpinned_list); 250 | memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN); 251 | asma->prot_mask = PROT_MASK; 252 | file->private_data = asma; 253 | 254 | return 0; 255 | } 256 | 257 | /** 258 | * ashmem_release() - Releases an Anonymous Shared Memory structure 259 | * @ignored: The backing file's Index Node(?) - It is ignored here. 260 | * @file: The backing file 261 | * 262 | * Return: 0 if successful. If it is anything else, go have a coffee and 263 | * try again. 264 | */ 265 | static int ashmem_release(struct inode *ignored, struct file *file) 266 | { 267 | struct ashmem_area *asma = file->private_data; 268 | struct ashmem_range *range, *next; 269 | 270 | mutex_lock(&ashmem_mutex); 271 | list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) 272 | range_del(range); 273 | mutex_unlock(&ashmem_mutex); 274 | 275 | if (asma->file) 276 | fput(asma->file); 277 | kmem_cache_free(ashmem_area_cachep, asma); 278 | 279 | return 0; 280 | } 281 | 282 | /** 283 | * ashmem_read() - Reads a set of bytes from an Ashmem-enabled file 284 | * @file: The associated backing file. 285 | * @buf: The buffer of data being written to 286 | * @len: The number of bytes being read 287 | * @pos: The position of the first byte to read. 288 | * 289 | * Return: 0 if successful, or another return code if not. 290 | */ 291 | static ssize_t ashmem_read(struct file *file, char __user *buf, 292 | size_t len, loff_t *pos) 293 | { 294 | struct ashmem_area *asma = file->private_data; 295 | int ret = 0; 296 | 297 | mutex_lock(&ashmem_mutex); 298 | 299 | /* If size is not set, or set to 0, always return EOF. */ 300 | if (asma->size == 0) 301 | goto out_unlock; 302 | 303 | if (!asma->file) { 304 | ret = -EBADF; 305 | goto out_unlock; 306 | } 307 | 308 | mutex_unlock(&ashmem_mutex); 309 | 310 | /* 311 | * asma and asma->file are used outside the lock here. We assume 312 | * once asma->file is set it will never be changed, and will not 313 | * be destroyed until all references to the file are dropped and 314 | * ashmem_release is called. 315 | * 316 | * kernel_read supersedes vfs_read from kernel version 3.9 317 | */ 318 | #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0) 319 | ret = __vfs_read(asma->file, buf, len, pos); 320 | #else 321 | ret = kernel_read(asma->file, buf, len, pos); 322 | #endif 323 | if (ret >= 0) 324 | /** Update backing file pos, since f_ops->read() doesn't */ 325 | asma->file->f_pos = *pos; 326 | return ret; 327 | 328 | out_unlock: 329 | mutex_unlock(&ashmem_mutex); 330 | return ret; 331 | } 332 | 333 | static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin) 334 | { 335 | struct ashmem_area *asma = file->private_data; 336 | int ret; 337 | 338 | mutex_lock(&ashmem_mutex); 339 | 340 | if (asma->size == 0) { 341 | ret = -EINVAL; 342 | goto out; 343 | } 344 | 345 | if (!asma->file) { 346 | ret = -EBADF; 347 | goto out; 348 | } 349 | 350 | ret = vfs_llseek(asma->file, offset, origin); 351 | if (ret < 0) 352 | goto out; 353 | 354 | /** Copy f_pos from backing file, since f_ops->llseek() sets it */ 355 | file->f_pos = asma->file->f_pos; 356 | 357 | out: 358 | mutex_unlock(&ashmem_mutex); 359 | return ret; 360 | } 361 | 362 | static inline vm_flags_t calc_vm_may_flags(unsigned long prot) 363 | { 364 | return _calc_vm_trans(prot, PROT_READ, VM_MAYREAD) | 365 | _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) | 366 | _calc_vm_trans(prot, PROT_EXEC, VM_MAYEXEC); 367 | } 368 | 369 | static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) 370 | { 371 | struct ashmem_area *asma = file->private_data; 372 | int ret = 0; 373 | 374 | mutex_lock(&ashmem_mutex); 375 | 376 | /* user needs to SET_SIZE before mapping */ 377 | if (unlikely(!asma->size)) { 378 | ret = -EINVAL; 379 | goto out; 380 | } 381 | 382 | /* requested protection bits must match our allowed protection mask */ 383 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) || defined(VZKERNEL) 384 | if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask, 0)) & 385 | calc_vm_prot_bits(PROT_MASK, 0))) { 386 | #else 387 | if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) & 388 | calc_vm_prot_bits(PROT_MASK))) { 389 | #endif 390 | ret = -EPERM; 391 | goto out; 392 | } 393 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 3, 0) 394 | vm_flags_clear(vma, calc_vm_may_flags(~asma->prot_mask)); 395 | #else 396 | vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask); 397 | #endif 398 | 399 | if (!asma->file) { 400 | char *name = ASHMEM_NAME_DEF; 401 | struct file *vmfile; 402 | 403 | if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') 404 | name = asma->name; 405 | 406 | /* ... and allocate the backing shmem file */ 407 | vmfile = shmem_file_setup(name, asma->size, vma->vm_flags); 408 | if (IS_ERR(vmfile)) { 409 | ret = PTR_ERR(vmfile); 410 | goto out; 411 | } 412 | asma->file = vmfile; 413 | } 414 | get_file(asma->file); 415 | 416 | /* 417 | * XXX - Reworked to use shmem_zero_setup() instead of 418 | * shmem_set_file while we're in staging. -jstultz 419 | */ 420 | if (vma->vm_flags & VM_SHARED) { 421 | ret = shmem_zero_setup(vma); 422 | if (ret) { 423 | fput(asma->file); 424 | goto out; 425 | } 426 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0) 427 | } else { 428 | vma_set_anonymous(vma); 429 | } 430 | #else 431 | } 432 | #endif 433 | 434 | if (vma->vm_file) 435 | fput(vma->vm_file); 436 | vma->vm_file = asma->file; 437 | 438 | out: 439 | mutex_unlock(&ashmem_mutex); 440 | return ret; 441 | } 442 | 443 | /* 444 | * ashmem_shrink - our cache shrinker, called from mm/vmscan.c 445 | * 446 | * 'nr_to_scan' is the number of objects to scan for freeing. 447 | * 448 | * 'gfp_mask' is the mask of the allocation that got us into this mess. 449 | * 450 | * Return value is the number of objects freed or -1 if we cannot 451 | * proceed without risk of deadlock (due to gfp_mask). 452 | * 453 | * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial 454 | * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan' 455 | * pages freed. 456 | */ 457 | static unsigned long 458 | ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) 459 | { 460 | struct ashmem_range *range, *next; 461 | unsigned long freed = 0; 462 | 463 | /* We might recurse into filesystem code, so bail out if necessary */ 464 | if (!(sc->gfp_mask & __GFP_FS)) 465 | return SHRINK_STOP; 466 | 467 | mutex_lock(&ashmem_mutex); 468 | list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) { 469 | loff_t start = range->pgstart * PAGE_SIZE; 470 | loff_t end = (range->pgend + 1) * PAGE_SIZE; 471 | 472 | vfs_fallocate(range->asma->file, 473 | FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 474 | start, end - start); 475 | range->purged = ASHMEM_WAS_PURGED; 476 | lru_del(range); 477 | 478 | freed += range_size(range); 479 | if (--sc->nr_to_scan <= 0) 480 | break; 481 | } 482 | mutex_unlock(&ashmem_mutex); 483 | return freed; 484 | } 485 | 486 | static unsigned long 487 | ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc) 488 | { 489 | /* 490 | * note that lru_count is count of pages on the lru, not a count of 491 | * objects on the list. This means the scan function needs to return the 492 | * number of pages freed, not the number of objects scanned. 493 | */ 494 | return lru_count; 495 | } 496 | 497 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,7,0)) 498 | static struct shrinker *ashmem_shrinker; 499 | #else 500 | static struct shrinker ashmem_shrinker = { 501 | .count_objects = ashmem_shrink_count, 502 | .scan_objects = ashmem_shrink_scan, 503 | /* 504 | * XXX (dchinner): I wish people would comment on why they need on 505 | * significant changes to the default value here 506 | */ 507 | .seeks = DEFAULT_SEEKS * 4, 508 | }; 509 | #endif 510 | 511 | static int set_prot_mask(struct ashmem_area *asma, unsigned long prot) 512 | { 513 | int ret = 0; 514 | 515 | mutex_lock(&ashmem_mutex); 516 | 517 | /* the user can only remove, not add, protection bits */ 518 | if (unlikely((asma->prot_mask & prot) != prot)) { 519 | ret = -EINVAL; 520 | goto out; 521 | } 522 | 523 | /* does the application expect PROT_READ to imply PROT_EXEC? */ 524 | if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) 525 | prot |= PROT_EXEC; 526 | 527 | asma->prot_mask = prot; 528 | 529 | out: 530 | mutex_unlock(&ashmem_mutex); 531 | return ret; 532 | } 533 | 534 | static int set_name(struct ashmem_area *asma, void __user *name) 535 | { 536 | int len; 537 | int ret = 0; 538 | char local_name[ASHMEM_NAME_LEN]; 539 | 540 | /* 541 | * Holding the ashmem_mutex while doing a copy_from_user might cause 542 | * an data abort which would try to access mmap_sem. If another 543 | * thread has invoked ashmem_mmap then it will be holding the 544 | * semaphore and will be waiting for ashmem_mutex, there by leading to 545 | * deadlock. We'll release the mutex and take the name to a local 546 | * variable that does not need protection and later copy the local 547 | * variable to the structure member with lock held. 548 | */ 549 | len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN); 550 | if (len < 0) 551 | return len; 552 | if (len == ASHMEM_NAME_LEN) 553 | local_name[ASHMEM_NAME_LEN - 1] = '\0'; 554 | mutex_lock(&ashmem_mutex); 555 | /* cannot change an existing mapping's name */ 556 | if (unlikely(asma->file)) 557 | ret = -EINVAL; 558 | else 559 | strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name); 560 | 561 | mutex_unlock(&ashmem_mutex); 562 | return ret; 563 | } 564 | 565 | static int get_name(struct ashmem_area *asma, void __user *name) 566 | { 567 | int ret = 0; 568 | size_t len; 569 | /* 570 | * Have a local variable to which we'll copy the content 571 | * from asma with the lock held. Later we can copy this to the user 572 | * space safely without holding any locks. So even if we proceed to 573 | * wait for mmap_sem, it won't lead to deadlock. 574 | */ 575 | char local_name[ASHMEM_NAME_LEN]; 576 | 577 | mutex_lock(&ashmem_mutex); 578 | if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') { 579 | /* 580 | * Copying only `len', instead of ASHMEM_NAME_LEN, bytes 581 | * prevents us from revealing one user's stack to another. 582 | */ 583 | len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1; 584 | memcpy(local_name, asma->name + ASHMEM_NAME_PREFIX_LEN, len); 585 | } else { 586 | len = sizeof(ASHMEM_NAME_DEF); 587 | memcpy(local_name, ASHMEM_NAME_DEF, len); 588 | } 589 | mutex_unlock(&ashmem_mutex); 590 | 591 | /* 592 | * Now we are just copying from the stack variable to userland 593 | * No lock held 594 | */ 595 | if (unlikely(copy_to_user(name, local_name, len))) 596 | ret = -EFAULT; 597 | return ret; 598 | } 599 | 600 | /* 601 | * ashmem_pin - pin the given ashmem region, returning whether it was 602 | * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED). 603 | * 604 | * Caller must hold ashmem_mutex. 605 | */ 606 | static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend) 607 | { 608 | struct ashmem_range *range, *next; 609 | int ret = ASHMEM_NOT_PURGED; 610 | 611 | list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) { 612 | /* moved past last applicable page; we can short circuit */ 613 | if (range_before_page(range, pgstart)) 614 | break; 615 | 616 | /* 617 | * The user can ask us to pin pages that span multiple ranges, 618 | * or to pin pages that aren't even unpinned, so this is messy. 619 | * 620 | * Four cases: 621 | * 1. The requested range subsumes an existing range, so we 622 | * just remove the entire matching range. 623 | * 2. The requested range overlaps the start of an existing 624 | * range, so we just update that range. 625 | * 3. The requested range overlaps the end of an existing 626 | * range, so we just update that range. 627 | * 4. The requested range punches a hole in an existing range, 628 | * so we have to update one side of the range and then 629 | * create a new range for the other side. 630 | */ 631 | if (page_range_in_range(range, pgstart, pgend)) { 632 | ret |= range->purged; 633 | 634 | /* Case #1: Easy. Just nuke the whole thing. */ 635 | if (page_range_subsumes_range(range, pgstart, pgend)) { 636 | range_del(range); 637 | continue; 638 | } 639 | 640 | /* Case #2: We overlap from the start, so adjust it */ 641 | if (range->pgstart >= pgstart) { 642 | range_shrink(range, pgend + 1, range->pgend); 643 | continue; 644 | } 645 | 646 | /* Case #3: We overlap from the rear, so adjust it */ 647 | if (range->pgend <= pgend) { 648 | range_shrink(range, range->pgstart, 649 | pgstart - 1); 650 | continue; 651 | } 652 | 653 | /* 654 | * Case #4: We eat a chunk out of the middle. A bit 655 | * more complicated, we allocate a new range for the 656 | * second half and adjust the first chunk's endpoint. 657 | */ 658 | range_alloc(asma, range, range->purged, 659 | pgend + 1, range->pgend); 660 | range_shrink(range, range->pgstart, pgstart - 1); 661 | break; 662 | } 663 | } 664 | 665 | return ret; 666 | } 667 | 668 | /* 669 | * ashmem_unpin - unpin the given range of pages. Returns zero on success. 670 | * 671 | * Caller must hold ashmem_mutex. 672 | */ 673 | static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend) 674 | { 675 | struct ashmem_range *range, *next; 676 | unsigned int purged = ASHMEM_NOT_PURGED; 677 | 678 | restart: 679 | list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) { 680 | /* short circuit: this is our insertion point */ 681 | if (range_before_page(range, pgstart)) 682 | break; 683 | 684 | /* 685 | * The user can ask us to unpin pages that are already entirely 686 | * or partially pinned. We handle those two cases here. 687 | */ 688 | if (page_range_subsumed_by_range(range, pgstart, pgend)) 689 | return 0; 690 | if (page_range_in_range(range, pgstart, pgend)) { 691 | pgstart = min_t(size_t, range->pgstart, pgstart); 692 | pgend = max_t(size_t, range->pgend, pgend); 693 | purged |= range->purged; 694 | range_del(range); 695 | goto restart; 696 | } 697 | } 698 | 699 | return range_alloc(asma, range, purged, pgstart, pgend); 700 | } 701 | 702 | /* 703 | * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the 704 | * given interval are unpinned and ASHMEM_IS_PINNED otherwise. 705 | * 706 | * Caller must hold ashmem_mutex. 707 | */ 708 | static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart, 709 | size_t pgend) 710 | { 711 | struct ashmem_range *range; 712 | int ret = ASHMEM_IS_PINNED; 713 | 714 | list_for_each_entry(range, &asma->unpinned_list, unpinned) { 715 | if (range_before_page(range, pgstart)) 716 | break; 717 | if (page_range_in_range(range, pgstart, pgend)) { 718 | ret = ASHMEM_IS_UNPINNED; 719 | break; 720 | } 721 | } 722 | 723 | return ret; 724 | } 725 | 726 | static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd, 727 | void __user *p) 728 | { 729 | struct ashmem_pin pin; 730 | size_t pgstart, pgend; 731 | int ret = -EINVAL; 732 | 733 | if (unlikely(!asma->file)) 734 | return -EINVAL; 735 | 736 | if (unlikely(copy_from_user(&pin, p, sizeof(pin)))) 737 | return -EFAULT; 738 | 739 | /* per custom, you can pass zero for len to mean "everything onward" */ 740 | if (!pin.len) 741 | pin.len = PAGE_ALIGN(asma->size) - pin.offset; 742 | 743 | if (unlikely((pin.offset | pin.len) & ~PAGE_MASK)) 744 | return -EINVAL; 745 | 746 | if (unlikely(((__u32)-1) - pin.offset < pin.len)) 747 | return -EINVAL; 748 | 749 | if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len)) 750 | return -EINVAL; 751 | 752 | pgstart = pin.offset / PAGE_SIZE; 753 | pgend = pgstart + (pin.len / PAGE_SIZE) - 1; 754 | 755 | mutex_lock(&ashmem_mutex); 756 | 757 | switch (cmd) { 758 | case ASHMEM_PIN: 759 | ret = ashmem_pin(asma, pgstart, pgend); 760 | break; 761 | case ASHMEM_UNPIN: 762 | ret = ashmem_unpin(asma, pgstart, pgend); 763 | break; 764 | case ASHMEM_GET_PIN_STATUS: 765 | ret = ashmem_get_pin_status(asma, pgstart, pgend); 766 | break; 767 | } 768 | 769 | mutex_unlock(&ashmem_mutex); 770 | 771 | return ret; 772 | } 773 | 774 | static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 775 | { 776 | struct ashmem_area *asma = file->private_data; 777 | long ret = -ENOTTY; 778 | 779 | switch (cmd) { 780 | case ASHMEM_SET_NAME: 781 | ret = set_name(asma, (void __user *)arg); 782 | break; 783 | case ASHMEM_GET_NAME: 784 | ret = get_name(asma, (void __user *)arg); 785 | break; 786 | case ASHMEM_SET_SIZE: 787 | ret = -EINVAL; 788 | if (!asma->file) { 789 | ret = 0; 790 | asma->size = (size_t)arg; 791 | } 792 | break; 793 | case ASHMEM_GET_SIZE: 794 | ret = asma->size; 795 | break; 796 | case ASHMEM_SET_PROT_MASK: 797 | ret = set_prot_mask(asma, arg); 798 | break; 799 | case ASHMEM_GET_PROT_MASK: 800 | ret = asma->prot_mask; 801 | break; 802 | case ASHMEM_PIN: 803 | case ASHMEM_UNPIN: 804 | case ASHMEM_GET_PIN_STATUS: 805 | ret = ashmem_pin_unpin(asma, cmd, (void __user *)arg); 806 | break; 807 | case ASHMEM_PURGE_ALL_CACHES: 808 | ret = -EPERM; 809 | if (capable(CAP_SYS_ADMIN)) { 810 | struct shrink_control sc = { 811 | .gfp_mask = GFP_KERNEL, 812 | .nr_to_scan = LONG_MAX, 813 | }; 814 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,7,0)) 815 | ret = ashmem_shrink_count(ashmem_shrinker, &sc); 816 | ashmem_shrink_scan(ashmem_shrinker, &sc); 817 | #else 818 | ret = ashmem_shrink_count(&ashmem_shrinker, &sc); 819 | ashmem_shrink_scan(&ashmem_shrinker, &sc); 820 | #endif 821 | } 822 | break; 823 | } 824 | 825 | return ret; 826 | } 827 | 828 | /* support of 32bit userspace on 64bit platforms */ 829 | #ifdef CONFIG_COMPAT 830 | static long compat_ashmem_ioctl(struct file *file, unsigned int cmd, 831 | unsigned long arg) 832 | { 833 | switch (cmd) { 834 | case COMPAT_ASHMEM_SET_SIZE: 835 | cmd = ASHMEM_SET_SIZE; 836 | break; 837 | case COMPAT_ASHMEM_SET_PROT_MASK: 838 | cmd = ASHMEM_SET_PROT_MASK; 839 | break; 840 | } 841 | return ashmem_ioctl(file, cmd, arg); 842 | } 843 | #endif 844 | 845 | static const struct file_operations ashmem_fops = { 846 | .owner = THIS_MODULE, 847 | .open = ashmem_open, 848 | .release = ashmem_release, 849 | .read = ashmem_read, 850 | .llseek = ashmem_llseek, 851 | .mmap = ashmem_mmap, 852 | .unlocked_ioctl = ashmem_ioctl, 853 | #ifdef CONFIG_COMPAT 854 | .compat_ioctl = compat_ashmem_ioctl, 855 | #endif 856 | }; 857 | 858 | static struct miscdevice ashmem_misc = { 859 | .minor = MISC_DYNAMIC_MINOR, 860 | .name = "ashmem", 861 | .fops = &ashmem_fops, 862 | }; 863 | 864 | static int __init ashmem_init(void) 865 | { 866 | int ret; 867 | 868 | ashmem_area_cachep = kmem_cache_create("ashmem_area_cache", 869 | sizeof(struct ashmem_area), 870 | 0, 0, NULL); 871 | if (unlikely(!ashmem_area_cachep)) { 872 | pr_err("failed to create slab cache\n"); 873 | return -ENOMEM; 874 | } 875 | 876 | ashmem_range_cachep = kmem_cache_create("ashmem_range_cache", 877 | sizeof(struct ashmem_range), 878 | 0, 0, NULL); 879 | if (unlikely(!ashmem_range_cachep)) { 880 | pr_err("failed to create slab cache\n"); 881 | return -ENOMEM; 882 | } 883 | 884 | ret = misc_register(&ashmem_misc); 885 | if (unlikely(ret)) { 886 | pr_err("failed to register misc device!\n"); 887 | return ret; 888 | } 889 | 890 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,7,0)) 891 | ashmem_shrinker = shrinker_alloc(0, "android-ashmem"); 892 | if (ashmem_shrinker) { 893 | ashmem_shrinker->count_objects = ashmem_shrink_count; 894 | ashmem_shrinker->scan_objects = ashmem_shrink_scan; 895 | ashmem_shrinker->seeks = DEFAULT_SEEKS * 4; 896 | shrinker_register(ashmem_shrinker); 897 | } else { 898 | return -ENOMEM; 899 | } 900 | #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(6,0,0)) 901 | register_shrinker(&ashmem_shrinker, "android-ashmem"); 902 | #else 903 | register_shrinker(&ashmem_shrinker); 904 | #endif 905 | 906 | return 0; 907 | } 908 | 909 | static void __exit ashmem_exit(void) 910 | { 911 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,7,0)) 912 | shrinker_free(ashmem_shrinker); 913 | #else 914 | unregister_shrinker(&ashmem_shrinker); 915 | #endif 916 | 917 | misc_deregister(&ashmem_misc); 918 | 919 | kmem_cache_destroy(ashmem_range_cachep); 920 | kmem_cache_destroy(ashmem_area_cachep); 921 | } 922 | 923 | module_init(ashmem_init); 924 | module_exit(ashmem_exit); 925 | 926 | MODULE_LICENSE("GPL"); 927 | -------------------------------------------------------------------------------- /ashmem/ashmem.h: -------------------------------------------------------------------------------- 1 | /* 2 | * include/linux/ashmem.h 3 | * 4 | * Copyright 2008 Google Inc. 5 | * Author: Robert Love 6 | * 7 | * This file is dual licensed. It may be redistributed and/or modified 8 | * under the terms of the Apache 2.0 License OR version 2 of the GNU 9 | * General Public License. 10 | */ 11 | 12 | #ifndef _LINUX_ASHMEM_H 13 | #define _LINUX_ASHMEM_H 14 | 15 | #include 16 | #include 17 | #include 18 | 19 | #include "uapi/ashmem.h" 20 | 21 | /* support of 32bit userspace on 64bit platforms */ 22 | #ifdef CONFIG_COMPAT 23 | #define COMPAT_ASHMEM_SET_SIZE _IOW(__ASHMEMIOC, 3, compat_size_t) 24 | #define COMPAT_ASHMEM_SET_PROT_MASK _IOW(__ASHMEMIOC, 5, unsigned int) 25 | #endif 26 | 27 | #endif /* _LINUX_ASHMEM_H */ 28 | -------------------------------------------------------------------------------- /ashmem/deps.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)) 7 | 8 | #ifndef CONFIG_KPROBES 9 | # error "Your kernel does not support KProbes, but this is required to compile ashmem as a kernel module on kernel 5.7 and later" 10 | #endif 11 | 12 | typedef unsigned long (*kallsyms_lookup_name_t)(const char *name); 13 | 14 | static int dummy_kprobe_handler(struct kprobe *p, struct pt_regs *regs) 15 | { 16 | return 0; 17 | } 18 | 19 | static kallsyms_lookup_name_t get_kallsyms_lookup_name_ptr(void) 20 | { 21 | struct kprobe probe; 22 | int ret; 23 | kallsyms_lookup_name_t addr; 24 | 25 | memset(&probe, 0, sizeof(probe)); 26 | probe.pre_handler = dummy_kprobe_handler; 27 | probe.symbol_name = "kallsyms_lookup_name"; 28 | ret = register_kprobe(&probe); 29 | if (ret) 30 | return NULL; 31 | addr = (kallsyms_lookup_name_t) probe.addr; 32 | unregister_kprobe(&probe); 33 | 34 | return addr; 35 | } 36 | #endif 37 | 38 | /* 39 | * On kernel 5.7 and later, kallsyms_lookup_name() can no longer be called from a kernel 40 | * module for reasons described here: https://lwn.net/Articles/813350/ 41 | * As ashmem really needs to use kallsysms_lookup_name() to access some kernel 42 | * functions that otherwise wouldn't be accessible, KProbes are used on later 43 | * kernels to get the address of kallsysms_lookup_name(). The function is 44 | * afterwards used just as before. This is a very dirty hack though and the much 45 | * better solution would be if all the functions that are currently resolved 46 | * with kallsysms_lookup_name() would get an EXPORT_SYMBOL() annotation to 47 | * make them directly accessible to kernel modules. 48 | */ 49 | static unsigned long kallsyms_lookup_name_wrapper(const char *name) 50 | { 51 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)) 52 | static kallsyms_lookup_name_t func_ptr = NULL; 53 | if (!func_ptr) 54 | func_ptr = get_kallsyms_lookup_name_ptr(); 55 | 56 | return func_ptr(name); 57 | #else 58 | return kallsyms_lookup_name(name); 59 | #endif 60 | } 61 | 62 | static int (*shmem_zero_setup_ptr)(struct vm_area_struct *) = NULL; 63 | 64 | int shmem_zero_setup(struct vm_area_struct *vma) 65 | { 66 | if (!shmem_zero_setup_ptr) 67 | shmem_zero_setup_ptr = kallsyms_lookup_name_wrapper("shmem_zero_setup"); 68 | return shmem_zero_setup_ptr(vma); 69 | } 70 | -------------------------------------------------------------------------------- /ashmem/dkms.conf: -------------------------------------------------------------------------------- 1 | PACKAGE_NAME="anbox-ashmem" 2 | PACKAGE_VERSION="1" 3 | CLEAN="make clean" 4 | MAKE[0]="make all KERNEL_SRC=/lib/modules/$kernelver/build" 5 | BUILT_MODULE_NAME[0]="ashmem_linux" 6 | DEST_MODULE_LOCATION[0]="/updates" 7 | AUTOINSTALL="yes" 8 | -------------------------------------------------------------------------------- /ashmem/uapi/ashmem.h: -------------------------------------------------------------------------------- 1 | /* 2 | * drivers/staging/android/uapi/ashmem.h 3 | * 4 | * Copyright 2008 Google Inc. 5 | * Author: Robert Love 6 | * 7 | * This file is dual licensed. It may be redistributed and/or modified 8 | * under the terms of the Apache 2.0 License OR version 2 of the GNU 9 | * General Public License. 10 | */ 11 | 12 | #ifndef _UAPI_LINUX_ASHMEM_H 13 | #define _UAPI_LINUX_ASHMEM_H 14 | 15 | #include 16 | 17 | #define ASHMEM_NAME_LEN 256 18 | 19 | #define ASHMEM_NAME_DEF "dev/ashmem" 20 | 21 | /* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */ 22 | #define ASHMEM_NOT_PURGED 0 23 | #define ASHMEM_WAS_PURGED 1 24 | 25 | /* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? */ 26 | #define ASHMEM_IS_UNPINNED 0 27 | #define ASHMEM_IS_PINNED 1 28 | 29 | struct ashmem_pin { 30 | __u32 offset; /* offset into region, in bytes, page-aligned */ 31 | __u32 len; /* length forward from offset, in bytes, page-aligned */ 32 | }; 33 | 34 | #define __ASHMEMIOC 0x77 35 | 36 | #define ASHMEM_SET_NAME _IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN]) 37 | #define ASHMEM_GET_NAME _IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN]) 38 | #define ASHMEM_SET_SIZE _IOW(__ASHMEMIOC, 3, size_t) 39 | #define ASHMEM_GET_SIZE _IO(__ASHMEMIOC, 4) 40 | #define ASHMEM_SET_PROT_MASK _IOW(__ASHMEMIOC, 5, unsigned long) 41 | #define ASHMEM_GET_PROT_MASK _IO(__ASHMEMIOC, 6) 42 | #define ASHMEM_PIN _IOW(__ASHMEMIOC, 7, struct ashmem_pin) 43 | #define ASHMEM_UNPIN _IOW(__ASHMEMIOC, 8, struct ashmem_pin) 44 | #define ASHMEM_GET_PIN_STATUS _IO(__ASHMEMIOC, 9) 45 | #define ASHMEM_PURGE_ALL_CACHES _IO(__ASHMEMIOC, 10) 46 | 47 | #endif /* _UAPI_LINUX_ASHMEM_H */ 48 | -------------------------------------------------------------------------------- /binder/Makefile: -------------------------------------------------------------------------------- 1 | ccflags-y += -I$(src) -Wno-int-conversion -DCONFIG_ANDROID_BINDER_DEVICES="\"binder\"" -DCONFIG_ANDROID_BINDERFS="y" 2 | obj-m := binder_linux.o 3 | binder_linux-y := deps.o binder.o binder_alloc.o binderfs.o 4 | 5 | KERNEL_SRC ?= /lib/modules/$(shell uname -r)/build 6 | 7 | all: 8 | $(MAKE) -C $(KERNEL_SRC) V=0 M=$$PWD 9 | 10 | install: 11 | cp binder_linux.ko $(DESTDIR)/ 12 | 13 | clean: 14 | rm -rf *.o *.ko *.mod.c *.symvers *.order .*.cmd .tmp_versions 15 | -------------------------------------------------------------------------------- /binder/binder.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 2 | /* 3 | * Copyright (C) 2008 Google, Inc. 4 | * 5 | * Based on, but no longer compatible with, the original 6 | * OpenBinder.org binder driver interface, which is: 7 | * 8 | * Copyright (c) 2005 Palmsource, Inc. 9 | * 10 | * This software is licensed under the terms of the GNU General Public 11 | * License version 2, as published by the Free Software Foundation, and 12 | * may be copied, distributed, and modified under those terms. 13 | * 14 | * This program is distributed in the hope that it will be useful, 15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 | * GNU General Public License for more details. 18 | * 19 | */ 20 | 21 | #ifndef _UAPI_LINUX_BINDER_H 22 | #define _UAPI_LINUX_BINDER_H 23 | 24 | #define BINDER_IPC_32BIT 1 25 | 26 | #include 27 | #include 28 | 29 | #define B_PACK_CHARS(c1, c2, c3, c4) \ 30 | ((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4)) 31 | #define B_TYPE_LARGE 0x85 32 | 33 | enum { 34 | BINDER_TYPE_BINDER = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE), 35 | BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE), 36 | BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE), 37 | BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE), 38 | BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE), 39 | BINDER_TYPE_FDA = B_PACK_CHARS('f', 'd', 'a', B_TYPE_LARGE), 40 | BINDER_TYPE_PTR = B_PACK_CHARS('p', 't', '*', B_TYPE_LARGE), 41 | }; 42 | 43 | enum { 44 | FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff, 45 | FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100, 46 | 47 | /** 48 | * @FLAT_BINDER_FLAG_TXN_SECURITY_CTX: request security contexts 49 | * 50 | * Only when set, causes senders to include their security 51 | * context 52 | */ 53 | FLAT_BINDER_FLAG_TXN_SECURITY_CTX = 0x1000, 54 | }; 55 | 56 | #ifdef BINDER_IPC_32BIT 57 | typedef __u32 binder_size_t; 58 | typedef __u32 binder_uintptr_t; 59 | #else 60 | typedef __u64 binder_size_t; 61 | typedef __u64 binder_uintptr_t; 62 | #endif 63 | 64 | /** 65 | * struct binder_object_header - header shared by all binder metadata objects. 66 | * @type: type of the object 67 | */ 68 | struct binder_object_header { 69 | __u32 type; 70 | }; 71 | 72 | /* 73 | * This is the flattened representation of a Binder object for transfer 74 | * between processes. The 'offsets' supplied as part of a binder transaction 75 | * contains offsets into the data where these structures occur. The Binder 76 | * driver takes care of re-writing the structure type and data as it moves 77 | * between processes. 78 | */ 79 | struct flat_binder_object { 80 | struct binder_object_header hdr; 81 | __u32 flags; 82 | 83 | /* 8 bytes of data. */ 84 | union { 85 | binder_uintptr_t binder; /* local object */ 86 | __u32 handle; /* remote object */ 87 | }; 88 | 89 | /* extra data associated with local object */ 90 | binder_uintptr_t cookie; 91 | }; 92 | 93 | /** 94 | * struct binder_fd_object - describes a filedescriptor to be fixed up. 95 | * @hdr: common header structure 96 | * @pad_flags: padding to remain compatible with old userspace code 97 | * @pad_binder: padding to remain compatible with old userspace code 98 | * @fd: file descriptor 99 | * @cookie: opaque data, used by user-space 100 | */ 101 | struct binder_fd_object { 102 | struct binder_object_header hdr; 103 | __u32 pad_flags; 104 | union { 105 | binder_uintptr_t pad_binder; 106 | __u32 fd; 107 | }; 108 | 109 | binder_uintptr_t cookie; 110 | }; 111 | 112 | /* struct binder_buffer_object - object describing a userspace buffer 113 | * @hdr: common header structure 114 | * @flags: one or more BINDER_BUFFER_* flags 115 | * @buffer: address of the buffer 116 | * @length: length of the buffer 117 | * @parent: index in offset array pointing to parent buffer 118 | * @parent_offset: offset in @parent pointing to this buffer 119 | * 120 | * A binder_buffer object represents an object that the 121 | * binder kernel driver can copy verbatim to the target 122 | * address space. A buffer itself may be pointed to from 123 | * within another buffer, meaning that the pointer inside 124 | * that other buffer needs to be fixed up as well. This 125 | * can be done by setting the BINDER_BUFFER_FLAG_HAS_PARENT 126 | * flag in @flags, by setting @parent buffer to the index 127 | * in the offset array pointing to the parent binder_buffer_object, 128 | * and by setting @parent_offset to the offset in the parent buffer 129 | * at which the pointer to this buffer is located. 130 | */ 131 | struct binder_buffer_object { 132 | struct binder_object_header hdr; 133 | __u32 flags; 134 | binder_uintptr_t buffer; 135 | binder_size_t length; 136 | binder_size_t parent; 137 | binder_size_t parent_offset; 138 | }; 139 | 140 | enum { 141 | BINDER_BUFFER_FLAG_HAS_PARENT = 0x01, 142 | }; 143 | 144 | /* struct binder_fd_array_object - object describing an array of fds in a buffer 145 | * @hdr: common header structure 146 | * @pad: padding to ensure correct alignment 147 | * @num_fds: number of file descriptors in the buffer 148 | * @parent: index in offset array to buffer holding the fd array 149 | * @parent_offset: start offset of fd array in the buffer 150 | * 151 | * A binder_fd_array object represents an array of file 152 | * descriptors embedded in a binder_buffer_object. It is 153 | * different from a regular binder_buffer_object because it 154 | * describes a list of file descriptors to fix up, not an opaque 155 | * blob of memory, and hence the kernel needs to treat it differently. 156 | * 157 | * An example of how this would be used is with Android's 158 | * native_handle_t object, which is a struct with a list of integers 159 | * and a list of file descriptors. The native_handle_t struct itself 160 | * will be represented by a struct binder_buffer_objct, whereas the 161 | * embedded list of file descriptors is represented by a 162 | * struct binder_fd_array_object with that binder_buffer_object as 163 | * a parent. 164 | */ 165 | struct binder_fd_array_object { 166 | struct binder_object_header hdr; 167 | __u32 pad; 168 | binder_size_t num_fds; 169 | binder_size_t parent; 170 | binder_size_t parent_offset; 171 | }; 172 | 173 | /* 174 | * On 64-bit platforms where user code may run in 32-bits the driver must 175 | * translate the buffer (and local binder) addresses appropriately. 176 | */ 177 | 178 | struct binder_write_read { 179 | binder_size_t write_size; /* bytes to write */ 180 | binder_size_t write_consumed; /* bytes consumed by driver */ 181 | binder_uintptr_t write_buffer; 182 | binder_size_t read_size; /* bytes to read */ 183 | binder_size_t read_consumed; /* bytes consumed by driver */ 184 | binder_uintptr_t read_buffer; 185 | }; 186 | 187 | /* Use with BINDER_VERSION, driver fills in fields. */ 188 | struct binder_version { 189 | /* driver protocol version -- increment with incompatible change */ 190 | __s32 protocol_version; 191 | }; 192 | 193 | /* This is the current protocol version. */ 194 | #ifdef BINDER_IPC_32BIT 195 | #define BINDER_CURRENT_PROTOCOL_VERSION 7 196 | #else 197 | #define BINDER_CURRENT_PROTOCOL_VERSION 8 198 | #endif 199 | 200 | /* 201 | * Use with BINDER_GET_NODE_DEBUG_INFO, driver reads ptr, writes to all fields. 202 | * Set ptr to NULL for the first call to get the info for the first node, and 203 | * then repeat the call passing the previously returned value to get the next 204 | * nodes. ptr will be 0 when there are no more nodes. 205 | */ 206 | struct binder_node_debug_info { 207 | binder_uintptr_t ptr; 208 | binder_uintptr_t cookie; 209 | __u32 has_strong_ref; 210 | __u32 has_weak_ref; 211 | }; 212 | 213 | struct binder_node_info_for_ref { 214 | __u32 handle; 215 | __u32 strong_count; 216 | __u32 weak_count; 217 | __u32 reserved1; 218 | __u32 reserved2; 219 | __u32 reserved3; 220 | }; 221 | 222 | #define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read) 223 | #define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64) 224 | #define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32) 225 | #define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, __s32) 226 | #define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32) 227 | #define BINDER_THREAD_EXIT _IOW('b', 8, __s32) 228 | #define BINDER_VERSION _IOWR('b', 9, struct binder_version) 229 | #define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info) 230 | #define BINDER_GET_NODE_INFO_FOR_REF _IOWR('b', 12, struct binder_node_info_for_ref) 231 | #define BINDER_SET_CONTEXT_MGR_EXT _IOW('b', 13, struct flat_binder_object) 232 | 233 | /* 234 | * NOTE: Two special error codes you should check for when calling 235 | * in to the driver are: 236 | * 237 | * EINTR -- The operation has been interupted. This should be 238 | * handled by retrying the ioctl() until a different error code 239 | * is returned. 240 | * 241 | * ECONNREFUSED -- The driver is no longer accepting operations 242 | * from your process. That is, the process is being destroyed. 243 | * You should handle this by exiting from your process. Note 244 | * that once this error code is returned, all further calls to 245 | * the driver from any thread will return this same code. 246 | */ 247 | 248 | enum transaction_flags { 249 | TF_ONE_WAY = 0x01, /* this is a one-way call: async, no return */ 250 | TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */ 251 | TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */ 252 | TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */ 253 | TF_CLEAR_BUF = 0x20, /* clear buffer on txn complete */ 254 | }; 255 | 256 | struct binder_transaction_data { 257 | /* The first two are only used for bcTRANSACTION and brTRANSACTION, 258 | * identifying the target and contents of the transaction. 259 | */ 260 | union { 261 | /* target descriptor of command transaction */ 262 | __u32 handle; 263 | /* target descriptor of return transaction */ 264 | binder_uintptr_t ptr; 265 | } target; 266 | binder_uintptr_t cookie; /* target object cookie */ 267 | __u32 code; /* transaction command */ 268 | 269 | /* General information about the transaction. */ 270 | __u32 flags; 271 | pid_t sender_pid; 272 | uid_t sender_euid; 273 | binder_size_t data_size; /* number of bytes of data */ 274 | binder_size_t offsets_size; /* number of bytes of offsets */ 275 | 276 | /* If this transaction is inline, the data immediately 277 | * follows here; otherwise, it ends with a pointer to 278 | * the data buffer. 279 | */ 280 | union { 281 | struct { 282 | /* transaction data */ 283 | binder_uintptr_t buffer; 284 | /* offsets from buffer to flat_binder_object structs */ 285 | binder_uintptr_t offsets; 286 | } ptr; 287 | __u8 buf[8]; 288 | } data; 289 | }; 290 | 291 | struct binder_transaction_data_secctx { 292 | struct binder_transaction_data transaction_data; 293 | binder_uintptr_t secctx; 294 | }; 295 | 296 | struct binder_transaction_data_sg { 297 | struct binder_transaction_data transaction_data; 298 | binder_size_t buffers_size; 299 | }; 300 | 301 | struct binder_ptr_cookie { 302 | binder_uintptr_t ptr; 303 | binder_uintptr_t cookie; 304 | }; 305 | 306 | struct binder_handle_cookie { 307 | __u32 handle; 308 | binder_uintptr_t cookie; 309 | } __packed; 310 | 311 | struct binder_pri_desc { 312 | __s32 priority; 313 | __u32 desc; 314 | }; 315 | 316 | struct binder_pri_ptr_cookie { 317 | __s32 priority; 318 | binder_uintptr_t ptr; 319 | binder_uintptr_t cookie; 320 | }; 321 | 322 | enum binder_driver_return_protocol { 323 | BR_ERROR = _IOR('r', 0, __s32), 324 | /* 325 | * int: error code 326 | */ 327 | 328 | BR_OK = _IO('r', 1), 329 | /* No parameters! */ 330 | 331 | BR_TRANSACTION_SEC_CTX = _IOR('r', 2, 332 | struct binder_transaction_data_secctx), 333 | /* 334 | * binder_transaction_data_secctx: the received command. 335 | */ 336 | BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data), 337 | BR_REPLY = _IOR('r', 3, struct binder_transaction_data), 338 | /* 339 | * binder_transaction_data: the received command. 340 | */ 341 | 342 | BR_ACQUIRE_RESULT = _IOR('r', 4, __s32), 343 | /* 344 | * not currently supported 345 | * int: 0 if the last bcATTEMPT_ACQUIRE was not successful. 346 | * Else the remote object has acquired a primary reference. 347 | */ 348 | 349 | BR_DEAD_REPLY = _IO('r', 5), 350 | /* 351 | * The target of the last transaction (either a bcTRANSACTION or 352 | * a bcATTEMPT_ACQUIRE) is no longer with us. No parameters. 353 | */ 354 | 355 | BR_TRANSACTION_COMPLETE = _IO('r', 6), 356 | /* 357 | * No parameters... always refers to the last transaction requested 358 | * (including replies). Note that this will be sent even for 359 | * asynchronous transactions. 360 | */ 361 | 362 | BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie), 363 | BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie), 364 | BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie), 365 | BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie), 366 | /* 367 | * void *: ptr to binder 368 | * void *: cookie for binder 369 | */ 370 | 371 | BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie), 372 | /* 373 | * not currently supported 374 | * int: priority 375 | * void *: ptr to binder 376 | * void *: cookie for binder 377 | */ 378 | 379 | BR_NOOP = _IO('r', 12), 380 | /* 381 | * No parameters. Do nothing and examine the next command. It exists 382 | * primarily so that we can replace it with a BR_SPAWN_LOOPER command. 383 | */ 384 | 385 | BR_SPAWN_LOOPER = _IO('r', 13), 386 | /* 387 | * No parameters. The driver has determined that a process has no 388 | * threads waiting to service incoming transactions. When a process 389 | * receives this command, it must spawn a new service thread and 390 | * register it via bcENTER_LOOPER. 391 | */ 392 | 393 | BR_FINISHED = _IO('r', 14), 394 | /* 395 | * not currently supported 396 | * stop threadpool thread 397 | */ 398 | 399 | BR_DEAD_BINDER = _IOR('r', 15, binder_uintptr_t), 400 | /* 401 | * void *: cookie 402 | */ 403 | BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, binder_uintptr_t), 404 | /* 405 | * void *: cookie 406 | */ 407 | 408 | BR_FAILED_REPLY = _IO('r', 17), 409 | /* 410 | * The last transaction (either a bcTRANSACTION or 411 | * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters. 412 | */ 413 | }; 414 | 415 | enum binder_driver_command_protocol { 416 | BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data), 417 | BC_REPLY = _IOW('c', 1, struct binder_transaction_data), 418 | /* 419 | * binder_transaction_data: the sent command. 420 | */ 421 | 422 | BC_ACQUIRE_RESULT = _IOW('c', 2, __s32), 423 | /* 424 | * not currently supported 425 | * int: 0 if the last BR_ATTEMPT_ACQUIRE was not successful. 426 | * Else you have acquired a primary reference on the object. 427 | */ 428 | 429 | BC_FREE_BUFFER = _IOW('c', 3, binder_uintptr_t), 430 | /* 431 | * void *: ptr to transaction data received on a read 432 | */ 433 | 434 | BC_INCREFS = _IOW('c', 4, __u32), 435 | BC_ACQUIRE = _IOW('c', 5, __u32), 436 | BC_RELEASE = _IOW('c', 6, __u32), 437 | BC_DECREFS = _IOW('c', 7, __u32), 438 | /* 439 | * int: descriptor 440 | */ 441 | 442 | BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie), 443 | BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie), 444 | /* 445 | * void *: ptr to binder 446 | * void *: cookie for binder 447 | */ 448 | 449 | BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc), 450 | /* 451 | * not currently supported 452 | * int: priority 453 | * int: descriptor 454 | */ 455 | 456 | BC_REGISTER_LOOPER = _IO('c', 11), 457 | /* 458 | * No parameters. 459 | * Register a spawned looper thread with the device. 460 | */ 461 | 462 | BC_ENTER_LOOPER = _IO('c', 12), 463 | BC_EXIT_LOOPER = _IO('c', 13), 464 | /* 465 | * No parameters. 466 | * These two commands are sent as an application-level thread 467 | * enters and exits the binder loop, respectively. They are 468 | * used so the binder can have an accurate count of the number 469 | * of looping threads it has available. 470 | */ 471 | 472 | BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, 473 | struct binder_handle_cookie), 474 | /* 475 | * int: handle 476 | * void *: cookie 477 | */ 478 | 479 | BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, 480 | struct binder_handle_cookie), 481 | /* 482 | * int: handle 483 | * void *: cookie 484 | */ 485 | 486 | BC_DEAD_BINDER_DONE = _IOW('c', 16, binder_uintptr_t), 487 | /* 488 | * void *: cookie 489 | */ 490 | 491 | BC_TRANSACTION_SG = _IOW('c', 17, struct binder_transaction_data_sg), 492 | BC_REPLY_SG = _IOW('c', 18, struct binder_transaction_data_sg), 493 | /* 494 | * binder_transaction_data_sg: the sent command. 495 | */ 496 | }; 497 | 498 | #endif /* _UAPI_LINUX_BINDER_H */ 499 | 500 | -------------------------------------------------------------------------------- /binder/binder_alloc.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0-only 2 | /* binder_alloc.c 3 | * 4 | * Android IPC Subsystem 5 | * 6 | * Copyright (C) 2007-2017 Google, Inc. 7 | */ 8 | 9 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 10 | 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include "binder_alloc.h" 28 | #include "binder_trace.h" 29 | 30 | struct list_lru binder_alloc_lru; 31 | 32 | static DEFINE_MUTEX(binder_alloc_mmap_lock); 33 | 34 | enum { 35 | BINDER_DEBUG_USER_ERROR = 1U << 0, 36 | BINDER_DEBUG_OPEN_CLOSE = 1U << 1, 37 | BINDER_DEBUG_BUFFER_ALLOC = 1U << 2, 38 | BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3, 39 | }; 40 | static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR; 41 | 42 | module_param_named(debug_mask, binder_alloc_debug_mask, 43 | uint, 0644); 44 | 45 | #define binder_alloc_debug(mask, x...) \ 46 | do { \ 47 | if (binder_alloc_debug_mask & mask) \ 48 | pr_info_ratelimited(x); \ 49 | } while (0) 50 | 51 | static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer) 52 | { 53 | return list_entry(buffer->entry.next, struct binder_buffer, entry); 54 | } 55 | 56 | static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer) 57 | { 58 | return list_entry(buffer->entry.prev, struct binder_buffer, entry); 59 | } 60 | 61 | static size_t binder_alloc_buffer_size(struct binder_alloc *alloc, 62 | struct binder_buffer *buffer) 63 | { 64 | if (list_is_last(&buffer->entry, &alloc->buffers)) 65 | return alloc->buffer + alloc->buffer_size - buffer->user_data; 66 | return binder_buffer_next(buffer)->user_data - buffer->user_data; 67 | } 68 | 69 | static void binder_insert_free_buffer(struct binder_alloc *alloc, 70 | struct binder_buffer *new_buffer) 71 | { 72 | struct rb_node **p = &alloc->free_buffers.rb_node; 73 | struct rb_node *parent = NULL; 74 | struct binder_buffer *buffer; 75 | size_t buffer_size; 76 | size_t new_buffer_size; 77 | 78 | BUG_ON(!new_buffer->free); 79 | 80 | new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer); 81 | 82 | binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, 83 | "%d: add free buffer, size %zd, at %pK\n", 84 | alloc->pid, new_buffer_size, new_buffer); 85 | 86 | while (*p) { 87 | parent = *p; 88 | buffer = rb_entry(parent, struct binder_buffer, rb_node); 89 | BUG_ON(!buffer->free); 90 | 91 | buffer_size = binder_alloc_buffer_size(alloc, buffer); 92 | 93 | if (new_buffer_size < buffer_size) 94 | p = &parent->rb_left; 95 | else 96 | p = &parent->rb_right; 97 | } 98 | rb_link_node(&new_buffer->rb_node, parent, p); 99 | rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers); 100 | } 101 | 102 | static void binder_insert_allocated_buffer_locked( 103 | struct binder_alloc *alloc, struct binder_buffer *new_buffer) 104 | { 105 | struct rb_node **p = &alloc->allocated_buffers.rb_node; 106 | struct rb_node *parent = NULL; 107 | struct binder_buffer *buffer; 108 | 109 | BUG_ON(new_buffer->free); 110 | 111 | while (*p) { 112 | parent = *p; 113 | buffer = rb_entry(parent, struct binder_buffer, rb_node); 114 | BUG_ON(buffer->free); 115 | 116 | if (new_buffer->user_data < buffer->user_data) 117 | p = &parent->rb_left; 118 | else if (new_buffer->user_data > buffer->user_data) 119 | p = &parent->rb_right; 120 | else 121 | BUG(); 122 | } 123 | rb_link_node(&new_buffer->rb_node, parent, p); 124 | rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers); 125 | } 126 | 127 | static struct binder_buffer *binder_alloc_prepare_to_free_locked( 128 | struct binder_alloc *alloc, 129 | uintptr_t user_ptr) 130 | { 131 | struct rb_node *n = alloc->allocated_buffers.rb_node; 132 | struct binder_buffer *buffer; 133 | void __user *uptr; 134 | 135 | uptr = (void __user *)user_ptr; 136 | 137 | while (n) { 138 | buffer = rb_entry(n, struct binder_buffer, rb_node); 139 | BUG_ON(buffer->free); 140 | 141 | if (uptr < buffer->user_data) 142 | n = n->rb_left; 143 | else if (uptr > buffer->user_data) 144 | n = n->rb_right; 145 | else { 146 | /* 147 | * Guard against user threads attempting to 148 | * free the buffer when in use by kernel or 149 | * after it's already been freed. 150 | */ 151 | if (!buffer->allow_user_free) 152 | return ERR_PTR(-EPERM); 153 | buffer->allow_user_free = 0; 154 | return buffer; 155 | } 156 | } 157 | return NULL; 158 | } 159 | 160 | /** 161 | * binder_alloc_prepare_to_free() - get buffer given user ptr 162 | * @alloc: binder_alloc for this proc 163 | * @user_ptr: User pointer to buffer data 164 | * 165 | * Validate userspace pointer to buffer data and return buffer corresponding to 166 | * that user pointer. Search the rb tree for buffer that matches user data 167 | * pointer. 168 | * 169 | * Return: Pointer to buffer or NULL 170 | */ 171 | struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc, 172 | uintptr_t user_ptr) 173 | { 174 | struct binder_buffer *buffer; 175 | 176 | mutex_lock(&alloc->mutex); 177 | buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr); 178 | mutex_unlock(&alloc->mutex); 179 | return buffer; 180 | } 181 | 182 | static int binder_update_page_range(struct binder_alloc *alloc, int allocate, 183 | void __user *start, void __user *end) 184 | { 185 | void __user *page_addr; 186 | unsigned long user_page_addr; 187 | struct binder_lru_page *page; 188 | struct vm_area_struct *vma = NULL; 189 | struct mm_struct *mm = NULL; 190 | bool need_mm = false; 191 | 192 | binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, 193 | "%d: %s pages %pK-%pK\n", alloc->pid, 194 | allocate ? "allocate" : "free", start, end); 195 | 196 | if (end <= start) 197 | return 0; 198 | 199 | trace_binder_update_page_range(alloc, allocate, start, end); 200 | 201 | if (allocate == 0) 202 | goto free_range; 203 | 204 | for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { 205 | page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE]; 206 | if (!page->page_ptr) { 207 | need_mm = true; 208 | break; 209 | } 210 | } 211 | 212 | if (need_mm && mmget_not_zero(alloc->vma_vm_mm)) 213 | mm = alloc->vma_vm_mm; 214 | 215 | if (mm) { 216 | mmap_read_lock(mm); 217 | vma = alloc->vma; 218 | } 219 | 220 | if (!vma && need_mm) { 221 | binder_alloc_debug(BINDER_DEBUG_USER_ERROR, 222 | "%d: binder_alloc_buf failed to map pages in userspace, no vma\n", 223 | alloc->pid); 224 | goto err_no_vma; 225 | } 226 | 227 | for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { 228 | int ret; 229 | bool on_lru; 230 | size_t index; 231 | 232 | index = (page_addr - alloc->buffer) / PAGE_SIZE; 233 | page = &alloc->pages[index]; 234 | 235 | if (page->page_ptr) { 236 | trace_binder_alloc_lru_start(alloc, index); 237 | 238 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,8,0)) 239 | on_lru = list_lru_del_obj(&binder_alloc_lru, &page->lru); 240 | #else 241 | on_lru = list_lru_del(&binder_alloc_lru, &page->lru); 242 | #endif 243 | WARN_ON(!on_lru); 244 | 245 | trace_binder_alloc_lru_end(alloc, index); 246 | continue; 247 | } 248 | 249 | if (WARN_ON(!vma)) 250 | goto err_page_ptr_cleared; 251 | 252 | trace_binder_alloc_page_start(alloc, index); 253 | page->page_ptr = alloc_page(GFP_KERNEL | 254 | __GFP_HIGHMEM | 255 | __GFP_ZERO); 256 | if (!page->page_ptr) { 257 | pr_err("%d: binder_alloc_buf failed for page at %pK\n", 258 | alloc->pid, page_addr); 259 | goto err_alloc_page_failed; 260 | } 261 | page->alloc = alloc; 262 | INIT_LIST_HEAD(&page->lru); 263 | 264 | user_page_addr = (uintptr_t)page_addr; 265 | ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr); 266 | if (ret) { 267 | pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n", 268 | alloc->pid, user_page_addr); 269 | goto err_vm_insert_page_failed; 270 | } 271 | 272 | if (index + 1 > alloc->pages_high) 273 | alloc->pages_high = index + 1; 274 | 275 | trace_binder_alloc_page_end(alloc, index); 276 | } 277 | if (mm) { 278 | mmap_read_unlock(mm); 279 | mmput(mm); 280 | } 281 | return 0; 282 | 283 | free_range: 284 | for (page_addr = end - PAGE_SIZE; 1; page_addr -= PAGE_SIZE) { 285 | bool ret; 286 | size_t index; 287 | 288 | index = (page_addr - alloc->buffer) / PAGE_SIZE; 289 | page = &alloc->pages[index]; 290 | 291 | trace_binder_free_lru_start(alloc, index); 292 | 293 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,8,0)) 294 | ret = list_lru_add_obj(&binder_alloc_lru, &page->lru); 295 | #else 296 | ret = list_lru_add(&binder_alloc_lru, &page->lru); 297 | #endif 298 | WARN_ON(!ret); 299 | 300 | trace_binder_free_lru_end(alloc, index); 301 | if (page_addr == start) 302 | break; 303 | continue; 304 | 305 | err_vm_insert_page_failed: 306 | __free_page(page->page_ptr); 307 | page->page_ptr = NULL; 308 | err_alloc_page_failed: 309 | err_page_ptr_cleared: 310 | if (page_addr == start) 311 | break; 312 | } 313 | err_no_vma: 314 | if (mm) { 315 | mmap_read_unlock(mm); 316 | mmput(mm); 317 | } 318 | return vma ? -ENOMEM : -ESRCH; 319 | } 320 | 321 | 322 | static inline void binder_alloc_set_vma(struct binder_alloc *alloc, 323 | struct vm_area_struct *vma) 324 | { 325 | if (vma) 326 | alloc->vma_vm_mm = vma->vm_mm; 327 | /* 328 | * If we see alloc->vma is not NULL, buffer data structures set up 329 | * completely. Look at smp_rmb side binder_alloc_get_vma. 330 | * We also want to guarantee new alloc->vma_vm_mm is always visible 331 | * if alloc->vma is set. 332 | */ 333 | smp_wmb(); 334 | alloc->vma = vma; 335 | } 336 | 337 | static inline struct vm_area_struct *binder_alloc_get_vma( 338 | struct binder_alloc *alloc) 339 | { 340 | struct vm_area_struct *vma = NULL; 341 | 342 | if (alloc->vma) { 343 | /* Look at description in binder_alloc_set_vma */ 344 | smp_rmb(); 345 | vma = alloc->vma; 346 | } 347 | return vma; 348 | } 349 | 350 | static void debug_low_async_space_locked(struct binder_alloc *alloc, int pid) 351 | { 352 | /* 353 | * Find the amount and size of buffers allocated by the current caller; 354 | * The idea is that once we cross the threshold, whoever is responsible 355 | * for the low async space is likely to try to send another async txn, 356 | * and at some point we'll catch them in the act. This is more efficient 357 | * than keeping a map per pid. 358 | */ 359 | struct rb_node *n; 360 | struct binder_buffer *buffer; 361 | size_t total_alloc_size = 0; 362 | size_t num_buffers = 0; 363 | 364 | for (n = rb_first(&alloc->allocated_buffers); n != NULL; 365 | n = rb_next(n)) { 366 | buffer = rb_entry(n, struct binder_buffer, rb_node); 367 | if (buffer->pid != pid) 368 | continue; 369 | if (!buffer->async_transaction) 370 | continue; 371 | total_alloc_size += binder_alloc_buffer_size(alloc, buffer) 372 | + sizeof(struct binder_buffer); 373 | num_buffers++; 374 | } 375 | 376 | /* 377 | * Warn if this pid has more than 50 transactions, or more than 50% of 378 | * async space (which is 25% of total buffer size). 379 | */ 380 | if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) { 381 | binder_alloc_debug(BINDER_DEBUG_USER_ERROR, 382 | "%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n", 383 | alloc->pid, pid, num_buffers, total_alloc_size); 384 | } 385 | } 386 | 387 | static struct binder_buffer *binder_alloc_new_buf_locked( 388 | struct binder_alloc *alloc, 389 | size_t data_size, 390 | size_t offsets_size, 391 | size_t extra_buffers_size, 392 | int is_async, 393 | int pid) 394 | { 395 | struct rb_node *n = alloc->free_buffers.rb_node; 396 | struct binder_buffer *buffer; 397 | size_t buffer_size; 398 | struct rb_node *best_fit = NULL; 399 | void __user *has_page_addr; 400 | void __user *end_page_addr; 401 | size_t size, data_offsets_size; 402 | int ret; 403 | 404 | if (!binder_alloc_get_vma(alloc)) { 405 | binder_alloc_debug(BINDER_DEBUG_USER_ERROR, 406 | "%d: binder_alloc_buf, no vma\n", 407 | alloc->pid); 408 | return ERR_PTR(-ESRCH); 409 | } 410 | 411 | data_offsets_size = ALIGN(data_size, sizeof(void *)) + 412 | ALIGN(offsets_size, sizeof(void *)); 413 | 414 | if (data_offsets_size < data_size || data_offsets_size < offsets_size) { 415 | binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, 416 | "%d: got transaction with invalid size %zd-%zd\n", 417 | alloc->pid, data_size, offsets_size); 418 | return ERR_PTR(-EINVAL); 419 | } 420 | size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *)); 421 | if (size < data_offsets_size || size < extra_buffers_size) { 422 | binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, 423 | "%d: got transaction with invalid extra_buffers_size %zd\n", 424 | alloc->pid, extra_buffers_size); 425 | return ERR_PTR(-EINVAL); 426 | } 427 | if (is_async && 428 | alloc->free_async_space < size + sizeof(struct binder_buffer)) { 429 | binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, 430 | "%d: binder_alloc_buf size %zd failed, no async space left\n", 431 | alloc->pid, size); 432 | return ERR_PTR(-ENOSPC); 433 | } 434 | 435 | /* Pad 0-size buffers so they get assigned unique addresses */ 436 | size = max(size, sizeof(void *)); 437 | 438 | while (n) { 439 | buffer = rb_entry(n, struct binder_buffer, rb_node); 440 | BUG_ON(!buffer->free); 441 | buffer_size = binder_alloc_buffer_size(alloc, buffer); 442 | 443 | if (size < buffer_size) { 444 | best_fit = n; 445 | n = n->rb_left; 446 | } else if (size > buffer_size) 447 | n = n->rb_right; 448 | else { 449 | best_fit = n; 450 | break; 451 | } 452 | } 453 | if (best_fit == NULL) { 454 | size_t allocated_buffers = 0; 455 | size_t largest_alloc_size = 0; 456 | size_t total_alloc_size = 0; 457 | size_t free_buffers = 0; 458 | size_t largest_free_size = 0; 459 | size_t total_free_size = 0; 460 | 461 | for (n = rb_first(&alloc->allocated_buffers); n != NULL; 462 | n = rb_next(n)) { 463 | buffer = rb_entry(n, struct binder_buffer, rb_node); 464 | buffer_size = binder_alloc_buffer_size(alloc, buffer); 465 | allocated_buffers++; 466 | total_alloc_size += buffer_size; 467 | if (buffer_size > largest_alloc_size) 468 | largest_alloc_size = buffer_size; 469 | } 470 | for (n = rb_first(&alloc->free_buffers); n != NULL; 471 | n = rb_next(n)) { 472 | buffer = rb_entry(n, struct binder_buffer, rb_node); 473 | buffer_size = binder_alloc_buffer_size(alloc, buffer); 474 | free_buffers++; 475 | total_free_size += buffer_size; 476 | if (buffer_size > largest_free_size) 477 | largest_free_size = buffer_size; 478 | } 479 | binder_alloc_debug(BINDER_DEBUG_USER_ERROR, 480 | "%d: binder_alloc_buf size %zd failed, no address space\n", 481 | alloc->pid, size); 482 | binder_alloc_debug(BINDER_DEBUG_USER_ERROR, 483 | "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n", 484 | total_alloc_size, allocated_buffers, 485 | largest_alloc_size, total_free_size, 486 | free_buffers, largest_free_size); 487 | return ERR_PTR(-ENOSPC); 488 | } 489 | if (n == NULL) { 490 | buffer = rb_entry(best_fit, struct binder_buffer, rb_node); 491 | buffer_size = binder_alloc_buffer_size(alloc, buffer); 492 | } 493 | 494 | binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, 495 | "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n", 496 | alloc->pid, size, buffer, buffer_size); 497 | 498 | has_page_addr = (void __user *) 499 | (((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK); 500 | WARN_ON(n && buffer_size != size); 501 | end_page_addr = 502 | (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size); 503 | if (end_page_addr > has_page_addr) 504 | end_page_addr = has_page_addr; 505 | ret = binder_update_page_range(alloc, 1, (void __user *) 506 | PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr); 507 | if (ret) 508 | return ERR_PTR(ret); 509 | 510 | if (buffer_size != size) { 511 | struct binder_buffer *new_buffer; 512 | 513 | new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); 514 | if (!new_buffer) { 515 | pr_err("%s: %d failed to alloc new buffer struct\n", 516 | __func__, alloc->pid); 517 | goto err_alloc_buf_struct_failed; 518 | } 519 | new_buffer->user_data = (u8 __user *)buffer->user_data + size; 520 | list_add(&new_buffer->entry, &buffer->entry); 521 | new_buffer->free = 1; 522 | binder_insert_free_buffer(alloc, new_buffer); 523 | } 524 | 525 | rb_erase(best_fit, &alloc->free_buffers); 526 | buffer->free = 0; 527 | buffer->allow_user_free = 0; 528 | binder_insert_allocated_buffer_locked(alloc, buffer); 529 | binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, 530 | "%d: binder_alloc_buf size %zd got %pK\n", 531 | alloc->pid, size, buffer); 532 | buffer->data_size = data_size; 533 | buffer->offsets_size = offsets_size; 534 | buffer->async_transaction = is_async; 535 | buffer->extra_buffers_size = extra_buffers_size; 536 | buffer->pid = pid; 537 | if (is_async) { 538 | alloc->free_async_space -= size + sizeof(struct binder_buffer); 539 | binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, 540 | "%d: binder_alloc_buf size %zd async free %zd\n", 541 | alloc->pid, size, alloc->free_async_space); 542 | if (alloc->free_async_space < alloc->buffer_size / 10) { 543 | /* 544 | * Start detecting spammers once we have less than 20% 545 | * of async space left (which is less than 10% of total 546 | * buffer size). 547 | */ 548 | debug_low_async_space_locked(alloc, pid); 549 | } 550 | } 551 | return buffer; 552 | 553 | err_alloc_buf_struct_failed: 554 | binder_update_page_range(alloc, 0, (void __user *) 555 | PAGE_ALIGN((uintptr_t)buffer->user_data), 556 | end_page_addr); 557 | return ERR_PTR(-ENOMEM); 558 | } 559 | 560 | /** 561 | * binder_alloc_new_buf() - Allocate a new binder buffer 562 | * @alloc: binder_alloc for this proc 563 | * @data_size: size of user data buffer 564 | * @offsets_size: user specified buffer offset 565 | * @extra_buffers_size: size of extra space for meta-data (eg, security context) 566 | * @is_async: buffer for async transaction 567 | * @pid: pid to attribute allocation to (used for debugging) 568 | * 569 | * Allocate a new buffer given the requested sizes. Returns 570 | * the kernel version of the buffer pointer. The size allocated 571 | * is the sum of the three given sizes (each rounded up to 572 | * pointer-sized boundary) 573 | * 574 | * Return: The allocated buffer or %NULL if error 575 | */ 576 | struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, 577 | size_t data_size, 578 | size_t offsets_size, 579 | size_t extra_buffers_size, 580 | int is_async, 581 | int pid) 582 | { 583 | struct binder_buffer *buffer; 584 | 585 | mutex_lock(&alloc->mutex); 586 | buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size, 587 | extra_buffers_size, is_async, pid); 588 | mutex_unlock(&alloc->mutex); 589 | return buffer; 590 | } 591 | 592 | static void __user *buffer_start_page(struct binder_buffer *buffer) 593 | { 594 | return (void __user *)((uintptr_t)buffer->user_data & PAGE_MASK); 595 | } 596 | 597 | static void __user *prev_buffer_end_page(struct binder_buffer *buffer) 598 | { 599 | return (void __user *) 600 | (((uintptr_t)(buffer->user_data) - 1) & PAGE_MASK); 601 | } 602 | 603 | static void binder_delete_free_buffer(struct binder_alloc *alloc, 604 | struct binder_buffer *buffer) 605 | { 606 | struct binder_buffer *prev, *next = NULL; 607 | bool to_free = true; 608 | 609 | BUG_ON(alloc->buffers.next == &buffer->entry); 610 | prev = binder_buffer_prev(buffer); 611 | BUG_ON(!prev->free); 612 | if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) { 613 | to_free = false; 614 | binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, 615 | "%d: merge free, buffer %pK share page with %pK\n", 616 | alloc->pid, buffer->user_data, 617 | prev->user_data); 618 | } 619 | 620 | if (!list_is_last(&buffer->entry, &alloc->buffers)) { 621 | next = binder_buffer_next(buffer); 622 | if (buffer_start_page(next) == buffer_start_page(buffer)) { 623 | to_free = false; 624 | binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, 625 | "%d: merge free, buffer %pK share page with %pK\n", 626 | alloc->pid, 627 | buffer->user_data, 628 | next->user_data); 629 | } 630 | } 631 | 632 | if (PAGE_ALIGNED(buffer->user_data)) { 633 | binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, 634 | "%d: merge free, buffer start %pK is page aligned\n", 635 | alloc->pid, buffer->user_data); 636 | to_free = false; 637 | } 638 | 639 | if (to_free) { 640 | binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, 641 | "%d: merge free, buffer %pK do not share page with %pK or %pK\n", 642 | alloc->pid, buffer->user_data, 643 | prev->user_data, 644 | next ? next->user_data : NULL); 645 | binder_update_page_range(alloc, 0, buffer_start_page(buffer), 646 | buffer_start_page(buffer) + PAGE_SIZE); 647 | } 648 | list_del(&buffer->entry); 649 | kfree(buffer); 650 | } 651 | 652 | static void binder_free_buf_locked(struct binder_alloc *alloc, 653 | struct binder_buffer *buffer) 654 | { 655 | size_t size, buffer_size; 656 | 657 | buffer_size = binder_alloc_buffer_size(alloc, buffer); 658 | 659 | size = ALIGN(buffer->data_size, sizeof(void *)) + 660 | ALIGN(buffer->offsets_size, sizeof(void *)) + 661 | ALIGN(buffer->extra_buffers_size, sizeof(void *)); 662 | 663 | binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, 664 | "%d: binder_free_buf %pK size %zd buffer_size %zd\n", 665 | alloc->pid, buffer, size, buffer_size); 666 | 667 | BUG_ON(buffer->free); 668 | BUG_ON(size > buffer_size); 669 | BUG_ON(buffer->transaction != NULL); 670 | BUG_ON(buffer->user_data < alloc->buffer); 671 | BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size); 672 | 673 | if (buffer->async_transaction) { 674 | alloc->free_async_space += size + sizeof(struct binder_buffer); 675 | 676 | binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, 677 | "%d: binder_free_buf size %zd async free %zd\n", 678 | alloc->pid, size, alloc->free_async_space); 679 | } 680 | 681 | binder_update_page_range(alloc, 0, 682 | (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data), 683 | (void __user *)(((uintptr_t) 684 | buffer->user_data + buffer_size) & PAGE_MASK)); 685 | 686 | rb_erase(&buffer->rb_node, &alloc->allocated_buffers); 687 | buffer->free = 1; 688 | if (!list_is_last(&buffer->entry, &alloc->buffers)) { 689 | struct binder_buffer *next = binder_buffer_next(buffer); 690 | 691 | if (next->free) { 692 | rb_erase(&next->rb_node, &alloc->free_buffers); 693 | binder_delete_free_buffer(alloc, next); 694 | } 695 | } 696 | if (alloc->buffers.next != &buffer->entry) { 697 | struct binder_buffer *prev = binder_buffer_prev(buffer); 698 | 699 | if (prev->free) { 700 | binder_delete_free_buffer(alloc, buffer); 701 | rb_erase(&prev->rb_node, &alloc->free_buffers); 702 | buffer = prev; 703 | } 704 | } 705 | binder_insert_free_buffer(alloc, buffer); 706 | } 707 | 708 | static void binder_alloc_clear_buf(struct binder_alloc *alloc, 709 | struct binder_buffer *buffer); 710 | /** 711 | * binder_alloc_free_buf() - free a binder buffer 712 | * @alloc: binder_alloc for this proc 713 | * @buffer: kernel pointer to buffer 714 | * 715 | * Free the buffer allocated via binder_alloc_new_buf() 716 | */ 717 | void binder_alloc_free_buf(struct binder_alloc *alloc, 718 | struct binder_buffer *buffer) 719 | { 720 | /* 721 | * We could eliminate the call to binder_alloc_clear_buf() 722 | * from binder_alloc_deferred_release() by moving this to 723 | * binder_alloc_free_buf_locked(). However, that could 724 | * increase contention for the alloc mutex if clear_on_free 725 | * is used frequently for large buffers. The mutex is not 726 | * needed for correctness here. 727 | */ 728 | if (buffer->clear_on_free) { 729 | binder_alloc_clear_buf(alloc, buffer); 730 | buffer->clear_on_free = false; 731 | } 732 | mutex_lock(&alloc->mutex); 733 | binder_free_buf_locked(alloc, buffer); 734 | mutex_unlock(&alloc->mutex); 735 | } 736 | 737 | /** 738 | * binder_alloc_mmap_handler() - map virtual address space for proc 739 | * @alloc: alloc structure for this proc 740 | * @vma: vma passed to mmap() 741 | * 742 | * Called by binder_mmap() to initialize the space specified in 743 | * vma for allocating binder buffers 744 | * 745 | * Return: 746 | * 0 = success 747 | * -EBUSY = address space already mapped 748 | * -ENOMEM = failed to map memory to given address space 749 | */ 750 | int binder_alloc_mmap_handler(struct binder_alloc *alloc, 751 | struct vm_area_struct *vma) 752 | { 753 | int ret; 754 | const char *failure_string; 755 | struct binder_buffer *buffer; 756 | 757 | mutex_lock(&binder_alloc_mmap_lock); 758 | if (alloc->buffer_size) { 759 | ret = -EBUSY; 760 | failure_string = "already mapped"; 761 | goto err_already_mapped; 762 | } 763 | alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start, 764 | SZ_4M); 765 | mutex_unlock(&binder_alloc_mmap_lock); 766 | 767 | alloc->buffer = (void __user *)vma->vm_start; 768 | 769 | alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE, 770 | sizeof(alloc->pages[0]), 771 | GFP_KERNEL); 772 | if (alloc->pages == NULL) { 773 | ret = -ENOMEM; 774 | failure_string = "alloc page array"; 775 | goto err_alloc_pages_failed; 776 | } 777 | 778 | buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); 779 | if (!buffer) { 780 | ret = -ENOMEM; 781 | failure_string = "alloc buffer struct"; 782 | goto err_alloc_buf_struct_failed; 783 | } 784 | 785 | buffer->user_data = alloc->buffer; 786 | list_add(&buffer->entry, &alloc->buffers); 787 | buffer->free = 1; 788 | binder_insert_free_buffer(alloc, buffer); 789 | alloc->free_async_space = alloc->buffer_size / 2; 790 | binder_alloc_set_vma(alloc, vma); 791 | mmgrab(alloc->vma_vm_mm); 792 | 793 | return 0; 794 | 795 | err_alloc_buf_struct_failed: 796 | kfree(alloc->pages); 797 | alloc->pages = NULL; 798 | err_alloc_pages_failed: 799 | alloc->buffer = NULL; 800 | mutex_lock(&binder_alloc_mmap_lock); 801 | alloc->buffer_size = 0; 802 | err_already_mapped: 803 | mutex_unlock(&binder_alloc_mmap_lock); 804 | binder_alloc_debug(BINDER_DEBUG_USER_ERROR, 805 | "%s: %d %lx-%lx %s failed %d\n", __func__, 806 | alloc->pid, vma->vm_start, vma->vm_end, 807 | failure_string, ret); 808 | return ret; 809 | } 810 | 811 | 812 | void binder_alloc_deferred_release(struct binder_alloc *alloc) 813 | { 814 | struct rb_node *n; 815 | int buffers, page_count; 816 | struct binder_buffer *buffer; 817 | 818 | buffers = 0; 819 | mutex_lock(&alloc->mutex); 820 | BUG_ON(alloc->vma); 821 | 822 | while ((n = rb_first(&alloc->allocated_buffers))) { 823 | buffer = rb_entry(n, struct binder_buffer, rb_node); 824 | 825 | /* Transaction should already have been freed */ 826 | BUG_ON(buffer->transaction); 827 | 828 | if (buffer->clear_on_free) { 829 | binder_alloc_clear_buf(alloc, buffer); 830 | buffer->clear_on_free = false; 831 | } 832 | binder_free_buf_locked(alloc, buffer); 833 | buffers++; 834 | } 835 | 836 | while (!list_empty(&alloc->buffers)) { 837 | buffer = list_first_entry(&alloc->buffers, 838 | struct binder_buffer, entry); 839 | WARN_ON(!buffer->free); 840 | 841 | list_del(&buffer->entry); 842 | WARN_ON_ONCE(!list_empty(&alloc->buffers)); 843 | kfree(buffer); 844 | } 845 | 846 | page_count = 0; 847 | if (alloc->pages) { 848 | int i; 849 | 850 | for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { 851 | void __user *page_addr; 852 | bool on_lru; 853 | 854 | if (!alloc->pages[i].page_ptr) 855 | continue; 856 | 857 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,8,0)) 858 | on_lru = list_lru_del_obj(&binder_alloc_lru, 859 | &alloc->pages[i].lru); 860 | #else 861 | on_lru = list_lru_del(&binder_alloc_lru, 862 | &alloc->pages[i].lru); 863 | #endif 864 | page_addr = alloc->buffer + i * PAGE_SIZE; 865 | binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, 866 | "%s: %d: page %d at %pK %s\n", 867 | __func__, alloc->pid, i, page_addr, 868 | on_lru ? "on lru" : "active"); 869 | __free_page(alloc->pages[i].page_ptr); 870 | page_count++; 871 | } 872 | kfree(alloc->pages); 873 | } 874 | mutex_unlock(&alloc->mutex); 875 | if (alloc->vma_vm_mm) 876 | mmdrop(alloc->vma_vm_mm); 877 | 878 | binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE, 879 | "%s: %d buffers %d, pages %d\n", 880 | __func__, alloc->pid, buffers, page_count); 881 | } 882 | 883 | static void print_binder_buffer(struct seq_file *m, const char *prefix, 884 | struct binder_buffer *buffer) 885 | { 886 | seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n", 887 | prefix, buffer->debug_id, buffer->user_data, 888 | buffer->data_size, buffer->offsets_size, 889 | buffer->extra_buffers_size, 890 | buffer->transaction ? "active" : "delivered"); 891 | } 892 | 893 | /** 894 | * binder_alloc_print_allocated() - print buffer info 895 | * @m: seq_file for output via seq_printf() 896 | * @alloc: binder_alloc for this proc 897 | * 898 | * Prints information about every buffer associated with 899 | * the binder_alloc state to the given seq_file 900 | */ 901 | void binder_alloc_print_allocated(struct seq_file *m, 902 | struct binder_alloc *alloc) 903 | { 904 | struct rb_node *n; 905 | 906 | mutex_lock(&alloc->mutex); 907 | for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) 908 | print_binder_buffer(m, " buffer", 909 | rb_entry(n, struct binder_buffer, rb_node)); 910 | mutex_unlock(&alloc->mutex); 911 | } 912 | 913 | /** 914 | * binder_alloc_print_pages() - print page usage 915 | * @m: seq_file for output via seq_printf() 916 | * @alloc: binder_alloc for this proc 917 | */ 918 | void binder_alloc_print_pages(struct seq_file *m, 919 | struct binder_alloc *alloc) 920 | { 921 | struct binder_lru_page *page; 922 | int i; 923 | int active = 0; 924 | int lru = 0; 925 | int free = 0; 926 | 927 | mutex_lock(&alloc->mutex); 928 | /* 929 | * Make sure the binder_alloc is fully initialized, otherwise we might 930 | * read inconsistent state. 931 | */ 932 | if (binder_alloc_get_vma(alloc) != NULL) { 933 | for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { 934 | page = &alloc->pages[i]; 935 | if (!page->page_ptr) 936 | free++; 937 | else if (list_empty(&page->lru)) 938 | active++; 939 | else 940 | lru++; 941 | } 942 | } 943 | mutex_unlock(&alloc->mutex); 944 | seq_printf(m, " pages: %d:%d:%d\n", active, lru, free); 945 | seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high); 946 | } 947 | 948 | /** 949 | * binder_alloc_get_allocated_count() - return count of buffers 950 | * @alloc: binder_alloc for this proc 951 | * 952 | * Return: count of allocated buffers 953 | */ 954 | int binder_alloc_get_allocated_count(struct binder_alloc *alloc) 955 | { 956 | struct rb_node *n; 957 | int count = 0; 958 | 959 | mutex_lock(&alloc->mutex); 960 | for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) 961 | count++; 962 | mutex_unlock(&alloc->mutex); 963 | return count; 964 | } 965 | 966 | 967 | /** 968 | * binder_alloc_vma_close() - invalidate address space 969 | * @alloc: binder_alloc for this proc 970 | * 971 | * Called from binder_vma_close() when releasing address space. 972 | * Clears alloc->vma to prevent new incoming transactions from 973 | * allocating more buffers. 974 | */ 975 | void binder_alloc_vma_close(struct binder_alloc *alloc) 976 | { 977 | binder_alloc_set_vma(alloc, NULL); 978 | } 979 | 980 | /** 981 | * binder_alloc_free_page() - shrinker callback to free pages 982 | * @item: item to free 983 | * @lock: lock protecting the item 984 | * @cb_arg: callback argument 985 | * 986 | * Called from list_lru_walk() in binder_shrink_scan() to free 987 | * up pages when the system is under memory pressure. 988 | */ 989 | enum lru_status binder_alloc_free_page(struct list_head *item, 990 | struct list_lru_one *lru, 991 | spinlock_t *lock, 992 | void *cb_arg) 993 | __must_hold(lock) 994 | { 995 | struct mm_struct *mm = NULL; 996 | struct binder_lru_page *page = container_of(item, 997 | struct binder_lru_page, 998 | lru); 999 | struct binder_alloc *alloc; 1000 | uintptr_t page_addr; 1001 | size_t index; 1002 | struct vm_area_struct *vma; 1003 | 1004 | alloc = page->alloc; 1005 | if (!mutex_trylock(&alloc->mutex)) 1006 | goto err_get_alloc_mutex_failed; 1007 | 1008 | if (!page->page_ptr) 1009 | goto err_page_already_freed; 1010 | 1011 | index = page - alloc->pages; 1012 | page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; 1013 | 1014 | mm = alloc->vma_vm_mm; 1015 | if (!mmget_not_zero(mm)) 1016 | goto err_mmget; 1017 | if (!mmap_read_trylock(mm)) 1018 | goto err_mmap_read_lock_failed; 1019 | vma = binder_alloc_get_vma(alloc); 1020 | 1021 | list_lru_isolate(lru, item); 1022 | spin_unlock(lock); 1023 | 1024 | if (vma) { 1025 | trace_binder_unmap_user_start(alloc, index); 1026 | 1027 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,3,0)) 1028 | zap_page_range_single(vma, page_addr, PAGE_SIZE, NULL); 1029 | #else 1030 | zap_page_range(vma, page_addr, PAGE_SIZE); 1031 | #endif 1032 | 1033 | trace_binder_unmap_user_end(alloc, index); 1034 | } 1035 | mmap_read_unlock(mm); 1036 | mmput_async(mm); 1037 | 1038 | trace_binder_unmap_kernel_start(alloc, index); 1039 | 1040 | __free_page(page->page_ptr); 1041 | page->page_ptr = NULL; 1042 | 1043 | trace_binder_unmap_kernel_end(alloc, index); 1044 | 1045 | spin_lock(lock); 1046 | mutex_unlock(&alloc->mutex); 1047 | return LRU_REMOVED_RETRY; 1048 | 1049 | err_mmap_read_lock_failed: 1050 | mmput_async(mm); 1051 | err_mmget: 1052 | err_page_already_freed: 1053 | mutex_unlock(&alloc->mutex); 1054 | err_get_alloc_mutex_failed: 1055 | return LRU_SKIP; 1056 | } 1057 | 1058 | static unsigned long 1059 | binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc) 1060 | { 1061 | unsigned long ret = list_lru_count(&binder_alloc_lru); 1062 | return ret; 1063 | } 1064 | 1065 | static unsigned long 1066 | binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) 1067 | { 1068 | unsigned long ret; 1069 | 1070 | ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page, 1071 | NULL, sc->nr_to_scan); 1072 | return ret; 1073 | } 1074 | 1075 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,7,0)) 1076 | static struct shrinker *binder_shrinker; 1077 | #else 1078 | static struct shrinker binder_shrinker = { 1079 | .count_objects = binder_shrink_count, 1080 | .scan_objects = binder_shrink_scan, 1081 | .seeks = DEFAULT_SEEKS, 1082 | }; 1083 | #endif 1084 | 1085 | /** 1086 | * binder_alloc_init() - called by binder_open() for per-proc initialization 1087 | * @alloc: binder_alloc for this proc 1088 | * 1089 | * Called from binder_open() to initialize binder_alloc fields for 1090 | * new binder proc 1091 | */ 1092 | void binder_alloc_init(struct binder_alloc *alloc) 1093 | { 1094 | alloc->pid = current->group_leader->pid; 1095 | mutex_init(&alloc->mutex); 1096 | INIT_LIST_HEAD(&alloc->buffers); 1097 | } 1098 | 1099 | int binder_alloc_shrinker_init(void) 1100 | { 1101 | int ret = list_lru_init(&binder_alloc_lru); 1102 | 1103 | if (ret == 0) { 1104 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,7,0)) 1105 | binder_shrinker = shrinker_alloc(0, "android-binder"); 1106 | if (binder_shrinker) { 1107 | binder_shrinker->count_objects = binder_shrink_count; 1108 | binder_shrinker->scan_objects = binder_shrink_scan; 1109 | shrinker_register(binder_shrinker); 1110 | } else { 1111 | ret = -ENOMEM; 1112 | } 1113 | #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(6,0,0)) 1114 | ret = register_shrinker(&binder_shrinker, "android-binder"); 1115 | #else 1116 | ret = register_shrinker(&binder_shrinker); 1117 | #endif 1118 | if (ret) 1119 | list_lru_destroy(&binder_alloc_lru); 1120 | } 1121 | return ret; 1122 | } 1123 | 1124 | void binder_alloc_shrinker_exit(void) 1125 | { 1126 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,7,0)) 1127 | shrinker_free(binder_shrinker); 1128 | #else 1129 | unregister_shrinker(&binder_shrinker); 1130 | #endif 1131 | list_lru_destroy(&binder_alloc_lru); 1132 | } 1133 | 1134 | /** 1135 | * check_buffer() - verify that buffer/offset is safe to access 1136 | * @alloc: binder_alloc for this proc 1137 | * @buffer: binder buffer to be accessed 1138 | * @offset: offset into @buffer data 1139 | * @bytes: bytes to access from offset 1140 | * 1141 | * Check that the @offset/@bytes are within the size of the given 1142 | * @buffer and that the buffer is currently active and not freeable. 1143 | * Offsets must also be multiples of sizeof(u32). The kernel is 1144 | * allowed to touch the buffer in two cases: 1145 | * 1146 | * 1) when the buffer is being created: 1147 | * (buffer->free == 0 && buffer->allow_user_free == 0) 1148 | * 2) when the buffer is being torn down: 1149 | * (buffer->free == 0 && buffer->transaction == NULL). 1150 | * 1151 | * Return: true if the buffer is safe to access 1152 | */ 1153 | static inline bool check_buffer(struct binder_alloc *alloc, 1154 | struct binder_buffer *buffer, 1155 | binder_size_t offset, size_t bytes) 1156 | { 1157 | size_t buffer_size = binder_alloc_buffer_size(alloc, buffer); 1158 | 1159 | return buffer_size >= bytes && 1160 | offset <= buffer_size - bytes && 1161 | IS_ALIGNED(offset, sizeof(u32)) && 1162 | !buffer->free && 1163 | (!buffer->allow_user_free || !buffer->transaction); 1164 | } 1165 | 1166 | /** 1167 | * binder_alloc_get_page() - get kernel pointer for given buffer offset 1168 | * @alloc: binder_alloc for this proc 1169 | * @buffer: binder buffer to be accessed 1170 | * @buffer_offset: offset into @buffer data 1171 | * @pgoffp: address to copy final page offset to 1172 | * 1173 | * Lookup the struct page corresponding to the address 1174 | * at @buffer_offset into @buffer->user_data. If @pgoffp is not 1175 | * NULL, the byte-offset into the page is written there. 1176 | * 1177 | * The caller is responsible to ensure that the offset points 1178 | * to a valid address within the @buffer and that @buffer is 1179 | * not freeable by the user. Since it can't be freed, we are 1180 | * guaranteed that the corresponding elements of @alloc->pages[] 1181 | * cannot change. 1182 | * 1183 | * Return: struct page 1184 | */ 1185 | static struct page *binder_alloc_get_page(struct binder_alloc *alloc, 1186 | struct binder_buffer *buffer, 1187 | binder_size_t buffer_offset, 1188 | pgoff_t *pgoffp) 1189 | { 1190 | binder_size_t buffer_space_offset = buffer_offset + 1191 | (buffer->user_data - alloc->buffer); 1192 | pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK; 1193 | size_t index = buffer_space_offset >> PAGE_SHIFT; 1194 | struct binder_lru_page *lru_page; 1195 | 1196 | lru_page = &alloc->pages[index]; 1197 | *pgoffp = pgoff; 1198 | return lru_page->page_ptr; 1199 | } 1200 | 1201 | /** 1202 | * binder_alloc_clear_buf() - zero out buffer 1203 | * @alloc: binder_alloc for this proc 1204 | * @buffer: binder buffer to be cleared 1205 | * 1206 | * memset the given buffer to 0 1207 | */ 1208 | static void binder_alloc_clear_buf(struct binder_alloc *alloc, 1209 | struct binder_buffer *buffer) 1210 | { 1211 | size_t bytes = binder_alloc_buffer_size(alloc, buffer); 1212 | binder_size_t buffer_offset = 0; 1213 | 1214 | while (bytes) { 1215 | unsigned long size; 1216 | struct page *page; 1217 | pgoff_t pgoff; 1218 | void *kptr; 1219 | 1220 | page = binder_alloc_get_page(alloc, buffer, 1221 | buffer_offset, &pgoff); 1222 | size = min_t(size_t, bytes, PAGE_SIZE - pgoff); 1223 | kptr = kmap(page) + pgoff; 1224 | memset(kptr, 0, size); 1225 | kunmap(page); 1226 | bytes -= size; 1227 | buffer_offset += size; 1228 | } 1229 | } 1230 | 1231 | /** 1232 | * binder_alloc_copy_user_to_buffer() - copy src user to tgt user 1233 | * @alloc: binder_alloc for this proc 1234 | * @buffer: binder buffer to be accessed 1235 | * @buffer_offset: offset into @buffer data 1236 | * @from: userspace pointer to source buffer 1237 | * @bytes: bytes to copy 1238 | * 1239 | * Copy bytes from source userspace to target buffer. 1240 | * 1241 | * Return: bytes remaining to be copied 1242 | */ 1243 | unsigned long 1244 | binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc, 1245 | struct binder_buffer *buffer, 1246 | binder_size_t buffer_offset, 1247 | const void __user *from, 1248 | size_t bytes) 1249 | { 1250 | if (!check_buffer(alloc, buffer, buffer_offset, bytes)) 1251 | return bytes; 1252 | 1253 | while (bytes) { 1254 | unsigned long size; 1255 | unsigned long ret; 1256 | struct page *page; 1257 | pgoff_t pgoff; 1258 | void *kptr; 1259 | 1260 | page = binder_alloc_get_page(alloc, buffer, 1261 | buffer_offset, &pgoff); 1262 | size = min_t(size_t, bytes, PAGE_SIZE - pgoff); 1263 | kptr = kmap(page) + pgoff; 1264 | ret = copy_from_user(kptr, from, size); 1265 | kunmap(page); 1266 | if (ret) 1267 | return bytes - size + ret; 1268 | bytes -= size; 1269 | from += size; 1270 | buffer_offset += size; 1271 | } 1272 | return 0; 1273 | } 1274 | 1275 | static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc, 1276 | bool to_buffer, 1277 | struct binder_buffer *buffer, 1278 | binder_size_t buffer_offset, 1279 | void *ptr, 1280 | size_t bytes) 1281 | { 1282 | /* All copies must be 32-bit aligned and 32-bit size */ 1283 | if (!check_buffer(alloc, buffer, buffer_offset, bytes)) 1284 | return -EINVAL; 1285 | 1286 | while (bytes) { 1287 | unsigned long size; 1288 | struct page *page; 1289 | pgoff_t pgoff; 1290 | void *tmpptr; 1291 | void *base_ptr; 1292 | 1293 | page = binder_alloc_get_page(alloc, buffer, 1294 | buffer_offset, &pgoff); 1295 | size = min_t(size_t, bytes, PAGE_SIZE - pgoff); 1296 | base_ptr = kmap_atomic(page); 1297 | tmpptr = base_ptr + pgoff; 1298 | if (to_buffer) 1299 | memcpy(tmpptr, ptr, size); 1300 | else 1301 | memcpy(ptr, tmpptr, size); 1302 | /* 1303 | * kunmap_atomic() takes care of flushing the cache 1304 | * if this device has VIVT cache arch 1305 | */ 1306 | kunmap_atomic(base_ptr); 1307 | bytes -= size; 1308 | pgoff = 0; 1309 | ptr = ptr + size; 1310 | buffer_offset += size; 1311 | } 1312 | return 0; 1313 | } 1314 | 1315 | int binder_alloc_copy_to_buffer(struct binder_alloc *alloc, 1316 | struct binder_buffer *buffer, 1317 | binder_size_t buffer_offset, 1318 | void *src, 1319 | size_t bytes) 1320 | { 1321 | return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset, 1322 | src, bytes); 1323 | } 1324 | 1325 | int binder_alloc_copy_from_buffer(struct binder_alloc *alloc, 1326 | void *dest, 1327 | struct binder_buffer *buffer, 1328 | binder_size_t buffer_offset, 1329 | size_t bytes) 1330 | { 1331 | return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset, 1332 | dest, bytes); 1333 | } 1334 | 1335 | -------------------------------------------------------------------------------- /binder/binder_alloc.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* 3 | * Copyright (C) 2017 Google, Inc. 4 | */ 5 | 6 | #ifndef _LINUX_BINDER_ALLOC_H 7 | #define _LINUX_BINDER_ALLOC_H 8 | 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | 18 | extern struct list_lru binder_alloc_lru; 19 | struct binder_transaction; 20 | 21 | /** 22 | * struct binder_buffer - buffer used for binder transactions 23 | * @entry: entry alloc->buffers 24 | * @rb_node: node for allocated_buffers/free_buffers rb trees 25 | * @free: %true if buffer is free 26 | * @clear_on_free: %true if buffer must be zeroed after use 27 | * @allow_user_free: %true if user is allowed to free buffer 28 | * @async_transaction: %true if buffer is in use for an async txn 29 | * @debug_id: unique ID for debugging 30 | * @transaction: pointer to associated struct binder_transaction 31 | * @target_node: struct binder_node associated with this buffer 32 | * @data_size: size of @transaction data 33 | * @offsets_size: size of array of offsets 34 | * @extra_buffers_size: size of space for other objects (like sg lists) 35 | * @user_data: user pointer to base of buffer space 36 | * @pid: pid to attribute the buffer to (caller) 37 | * 38 | * Bookkeeping structure for binder transaction buffers 39 | */ 40 | struct binder_buffer { 41 | struct list_head entry; /* free and allocated entries by address */ 42 | struct rb_node rb_node; /* free entry by size or allocated entry */ 43 | /* by address */ 44 | unsigned free:1; 45 | unsigned clear_on_free:1; 46 | unsigned allow_user_free:1; 47 | unsigned async_transaction:1; 48 | unsigned debug_id:28; 49 | 50 | struct binder_transaction *transaction; 51 | 52 | struct binder_node *target_node; 53 | size_t data_size; 54 | size_t offsets_size; 55 | size_t extra_buffers_size; 56 | void __user *user_data; 57 | int pid; 58 | }; 59 | 60 | /** 61 | * struct binder_lru_page - page object used for binder shrinker 62 | * @page_ptr: pointer to physical page in mmap'd space 63 | * @lru: entry in binder_alloc_lru 64 | * @alloc: binder_alloc for a proc 65 | */ 66 | struct binder_lru_page { 67 | struct list_head lru; 68 | struct page *page_ptr; 69 | struct binder_alloc *alloc; 70 | }; 71 | 72 | /** 73 | * struct binder_alloc - per-binder proc state for binder allocator 74 | * @vma: vm_area_struct passed to mmap_handler 75 | * (invarient after mmap) 76 | * @tsk: tid for task that called init for this proc 77 | * (invariant after init) 78 | * @vma_vm_mm: copy of vma->vm_mm (invarient after mmap) 79 | * @buffer: base of per-proc address space mapped via mmap 80 | * @buffers: list of all buffers for this proc 81 | * @free_buffers: rb tree of buffers available for allocation 82 | * sorted by size 83 | * @allocated_buffers: rb tree of allocated buffers sorted by address 84 | * @free_async_space: VA space available for async buffers. This is 85 | * initialized at mmap time to 1/2 the full VA space 86 | * @pages: array of binder_lru_page 87 | * @buffer_size: size of address space specified via mmap 88 | * @pid: pid for associated binder_proc (invariant after init) 89 | * @pages_high: high watermark of offset in @pages 90 | * 91 | * Bookkeeping structure for per-proc address space management for binder 92 | * buffers. It is normally initialized during binder_init() and binder_mmap() 93 | * calls. The address space is used for both user-visible buffers and for 94 | * struct binder_buffer objects used to track the user buffers 95 | */ 96 | struct binder_alloc { 97 | struct mutex mutex; 98 | struct vm_area_struct *vma; 99 | struct mm_struct *vma_vm_mm; 100 | void __user *buffer; 101 | struct list_head buffers; 102 | struct rb_root free_buffers; 103 | struct rb_root allocated_buffers; 104 | size_t free_async_space; 105 | struct binder_lru_page *pages; 106 | size_t buffer_size; 107 | uint32_t buffer_free; 108 | int pid; 109 | size_t pages_high; 110 | }; 111 | 112 | #ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST 113 | void binder_selftest_alloc(struct binder_alloc *alloc); 114 | #else 115 | static inline void binder_selftest_alloc(struct binder_alloc *alloc) {} 116 | #endif 117 | enum lru_status binder_alloc_free_page(struct list_head *item, 118 | struct list_lru_one *lru, 119 | spinlock_t *lock, void *cb_arg); 120 | extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, 121 | size_t data_size, 122 | size_t offsets_size, 123 | size_t extra_buffers_size, 124 | int is_async, 125 | int pid); 126 | extern void binder_alloc_init(struct binder_alloc *alloc); 127 | extern int binder_alloc_shrinker_init(void); 128 | extern void binder_alloc_shrinker_exit(void); 129 | extern void binder_alloc_vma_close(struct binder_alloc *alloc); 130 | extern struct binder_buffer * 131 | binder_alloc_prepare_to_free(struct binder_alloc *alloc, 132 | uintptr_t user_ptr); 133 | extern void binder_alloc_free_buf(struct binder_alloc *alloc, 134 | struct binder_buffer *buffer); 135 | extern int binder_alloc_mmap_handler(struct binder_alloc *alloc, 136 | struct vm_area_struct *vma); 137 | extern void binder_alloc_deferred_release(struct binder_alloc *alloc); 138 | extern int binder_alloc_get_allocated_count(struct binder_alloc *alloc); 139 | extern void binder_alloc_print_allocated(struct seq_file *m, 140 | struct binder_alloc *alloc); 141 | void binder_alloc_print_pages(struct seq_file *m, 142 | struct binder_alloc *alloc); 143 | 144 | /** 145 | * binder_alloc_get_free_async_space() - get free space available for async 146 | * @alloc: binder_alloc for this proc 147 | * 148 | * Return: the bytes remaining in the address-space for async transactions 149 | */ 150 | static inline size_t 151 | binder_alloc_get_free_async_space(struct binder_alloc *alloc) 152 | { 153 | size_t free_async_space; 154 | 155 | mutex_lock(&alloc->mutex); 156 | free_async_space = alloc->free_async_space; 157 | mutex_unlock(&alloc->mutex); 158 | return free_async_space; 159 | } 160 | 161 | unsigned long 162 | binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc, 163 | struct binder_buffer *buffer, 164 | binder_size_t buffer_offset, 165 | const void __user *from, 166 | size_t bytes); 167 | 168 | int binder_alloc_copy_to_buffer(struct binder_alloc *alloc, 169 | struct binder_buffer *buffer, 170 | binder_size_t buffer_offset, 171 | void *src, 172 | size_t bytes); 173 | 174 | int binder_alloc_copy_from_buffer(struct binder_alloc *alloc, 175 | void *dest, 176 | struct binder_buffer *buffer, 177 | binder_size_t buffer_offset, 178 | size_t bytes); 179 | 180 | #endif /* _LINUX_BINDER_ALLOC_H */ 181 | 182 | -------------------------------------------------------------------------------- /binder/binder_internal.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0 */ 2 | 3 | #ifndef _LINUX_BINDER_INTERNAL_H 4 | #define _LINUX_BINDER_INTERNAL_H 5 | 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | 16 | struct binder_context { 17 | struct binder_node *binder_context_mgr_node; 18 | struct mutex context_mgr_node_lock; 19 | kuid_t binder_context_mgr_uid; 20 | const char *name; 21 | }; 22 | 23 | /** 24 | * struct binder_device - information about a binder device node 25 | * @hlist: list of binder devices (only used for devices requested via 26 | * CONFIG_ANDROID_BINDER_DEVICES) 27 | * @miscdev: information about a binder character device node 28 | * @context: binder context information 29 | * @binderfs_inode: This is the inode of the root dentry of the super block 30 | * belonging to a binderfs mount. 31 | */ 32 | struct binder_device { 33 | struct hlist_node hlist; 34 | struct miscdevice miscdev; 35 | struct binder_context context; 36 | struct inode *binderfs_inode; 37 | refcount_t ref; 38 | }; 39 | 40 | /** 41 | * binderfs_mount_opts - mount options for binderfs 42 | * @max: maximum number of allocatable binderfs binder devices 43 | * @stats_mode: enable binder stats in binderfs. 44 | */ 45 | struct binderfs_mount_opts { 46 | int max; 47 | int stats_mode; 48 | }; 49 | 50 | /** 51 | * binderfs_info - information about a binderfs mount 52 | * @ipc_ns: The ipc namespace the binderfs mount belongs to. 53 | * @control_dentry: This records the dentry of this binderfs mount 54 | * binder-control device. 55 | * @root_uid: uid that needs to be used when a new binder device is 56 | * created. 57 | * @root_gid: gid that needs to be used when a new binder device is 58 | * created. 59 | * @mount_opts: The mount options in use. 60 | * @device_count: The current number of allocated binder devices. 61 | * @proc_log_dir: Pointer to the directory dentry containing process-specific 62 | * logs. 63 | */ 64 | struct binderfs_info { 65 | struct ipc_namespace *ipc_ns; 66 | struct dentry *control_dentry; 67 | kuid_t root_uid; 68 | kgid_t root_gid; 69 | struct binderfs_mount_opts mount_opts; 70 | int device_count; 71 | struct dentry *proc_log_dir; 72 | }; 73 | 74 | extern const struct file_operations binder_fops; 75 | 76 | extern char *binder_devices_param; 77 | 78 | #ifdef CONFIG_ANDROID_BINDERFS 79 | extern bool is_binderfs_device(const struct inode *inode); 80 | extern struct dentry *binderfs_create_file(struct dentry *dir, const char *name, 81 | const struct file_operations *fops, 82 | void *data); 83 | extern void binderfs_remove_file(struct dentry *dentry); 84 | #else 85 | static inline bool is_binderfs_device(const struct inode *inode) 86 | { 87 | return false; 88 | } 89 | static inline struct dentry *binderfs_create_file(struct dentry *dir, 90 | const char *name, 91 | const struct file_operations *fops, 92 | void *data) 93 | { 94 | return NULL; 95 | } 96 | static inline void binderfs_remove_file(struct dentry *dentry) {} 97 | #endif 98 | 99 | #ifdef CONFIG_ANDROID_BINDERFS 100 | extern int __init init_binderfs(void); 101 | extern void __exit exit_binderfs(void); 102 | #else 103 | static inline int __init init_binderfs(void) 104 | { 105 | return 0; 106 | } 107 | static inline void __exit exit_binderfs(void) 108 | { 109 | } 110 | #endif 111 | 112 | int binder_stats_show(struct seq_file *m, void *unused); 113 | DEFINE_SHOW_ATTRIBUTE(binder_stats); 114 | 115 | int binder_state_show(struct seq_file *m, void *unused); 116 | DEFINE_SHOW_ATTRIBUTE(binder_state); 117 | 118 | int binder_transactions_show(struct seq_file *m, void *unused); 119 | DEFINE_SHOW_ATTRIBUTE(binder_transactions); 120 | 121 | int binder_transaction_log_show(struct seq_file *m, void *unused); 122 | DEFINE_SHOW_ATTRIBUTE(binder_transaction_log); 123 | 124 | struct binder_transaction_log_entry { 125 | int debug_id; 126 | int debug_id_done; 127 | int call_type; 128 | int from_proc; 129 | int from_thread; 130 | int target_handle; 131 | int to_proc; 132 | int to_thread; 133 | int to_node; 134 | int data_size; 135 | int offsets_size; 136 | int return_error_line; 137 | uint32_t return_error; 138 | uint32_t return_error_param; 139 | char context_name[BINDERFS_MAX_NAME + 1]; 140 | }; 141 | 142 | struct binder_transaction_log { 143 | atomic_t cur; 144 | bool full; 145 | struct binder_transaction_log_entry entry[32]; 146 | }; 147 | 148 | extern struct binder_transaction_log binder_transaction_log; 149 | extern struct binder_transaction_log binder_transaction_log_failed; 150 | #endif /* _LINUX_BINDER_INTERNAL_H */ 151 | -------------------------------------------------------------------------------- /binder/binder_trace.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* 3 | * Copyright (C) 2012 Google, Inc. 4 | */ 5 | 6 | #undef TRACE_SYSTEM 7 | #define TRACE_SYSTEM binder 8 | 9 | #if !defined(_BINDER_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) 10 | #define _BINDER_TRACE_H 11 | 12 | #include 13 | 14 | struct binder_buffer; 15 | struct binder_node; 16 | struct binder_proc; 17 | struct binder_alloc; 18 | struct binder_ref_data; 19 | struct binder_thread; 20 | struct binder_transaction; 21 | 22 | TRACE_EVENT(binder_ioctl, 23 | TP_PROTO(unsigned int cmd, unsigned long arg), 24 | TP_ARGS(cmd, arg), 25 | 26 | TP_STRUCT__entry( 27 | __field(unsigned int, cmd) 28 | __field(unsigned long, arg) 29 | ), 30 | TP_fast_assign( 31 | __entry->cmd = cmd; 32 | __entry->arg = arg; 33 | ), 34 | TP_printk("cmd=0x%x arg=0x%lx", __entry->cmd, __entry->arg) 35 | ); 36 | 37 | DECLARE_EVENT_CLASS(binder_lock_class, 38 | TP_PROTO(const char *tag), 39 | TP_ARGS(tag), 40 | TP_STRUCT__entry( 41 | __field(const char *, tag) 42 | ), 43 | TP_fast_assign( 44 | __entry->tag = tag; 45 | ), 46 | TP_printk("tag=%s", __entry->tag) 47 | ); 48 | 49 | #define DEFINE_BINDER_LOCK_EVENT(name) \ 50 | DEFINE_EVENT(binder_lock_class, name, \ 51 | TP_PROTO(const char *func), \ 52 | TP_ARGS(func)) 53 | 54 | DEFINE_BINDER_LOCK_EVENT(binder_lock); 55 | DEFINE_BINDER_LOCK_EVENT(binder_locked); 56 | DEFINE_BINDER_LOCK_EVENT(binder_unlock); 57 | 58 | DECLARE_EVENT_CLASS(binder_function_return_class, 59 | TP_PROTO(int ret), 60 | TP_ARGS(ret), 61 | TP_STRUCT__entry( 62 | __field(int, ret) 63 | ), 64 | TP_fast_assign( 65 | __entry->ret = ret; 66 | ), 67 | TP_printk("ret=%d", __entry->ret) 68 | ); 69 | 70 | #define DEFINE_BINDER_FUNCTION_RETURN_EVENT(name) \ 71 | DEFINE_EVENT(binder_function_return_class, name, \ 72 | TP_PROTO(int ret), \ 73 | TP_ARGS(ret)) 74 | 75 | DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_ioctl_done); 76 | DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_write_done); 77 | DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_read_done); 78 | 79 | TRACE_EVENT(binder_wait_for_work, 80 | TP_PROTO(bool proc_work, bool transaction_stack, bool thread_todo), 81 | TP_ARGS(proc_work, transaction_stack, thread_todo), 82 | 83 | TP_STRUCT__entry( 84 | __field(bool, proc_work) 85 | __field(bool, transaction_stack) 86 | __field(bool, thread_todo) 87 | ), 88 | TP_fast_assign( 89 | __entry->proc_work = proc_work; 90 | __entry->transaction_stack = transaction_stack; 91 | __entry->thread_todo = thread_todo; 92 | ), 93 | TP_printk("proc_work=%d transaction_stack=%d thread_todo=%d", 94 | __entry->proc_work, __entry->transaction_stack, 95 | __entry->thread_todo) 96 | ); 97 | 98 | TRACE_EVENT(binder_transaction, 99 | TP_PROTO(bool reply, struct binder_transaction *t, 100 | struct binder_node *target_node), 101 | TP_ARGS(reply, t, target_node), 102 | TP_STRUCT__entry( 103 | __field(int, debug_id) 104 | __field(int, target_node) 105 | __field(int, to_proc) 106 | __field(int, to_thread) 107 | __field(int, reply) 108 | __field(unsigned int, code) 109 | __field(unsigned int, flags) 110 | ), 111 | TP_fast_assign( 112 | __entry->debug_id = t->debug_id; 113 | __entry->target_node = target_node ? target_node->debug_id : 0; 114 | __entry->to_proc = t->to_proc->pid; 115 | __entry->to_thread = t->to_thread ? t->to_thread->pid : 0; 116 | __entry->reply = reply; 117 | __entry->code = t->code; 118 | __entry->flags = t->flags; 119 | ), 120 | TP_printk("transaction=%d dest_node=%d dest_proc=%d dest_thread=%d reply=%d flags=0x%x code=0x%x", 121 | __entry->debug_id, __entry->target_node, 122 | __entry->to_proc, __entry->to_thread, 123 | __entry->reply, __entry->flags, __entry->code) 124 | ); 125 | 126 | TRACE_EVENT(binder_transaction_received, 127 | TP_PROTO(struct binder_transaction *t), 128 | TP_ARGS(t), 129 | 130 | TP_STRUCT__entry( 131 | __field(int, debug_id) 132 | ), 133 | TP_fast_assign( 134 | __entry->debug_id = t->debug_id; 135 | ), 136 | TP_printk("transaction=%d", __entry->debug_id) 137 | ); 138 | 139 | TRACE_EVENT(binder_transaction_node_to_ref, 140 | TP_PROTO(struct binder_transaction *t, struct binder_node *node, 141 | struct binder_ref_data *rdata), 142 | TP_ARGS(t, node, rdata), 143 | 144 | TP_STRUCT__entry( 145 | __field(int, debug_id) 146 | __field(int, node_debug_id) 147 | __field(binder_uintptr_t, node_ptr) 148 | __field(int, ref_debug_id) 149 | __field(uint32_t, ref_desc) 150 | ), 151 | TP_fast_assign( 152 | __entry->debug_id = t->debug_id; 153 | __entry->node_debug_id = node->debug_id; 154 | __entry->node_ptr = node->ptr; 155 | __entry->ref_debug_id = rdata->debug_id; 156 | __entry->ref_desc = rdata->desc; 157 | ), 158 | TP_printk("transaction=%d node=%d src_ptr=0x%016llx ==> dest_ref=%d dest_desc=%d", 159 | __entry->debug_id, __entry->node_debug_id, 160 | (u64)__entry->node_ptr, 161 | __entry->ref_debug_id, __entry->ref_desc) 162 | ); 163 | 164 | TRACE_EVENT(binder_transaction_ref_to_node, 165 | TP_PROTO(struct binder_transaction *t, struct binder_node *node, 166 | struct binder_ref_data *rdata), 167 | TP_ARGS(t, node, rdata), 168 | 169 | TP_STRUCT__entry( 170 | __field(int, debug_id) 171 | __field(int, ref_debug_id) 172 | __field(uint32_t, ref_desc) 173 | __field(int, node_debug_id) 174 | __field(binder_uintptr_t, node_ptr) 175 | ), 176 | TP_fast_assign( 177 | __entry->debug_id = t->debug_id; 178 | __entry->ref_debug_id = rdata->debug_id; 179 | __entry->ref_desc = rdata->desc; 180 | __entry->node_debug_id = node->debug_id; 181 | __entry->node_ptr = node->ptr; 182 | ), 183 | TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%016llx", 184 | __entry->debug_id, __entry->node_debug_id, 185 | __entry->ref_debug_id, __entry->ref_desc, 186 | (u64)__entry->node_ptr) 187 | ); 188 | 189 | TRACE_EVENT(binder_transaction_ref_to_ref, 190 | TP_PROTO(struct binder_transaction *t, struct binder_node *node, 191 | struct binder_ref_data *src_ref, 192 | struct binder_ref_data *dest_ref), 193 | TP_ARGS(t, node, src_ref, dest_ref), 194 | 195 | TP_STRUCT__entry( 196 | __field(int, debug_id) 197 | __field(int, node_debug_id) 198 | __field(int, src_ref_debug_id) 199 | __field(uint32_t, src_ref_desc) 200 | __field(int, dest_ref_debug_id) 201 | __field(uint32_t, dest_ref_desc) 202 | ), 203 | TP_fast_assign( 204 | __entry->debug_id = t->debug_id; 205 | __entry->node_debug_id = node->debug_id; 206 | __entry->src_ref_debug_id = src_ref->debug_id; 207 | __entry->src_ref_desc = src_ref->desc; 208 | __entry->dest_ref_debug_id = dest_ref->debug_id; 209 | __entry->dest_ref_desc = dest_ref->desc; 210 | ), 211 | TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ref=%d dest_desc=%d", 212 | __entry->debug_id, __entry->node_debug_id, 213 | __entry->src_ref_debug_id, __entry->src_ref_desc, 214 | __entry->dest_ref_debug_id, __entry->dest_ref_desc) 215 | ); 216 | 217 | TRACE_EVENT(binder_transaction_fd_send, 218 | TP_PROTO(struct binder_transaction *t, int fd, size_t offset), 219 | TP_ARGS(t, fd, offset), 220 | 221 | TP_STRUCT__entry( 222 | __field(int, debug_id) 223 | __field(int, fd) 224 | __field(size_t, offset) 225 | ), 226 | TP_fast_assign( 227 | __entry->debug_id = t->debug_id; 228 | __entry->fd = fd; 229 | __entry->offset = offset; 230 | ), 231 | TP_printk("transaction=%d src_fd=%d offset=%zu", 232 | __entry->debug_id, __entry->fd, __entry->offset) 233 | ); 234 | 235 | TRACE_EVENT(binder_transaction_fd_recv, 236 | TP_PROTO(struct binder_transaction *t, int fd, size_t offset), 237 | TP_ARGS(t, fd, offset), 238 | 239 | TP_STRUCT__entry( 240 | __field(int, debug_id) 241 | __field(int, fd) 242 | __field(size_t, offset) 243 | ), 244 | TP_fast_assign( 245 | __entry->debug_id = t->debug_id; 246 | __entry->fd = fd; 247 | __entry->offset = offset; 248 | ), 249 | TP_printk("transaction=%d dest_fd=%d offset=%zu", 250 | __entry->debug_id, __entry->fd, __entry->offset) 251 | ); 252 | 253 | DECLARE_EVENT_CLASS(binder_buffer_class, 254 | TP_PROTO(struct binder_buffer *buf), 255 | TP_ARGS(buf), 256 | TP_STRUCT__entry( 257 | __field(int, debug_id) 258 | __field(size_t, data_size) 259 | __field(size_t, offsets_size) 260 | __field(size_t, extra_buffers_size) 261 | ), 262 | TP_fast_assign( 263 | __entry->debug_id = buf->debug_id; 264 | __entry->data_size = buf->data_size; 265 | __entry->offsets_size = buf->offsets_size; 266 | __entry->extra_buffers_size = buf->extra_buffers_size; 267 | ), 268 | TP_printk("transaction=%d data_size=%zd offsets_size=%zd extra_buffers_size=%zd", 269 | __entry->debug_id, __entry->data_size, __entry->offsets_size, 270 | __entry->extra_buffers_size) 271 | ); 272 | 273 | DEFINE_EVENT(binder_buffer_class, binder_transaction_alloc_buf, 274 | TP_PROTO(struct binder_buffer *buffer), 275 | TP_ARGS(buffer)); 276 | 277 | DEFINE_EVENT(binder_buffer_class, binder_transaction_buffer_release, 278 | TP_PROTO(struct binder_buffer *buffer), 279 | TP_ARGS(buffer)); 280 | 281 | DEFINE_EVENT(binder_buffer_class, binder_transaction_failed_buffer_release, 282 | TP_PROTO(struct binder_buffer *buffer), 283 | TP_ARGS(buffer)); 284 | 285 | TRACE_EVENT(binder_update_page_range, 286 | TP_PROTO(struct binder_alloc *alloc, bool allocate, 287 | void __user *start, void __user *end), 288 | TP_ARGS(alloc, allocate, start, end), 289 | TP_STRUCT__entry( 290 | __field(int, proc) 291 | __field(bool, allocate) 292 | __field(size_t, offset) 293 | __field(size_t, size) 294 | ), 295 | TP_fast_assign( 296 | __entry->proc = alloc->pid; 297 | __entry->allocate = allocate; 298 | __entry->offset = start - alloc->buffer; 299 | __entry->size = end - start; 300 | ), 301 | TP_printk("proc=%d allocate=%d offset=%zu size=%zu", 302 | __entry->proc, __entry->allocate, 303 | __entry->offset, __entry->size) 304 | ); 305 | 306 | DECLARE_EVENT_CLASS(binder_lru_page_class, 307 | TP_PROTO(const struct binder_alloc *alloc, size_t page_index), 308 | TP_ARGS(alloc, page_index), 309 | TP_STRUCT__entry( 310 | __field(int, proc) 311 | __field(size_t, page_index) 312 | ), 313 | TP_fast_assign( 314 | __entry->proc = alloc->pid; 315 | __entry->page_index = page_index; 316 | ), 317 | TP_printk("proc=%d page_index=%zu", 318 | __entry->proc, __entry->page_index) 319 | ); 320 | 321 | DEFINE_EVENT(binder_lru_page_class, binder_alloc_lru_start, 322 | TP_PROTO(const struct binder_alloc *alloc, size_t page_index), 323 | TP_ARGS(alloc, page_index)); 324 | 325 | DEFINE_EVENT(binder_lru_page_class, binder_alloc_lru_end, 326 | TP_PROTO(const struct binder_alloc *alloc, size_t page_index), 327 | TP_ARGS(alloc, page_index)); 328 | 329 | DEFINE_EVENT(binder_lru_page_class, binder_free_lru_start, 330 | TP_PROTO(const struct binder_alloc *alloc, size_t page_index), 331 | TP_ARGS(alloc, page_index)); 332 | 333 | DEFINE_EVENT(binder_lru_page_class, binder_free_lru_end, 334 | TP_PROTO(const struct binder_alloc *alloc, size_t page_index), 335 | TP_ARGS(alloc, page_index)); 336 | 337 | DEFINE_EVENT(binder_lru_page_class, binder_alloc_page_start, 338 | TP_PROTO(const struct binder_alloc *alloc, size_t page_index), 339 | TP_ARGS(alloc, page_index)); 340 | 341 | DEFINE_EVENT(binder_lru_page_class, binder_alloc_page_end, 342 | TP_PROTO(const struct binder_alloc *alloc, size_t page_index), 343 | TP_ARGS(alloc, page_index)); 344 | 345 | DEFINE_EVENT(binder_lru_page_class, binder_unmap_user_start, 346 | TP_PROTO(const struct binder_alloc *alloc, size_t page_index), 347 | TP_ARGS(alloc, page_index)); 348 | 349 | DEFINE_EVENT(binder_lru_page_class, binder_unmap_user_end, 350 | TP_PROTO(const struct binder_alloc *alloc, size_t page_index), 351 | TP_ARGS(alloc, page_index)); 352 | 353 | DEFINE_EVENT(binder_lru_page_class, binder_unmap_kernel_start, 354 | TP_PROTO(const struct binder_alloc *alloc, size_t page_index), 355 | TP_ARGS(alloc, page_index)); 356 | 357 | DEFINE_EVENT(binder_lru_page_class, binder_unmap_kernel_end, 358 | TP_PROTO(const struct binder_alloc *alloc, size_t page_index), 359 | TP_ARGS(alloc, page_index)); 360 | 361 | TRACE_EVENT(binder_command, 362 | TP_PROTO(uint32_t cmd), 363 | TP_ARGS(cmd), 364 | TP_STRUCT__entry( 365 | __field(uint32_t, cmd) 366 | ), 367 | TP_fast_assign( 368 | __entry->cmd = cmd; 369 | ), 370 | TP_printk("cmd=0x%x %s", 371 | __entry->cmd, 372 | _IOC_NR(__entry->cmd) < ARRAY_SIZE(binder_command_strings) ? 373 | binder_command_strings[_IOC_NR(__entry->cmd)] : 374 | "unknown") 375 | ); 376 | 377 | TRACE_EVENT(binder_return, 378 | TP_PROTO(uint32_t cmd), 379 | TP_ARGS(cmd), 380 | TP_STRUCT__entry( 381 | __field(uint32_t, cmd) 382 | ), 383 | TP_fast_assign( 384 | __entry->cmd = cmd; 385 | ), 386 | TP_printk("cmd=0x%x %s", 387 | __entry->cmd, 388 | _IOC_NR(__entry->cmd) < ARRAY_SIZE(binder_return_strings) ? 389 | binder_return_strings[_IOC_NR(__entry->cmd)] : 390 | "unknown") 391 | ); 392 | 393 | #endif /* _BINDER_TRACE_H */ 394 | 395 | #undef TRACE_INCLUDE_PATH 396 | #undef TRACE_INCLUDE_FILE 397 | #define TRACE_INCLUDE_PATH . 398 | #define TRACE_INCLUDE_FILE binder_trace 399 | #include 400 | -------------------------------------------------------------------------------- /binder/binderfs.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include 28 | #include 29 | #include 30 | #include 31 | #include 32 | #include 33 | #include 34 | #include 35 | #include 36 | #include 37 | 38 | #include "binder_internal.h" 39 | #include "deps.h" 40 | 41 | #define FIRST_INODE 1 42 | #define SECOND_INODE 2 43 | #define INODE_OFFSET 3 44 | #define INTSTRLEN 21 45 | #define BINDERFS_MAX_MINOR (1U << MINORBITS) 46 | /* Ensure that the initial ipc namespace always has devices available. */ 47 | #define BINDERFS_MAX_MINOR_CAPPED (BINDERFS_MAX_MINOR - 4) 48 | 49 | static dev_t binderfs_dev; 50 | static DEFINE_MUTEX(binderfs_minors_mutex); 51 | static DEFINE_IDA(binderfs_minors); 52 | 53 | enum binderfs_param { 54 | Opt_max, 55 | Opt_stats_mode, 56 | }; 57 | 58 | enum binderfs_stats_mode { 59 | binderfs_stats_mode_unset, 60 | binderfs_stats_mode_global, 61 | }; 62 | 63 | static const struct constant_table binderfs_param_stats[] = { 64 | { "global", binderfs_stats_mode_global }, 65 | {} 66 | }; 67 | 68 | static const struct fs_parameter_spec binderfs_fs_parameters[] = { 69 | fsparam_u32("max", Opt_max), 70 | fsparam_enum("stats", Opt_stats_mode, binderfs_param_stats), 71 | {} 72 | }; 73 | 74 | static inline struct binderfs_info *BINDERFS_SB(const struct super_block *sb) 75 | { 76 | return sb->s_fs_info; 77 | } 78 | 79 | bool is_binderfs_device(const struct inode *inode) 80 | { 81 | if (inode->i_sb->s_magic == BINDERFS_SUPER_MAGIC) 82 | return true; 83 | 84 | return false; 85 | } 86 | 87 | /** 88 | * binderfs_binder_device_create - allocate inode from super block of a 89 | * binderfs mount 90 | * @ref_inode: inode from wich the super block will be taken 91 | * @userp: buffer to copy information about new device for userspace to 92 | * @req: struct binderfs_device as copied from userspace 93 | * 94 | * This function allocates a new binder_device and reserves a new minor 95 | * number for it. 96 | * Minor numbers are limited and tracked globally in binderfs_minors. The 97 | * function will stash a struct binder_device for the specific binder 98 | * device in i_private of the inode. 99 | * It will go on to allocate a new inode from the super block of the 100 | * filesystem mount, stash a struct binder_device in its i_private field 101 | * and attach a dentry to that inode. 102 | * 103 | * Return: 0 on success, negative errno on failure 104 | */ 105 | static int binderfs_binder_device_create(struct inode *ref_inode, 106 | struct binderfs_device __user *userp, 107 | struct binderfs_device *req) 108 | { 109 | int minor, ret; 110 | struct dentry *dentry, *root; 111 | struct binder_device *device; 112 | char *name = NULL; 113 | size_t name_len; 114 | struct inode *inode = NULL; 115 | struct super_block *sb = ref_inode->i_sb; 116 | struct binderfs_info *info = sb->s_fs_info; 117 | #if defined(CONFIG_IPC_NS) 118 | bool use_reserve = (info->ipc_ns == get_init_ipc_ns_ptr()); 119 | #else 120 | bool use_reserve = true; 121 | #endif 122 | 123 | /* Reserve new minor number for the new device. */ 124 | mutex_lock(&binderfs_minors_mutex); 125 | if (++info->device_count <= info->mount_opts.max) 126 | minor = ida_alloc_max(&binderfs_minors, 127 | use_reserve ? BINDERFS_MAX_MINOR : 128 | BINDERFS_MAX_MINOR_CAPPED, 129 | GFP_KERNEL); 130 | else 131 | minor = -ENOSPC; 132 | if (minor < 0) { 133 | --info->device_count; 134 | mutex_unlock(&binderfs_minors_mutex); 135 | return minor; 136 | } 137 | mutex_unlock(&binderfs_minors_mutex); 138 | 139 | ret = -ENOMEM; 140 | device = kzalloc(sizeof(*device), GFP_KERNEL); 141 | if (!device) 142 | goto err; 143 | 144 | inode = new_inode(sb); 145 | if (!inode) 146 | goto err; 147 | 148 | inode->i_ino = minor + INODE_OFFSET; 149 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,7,0)) 150 | simple_inode_init_ts(inode); 151 | #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(6,6,0)) 152 | inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode); 153 | #else 154 | inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode); 155 | #endif 156 | init_special_inode(inode, S_IFCHR | 0600, 157 | MKDEV(MAJOR(binderfs_dev), minor)); 158 | inode->i_fop = &binder_fops; 159 | inode->i_uid = info->root_uid; 160 | inode->i_gid = info->root_gid; 161 | 162 | req->name[BINDERFS_MAX_NAME] = '\0'; /* NUL-terminate */ 163 | name_len = strlen(req->name); 164 | /* Make sure to include terminating NUL byte */ 165 | name = kmemdup(req->name, name_len + 1, GFP_KERNEL); 166 | if (!name) 167 | goto err; 168 | 169 | refcount_set(&device->ref, 1); 170 | device->binderfs_inode = inode; 171 | device->context.binder_context_mgr_uid = INVALID_UID; 172 | device->context.name = name; 173 | device->miscdev.name = name; 174 | device->miscdev.minor = minor; 175 | mutex_init(&device->context.context_mgr_node_lock); 176 | 177 | req->major = MAJOR(binderfs_dev); 178 | req->minor = minor; 179 | 180 | if (userp && copy_to_user(userp, req, sizeof(*req))) { 181 | ret = -EFAULT; 182 | goto err; 183 | } 184 | 185 | root = sb->s_root; 186 | inode_lock(d_inode(root)); 187 | 188 | /* look it up */ 189 | dentry = lookup_one_len(name, root, name_len); 190 | if (IS_ERR(dentry)) { 191 | inode_unlock(d_inode(root)); 192 | ret = PTR_ERR(dentry); 193 | goto err; 194 | } 195 | 196 | if (d_really_is_positive(dentry)) { 197 | /* already exists */ 198 | dput(dentry); 199 | inode_unlock(d_inode(root)); 200 | ret = -EEXIST; 201 | goto err; 202 | } 203 | 204 | inode->i_private = device; 205 | d_instantiate(dentry, inode); 206 | fsnotify_create(root->d_inode, dentry); 207 | inode_unlock(d_inode(root)); 208 | 209 | return 0; 210 | 211 | err: 212 | kfree(name); 213 | kfree(device); 214 | mutex_lock(&binderfs_minors_mutex); 215 | --info->device_count; 216 | ida_free(&binderfs_minors, minor); 217 | mutex_unlock(&binderfs_minors_mutex); 218 | iput(inode); 219 | 220 | return ret; 221 | } 222 | 223 | /** 224 | * binderfs_ctl_ioctl - handle binder device node allocation requests 225 | * 226 | * The request handler for the binder-control device. All requests operate on 227 | * the binderfs mount the binder-control device resides in: 228 | * - BINDER_CTL_ADD 229 | * Allocate a new binder device. 230 | * 231 | * Return: 0 on success, negative errno on failure 232 | */ 233 | static long binder_ctl_ioctl(struct file *file, unsigned int cmd, 234 | unsigned long arg) 235 | { 236 | int ret = -EINVAL; 237 | struct inode *inode = file_inode(file); 238 | struct binderfs_device __user *device = (struct binderfs_device __user *)arg; 239 | struct binderfs_device device_req; 240 | 241 | switch (cmd) { 242 | case BINDER_CTL_ADD: 243 | ret = copy_from_user(&device_req, device, sizeof(device_req)); 244 | if (ret) { 245 | ret = -EFAULT; 246 | break; 247 | } 248 | 249 | ret = binderfs_binder_device_create(inode, device, &device_req); 250 | break; 251 | default: 252 | break; 253 | } 254 | 255 | return ret; 256 | } 257 | 258 | static void binderfs_evict_inode(struct inode *inode) 259 | { 260 | struct binder_device *device = inode->i_private; 261 | struct binderfs_info *info = BINDERFS_SB(inode->i_sb); 262 | 263 | clear_inode(inode); 264 | 265 | if (!S_ISCHR(inode->i_mode) || !device) 266 | return; 267 | 268 | mutex_lock(&binderfs_minors_mutex); 269 | --info->device_count; 270 | ida_free(&binderfs_minors, device->miscdev.minor); 271 | mutex_unlock(&binderfs_minors_mutex); 272 | 273 | if (refcount_dec_and_test(&device->ref)) { 274 | kfree(device->context.name); 275 | kfree(device); 276 | } 277 | } 278 | 279 | static int binderfs_fs_context_parse_param(struct fs_context *fc, 280 | struct fs_parameter *param) 281 | { 282 | int opt; 283 | struct binderfs_mount_opts *ctx = fc->fs_private; 284 | struct fs_parse_result result; 285 | 286 | opt = fs_parse(fc, binderfs_fs_parameters, param, &result); 287 | if (opt < 0) 288 | return opt; 289 | 290 | switch (opt) { 291 | case Opt_max: 292 | if (result.uint_32 > BINDERFS_MAX_MINOR) 293 | return invalfc(fc, "Bad value for '%s'", param->key); 294 | 295 | ctx->max = result.uint_32; 296 | break; 297 | case Opt_stats_mode: 298 | if (!capable(CAP_SYS_ADMIN)) 299 | return -EPERM; 300 | 301 | ctx->stats_mode = result.uint_32; 302 | break; 303 | default: 304 | return invalfc(fc, "Unsupported parameter '%s'", param->key); 305 | } 306 | 307 | return 0; 308 | } 309 | 310 | static int binderfs_fs_context_reconfigure(struct fs_context *fc) 311 | { 312 | struct binderfs_mount_opts *ctx = fc->fs_private; 313 | struct binderfs_info *info = BINDERFS_SB(fc->root->d_sb); 314 | 315 | if (info->mount_opts.stats_mode != ctx->stats_mode) 316 | return invalfc(fc, "Binderfs stats mode cannot be changed during a remount"); 317 | 318 | info->mount_opts.stats_mode = ctx->stats_mode; 319 | info->mount_opts.max = ctx->max; 320 | return 0; 321 | } 322 | 323 | static int binderfs_show_options(struct seq_file *seq, struct dentry *root) 324 | { 325 | struct binderfs_info *info = BINDERFS_SB(root->d_sb); 326 | 327 | if (info->mount_opts.max <= BINDERFS_MAX_MINOR) 328 | seq_printf(seq, ",max=%d", info->mount_opts.max); 329 | 330 | switch (info->mount_opts.stats_mode) { 331 | case binderfs_stats_mode_unset: 332 | break; 333 | case binderfs_stats_mode_global: 334 | seq_printf(seq, ",stats=global"); 335 | break; 336 | } 337 | 338 | return 0; 339 | } 340 | 341 | static void binderfs_put_super(struct super_block *sb) 342 | { 343 | struct binderfs_info *info = sb->s_fs_info; 344 | 345 | if (info && info->ipc_ns) 346 | put_ipc_ns(info->ipc_ns); 347 | 348 | kfree(info); 349 | sb->s_fs_info = NULL; 350 | } 351 | 352 | static const struct super_operations binderfs_super_ops = { 353 | .evict_inode = binderfs_evict_inode, 354 | .show_options = binderfs_show_options, 355 | .statfs = simple_statfs, 356 | .put_super = binderfs_put_super, 357 | }; 358 | 359 | static inline bool is_binderfs_control_device(const struct dentry *dentry) 360 | { 361 | struct binderfs_info *info = dentry->d_sb->s_fs_info; 362 | 363 | return info->control_dentry == dentry; 364 | } 365 | 366 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,3,0)) 367 | static int binderfs_rename(struct mnt_idmap *idmap, 368 | struct inode *old_dir, struct dentry *old_dentry, 369 | struct inode *new_dir, struct dentry *new_dentry, 370 | unsigned int flags) 371 | #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(5,12,0)) 372 | static int binderfs_rename(struct user_namespace *namespace, struct inode *old_dir, 373 | struct dentry *old_dentry, struct inode *new_dir, 374 | struct dentry *new_dentry, unsigned int flags) 375 | #else 376 | static int binderfs_rename(struct inode *old_dir, struct dentry *old_dentry, 377 | struct inode *new_dir, struct dentry *new_dentry, 378 | unsigned int flags) 379 | #endif 380 | { 381 | if (is_binderfs_control_device(old_dentry) || 382 | is_binderfs_control_device(new_dentry)) 383 | return -EPERM; 384 | 385 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,3,0)) 386 | return simple_rename(idmap, old_dir, old_dentry, new_dir, 387 | new_dentry, flags); 388 | #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(5,12,0)) 389 | return simple_rename(namespace, old_dir, old_dentry, new_dir, new_dentry, flags); 390 | #else 391 | return simple_rename(old_dir, old_dentry, new_dir, new_dentry, flags); 392 | #endif 393 | } 394 | 395 | static int binderfs_unlink(struct inode *dir, struct dentry *dentry) 396 | { 397 | if (is_binderfs_control_device(dentry)) 398 | return -EPERM; 399 | 400 | return simple_unlink(dir, dentry); 401 | } 402 | 403 | static const struct file_operations binder_ctl_fops = { 404 | .owner = THIS_MODULE, 405 | .open = nonseekable_open, 406 | .unlocked_ioctl = binder_ctl_ioctl, 407 | .compat_ioctl = binder_ctl_ioctl, 408 | .llseek = noop_llseek, 409 | }; 410 | 411 | /** 412 | * binderfs_binder_ctl_create - create a new binder-control device 413 | * @sb: super block of the binderfs mount 414 | * 415 | * This function creates a new binder-control device node in the binderfs mount 416 | * referred to by @sb. 417 | * 418 | * Return: 0 on success, negative errno on failure 419 | */ 420 | static int binderfs_binder_ctl_create(struct super_block *sb) 421 | { 422 | int minor, ret; 423 | struct dentry *dentry; 424 | struct binder_device *device; 425 | struct inode *inode = NULL; 426 | struct dentry *root = sb->s_root; 427 | struct binderfs_info *info = sb->s_fs_info; 428 | #if defined(CONFIG_IPC_NS) 429 | bool use_reserve = (info->ipc_ns == get_init_ipc_ns_ptr()); 430 | #else 431 | bool use_reserve = true; 432 | #endif 433 | 434 | device = kzalloc(sizeof(*device), GFP_KERNEL); 435 | if (!device) 436 | return -ENOMEM; 437 | 438 | /* If we have already created a binder-control node, return. */ 439 | if (info->control_dentry) { 440 | ret = 0; 441 | goto out; 442 | } 443 | 444 | ret = -ENOMEM; 445 | inode = new_inode(sb); 446 | if (!inode) 447 | goto out; 448 | 449 | /* Reserve a new minor number for the new device. */ 450 | mutex_lock(&binderfs_minors_mutex); 451 | minor = ida_alloc_max(&binderfs_minors, 452 | use_reserve ? BINDERFS_MAX_MINOR : 453 | BINDERFS_MAX_MINOR_CAPPED, 454 | GFP_KERNEL); 455 | mutex_unlock(&binderfs_minors_mutex); 456 | if (minor < 0) { 457 | ret = minor; 458 | goto out; 459 | } 460 | 461 | inode->i_ino = SECOND_INODE; 462 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,7,0)) 463 | simple_inode_init_ts(inode); 464 | #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(6,6,0)) 465 | inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode); 466 | #else 467 | inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode); 468 | #endif 469 | init_special_inode(inode, S_IFCHR | 0600, 470 | MKDEV(MAJOR(binderfs_dev), minor)); 471 | inode->i_fop = &binder_ctl_fops; 472 | inode->i_uid = info->root_uid; 473 | inode->i_gid = info->root_gid; 474 | 475 | refcount_set(&device->ref, 1); 476 | device->binderfs_inode = inode; 477 | device->miscdev.minor = minor; 478 | 479 | dentry = d_alloc_name(root, "binder-control"); 480 | if (!dentry) 481 | goto out; 482 | 483 | inode->i_private = device; 484 | info->control_dentry = dentry; 485 | d_add(dentry, inode); 486 | 487 | return 0; 488 | 489 | out: 490 | kfree(device); 491 | iput(inode); 492 | 493 | return ret; 494 | } 495 | 496 | static const struct inode_operations binderfs_dir_inode_operations = { 497 | .lookup = simple_lookup, 498 | .rename = binderfs_rename, 499 | .unlink = binderfs_unlink, 500 | }; 501 | 502 | static struct inode *binderfs_make_inode(struct super_block *sb, int mode) 503 | { 504 | struct inode *ret; 505 | 506 | ret = new_inode(sb); 507 | if (ret) { 508 | ret->i_ino = iunique(sb, BINDERFS_MAX_MINOR + INODE_OFFSET); 509 | ret->i_mode = mode; 510 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,7,0)) 511 | simple_inode_init_ts(ret); 512 | #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(6,6,0)) 513 | ret->i_atime = ret->i_mtime = inode_set_ctime_current(ret); 514 | #else 515 | ret->i_atime = ret->i_mtime = ret->i_ctime = current_time(ret); 516 | #endif 517 | } 518 | return ret; 519 | } 520 | 521 | static struct dentry *binderfs_create_dentry(struct dentry *parent, 522 | const char *name) 523 | { 524 | struct dentry *dentry; 525 | 526 | dentry = lookup_one_len(name, parent, strlen(name)); 527 | if (IS_ERR(dentry)) 528 | return dentry; 529 | 530 | /* Return error if the file/dir already exists. */ 531 | if (d_really_is_positive(dentry)) { 532 | dput(dentry); 533 | return ERR_PTR(-EEXIST); 534 | } 535 | 536 | return dentry; 537 | } 538 | 539 | void binderfs_remove_file(struct dentry *dentry) 540 | { 541 | struct inode *parent_inode; 542 | 543 | parent_inode = d_inode(dentry->d_parent); 544 | inode_lock(parent_inode); 545 | if (simple_positive(dentry)) { 546 | dget(dentry); 547 | simple_unlink(parent_inode, dentry); 548 | d_delete(dentry); 549 | dput(dentry); 550 | } 551 | inode_unlock(parent_inode); 552 | } 553 | 554 | struct dentry *binderfs_create_file(struct dentry *parent, const char *name, 555 | const struct file_operations *fops, 556 | void *data) 557 | { 558 | struct dentry *dentry; 559 | struct inode *new_inode, *parent_inode; 560 | struct super_block *sb; 561 | 562 | parent_inode = d_inode(parent); 563 | inode_lock(parent_inode); 564 | 565 | dentry = binderfs_create_dentry(parent, name); 566 | if (IS_ERR(dentry)) 567 | goto out; 568 | 569 | sb = parent_inode->i_sb; 570 | new_inode = binderfs_make_inode(sb, S_IFREG | 0444); 571 | if (!new_inode) { 572 | dput(dentry); 573 | dentry = ERR_PTR(-ENOMEM); 574 | goto out; 575 | } 576 | 577 | new_inode->i_fop = fops; 578 | new_inode->i_private = data; 579 | d_instantiate(dentry, new_inode); 580 | fsnotify_create(parent_inode, dentry); 581 | 582 | out: 583 | inode_unlock(parent_inode); 584 | return dentry; 585 | } 586 | 587 | static struct dentry *binderfs_create_dir(struct dentry *parent, 588 | const char *name) 589 | { 590 | struct dentry *dentry; 591 | struct inode *new_inode, *parent_inode; 592 | struct super_block *sb; 593 | 594 | parent_inode = d_inode(parent); 595 | inode_lock(parent_inode); 596 | 597 | dentry = binderfs_create_dentry(parent, name); 598 | if (IS_ERR(dentry)) 599 | goto out; 600 | 601 | sb = parent_inode->i_sb; 602 | new_inode = binderfs_make_inode(sb, S_IFDIR | 0755); 603 | if (!new_inode) { 604 | dput(dentry); 605 | dentry = ERR_PTR(-ENOMEM); 606 | goto out; 607 | } 608 | 609 | new_inode->i_fop = &simple_dir_operations; 610 | new_inode->i_op = &simple_dir_inode_operations; 611 | 612 | set_nlink(new_inode, 2); 613 | d_instantiate(dentry, new_inode); 614 | inc_nlink(parent_inode); 615 | fsnotify_mkdir(parent_inode, dentry); 616 | 617 | out: 618 | inode_unlock(parent_inode); 619 | return dentry; 620 | } 621 | 622 | static int init_binder_logs(struct super_block *sb) 623 | { 624 | struct dentry *binder_logs_root_dir, *dentry, *proc_log_dir; 625 | struct binderfs_info *info; 626 | int ret = 0; 627 | 628 | binder_logs_root_dir = binderfs_create_dir(sb->s_root, 629 | "binder_logs"); 630 | if (IS_ERR(binder_logs_root_dir)) { 631 | ret = PTR_ERR(binder_logs_root_dir); 632 | goto out; 633 | } 634 | 635 | dentry = binderfs_create_file(binder_logs_root_dir, "stats", 636 | &binder_stats_fops, NULL); 637 | if (IS_ERR(dentry)) { 638 | ret = PTR_ERR(dentry); 639 | goto out; 640 | } 641 | 642 | dentry = binderfs_create_file(binder_logs_root_dir, "state", 643 | &binder_state_fops, NULL); 644 | if (IS_ERR(dentry)) { 645 | ret = PTR_ERR(dentry); 646 | goto out; 647 | } 648 | 649 | dentry = binderfs_create_file(binder_logs_root_dir, "transactions", 650 | &binder_transactions_fops, NULL); 651 | if (IS_ERR(dentry)) { 652 | ret = PTR_ERR(dentry); 653 | goto out; 654 | } 655 | 656 | dentry = binderfs_create_file(binder_logs_root_dir, 657 | "transaction_log", 658 | &binder_transaction_log_fops, 659 | &binder_transaction_log); 660 | if (IS_ERR(dentry)) { 661 | ret = PTR_ERR(dentry); 662 | goto out; 663 | } 664 | 665 | dentry = binderfs_create_file(binder_logs_root_dir, 666 | "failed_transaction_log", 667 | &binder_transaction_log_fops, 668 | &binder_transaction_log_failed); 669 | if (IS_ERR(dentry)) { 670 | ret = PTR_ERR(dentry); 671 | goto out; 672 | } 673 | 674 | proc_log_dir = binderfs_create_dir(binder_logs_root_dir, "proc"); 675 | if (IS_ERR(proc_log_dir)) { 676 | ret = PTR_ERR(proc_log_dir); 677 | goto out; 678 | } 679 | info = sb->s_fs_info; 680 | info->proc_log_dir = proc_log_dir; 681 | 682 | out: 683 | return ret; 684 | } 685 | 686 | static int binderfs_fill_super(struct super_block *sb, struct fs_context *fc) 687 | { 688 | int ret; 689 | struct binderfs_info *info; 690 | struct binderfs_mount_opts *ctx = fc->fs_private; 691 | struct inode *inode = NULL; 692 | struct binderfs_device device_info = {}; 693 | const char *name; 694 | size_t len; 695 | 696 | sb->s_blocksize = PAGE_SIZE; 697 | sb->s_blocksize_bits = PAGE_SHIFT; 698 | 699 | /* 700 | * The binderfs filesystem can be mounted by userns root in a 701 | * non-initial userns. By default such mounts have the SB_I_NODEV flag 702 | * set in s_iflags to prevent security issues where userns root can 703 | * just create random device nodes via mknod() since it owns the 704 | * filesystem mount. But binderfs does not allow to create any files 705 | * including devices nodes. The only way to create binder devices nodes 706 | * is through the binder-control device which userns root is explicitly 707 | * allowed to do. So removing the SB_I_NODEV flag from s_iflags is both 708 | * necessary and safe. 709 | */ 710 | sb->s_iflags &= ~SB_I_NODEV; 711 | sb->s_iflags |= SB_I_NOEXEC; 712 | sb->s_magic = BINDERFS_SUPER_MAGIC; 713 | sb->s_op = &binderfs_super_ops; 714 | sb->s_time_gran = 1; 715 | 716 | sb->s_fs_info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL); 717 | if (!sb->s_fs_info) 718 | return -ENOMEM; 719 | info = sb->s_fs_info; 720 | 721 | info->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns); 722 | 723 | info->root_gid = make_kgid(sb->s_user_ns, 0); 724 | if (!gid_valid(info->root_gid)) 725 | info->root_gid = GLOBAL_ROOT_GID; 726 | info->root_uid = make_kuid(sb->s_user_ns, 0); 727 | if (!uid_valid(info->root_uid)) 728 | info->root_uid = GLOBAL_ROOT_UID; 729 | info->mount_opts.max = ctx->max; 730 | info->mount_opts.stats_mode = ctx->stats_mode; 731 | 732 | inode = new_inode(sb); 733 | if (!inode) 734 | return -ENOMEM; 735 | 736 | inode->i_ino = FIRST_INODE; 737 | inode->i_fop = &simple_dir_operations; 738 | inode->i_mode = S_IFDIR | 0755; 739 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,7,0)) 740 | simple_inode_init_ts(inode); 741 | #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(6,6,0)) 742 | inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode); 743 | #else 744 | inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode); 745 | #endif 746 | inode->i_op = &binderfs_dir_inode_operations; 747 | set_nlink(inode, 2); 748 | 749 | sb->s_root = d_make_root(inode); 750 | if (!sb->s_root) 751 | return -ENOMEM; 752 | 753 | ret = binderfs_binder_ctl_create(sb); 754 | if (ret) 755 | return ret; 756 | 757 | name = binder_devices_param; 758 | for (len = strcspn(name, ","); len > 0; len = strcspn(name, ",")) { 759 | strscpy(device_info.name, name, len + 1); 760 | ret = binderfs_binder_device_create(inode, NULL, &device_info); 761 | if (ret) 762 | return ret; 763 | name += len; 764 | if (*name == ',') 765 | name++; 766 | } 767 | 768 | if (info->mount_opts.stats_mode == binderfs_stats_mode_global) 769 | return init_binder_logs(sb); 770 | 771 | return 0; 772 | } 773 | 774 | static int binderfs_fs_context_get_tree(struct fs_context *fc) 775 | { 776 | return get_tree_nodev(fc, binderfs_fill_super); 777 | } 778 | 779 | static void binderfs_fs_context_free(struct fs_context *fc) 780 | { 781 | struct binderfs_mount_opts *ctx = fc->fs_private; 782 | 783 | kfree(ctx); 784 | } 785 | 786 | static const struct fs_context_operations binderfs_fs_context_ops = { 787 | .free = binderfs_fs_context_free, 788 | .get_tree = binderfs_fs_context_get_tree, 789 | .parse_param = binderfs_fs_context_parse_param, 790 | .reconfigure = binderfs_fs_context_reconfigure, 791 | }; 792 | 793 | static int binderfs_init_fs_context(struct fs_context *fc) 794 | { 795 | struct binderfs_mount_opts *ctx; 796 | 797 | ctx = kzalloc(sizeof(struct binderfs_mount_opts), GFP_KERNEL); 798 | if (!ctx) 799 | return -ENOMEM; 800 | 801 | ctx->max = BINDERFS_MAX_MINOR; 802 | ctx->stats_mode = binderfs_stats_mode_unset; 803 | 804 | fc->fs_private = ctx; 805 | fc->ops = &binderfs_fs_context_ops; 806 | 807 | return 0; 808 | } 809 | 810 | static struct file_system_type binder_fs_type = { 811 | .name = "binder", 812 | .init_fs_context = binderfs_init_fs_context, 813 | .parameters = binderfs_fs_parameters, 814 | .kill_sb = kill_litter_super, 815 | .fs_flags = FS_USERNS_MOUNT, 816 | }; 817 | 818 | int __init init_binderfs(void) 819 | { 820 | int ret; 821 | const char *name; 822 | size_t len; 823 | 824 | /* Verify that the default binderfs device names are valid. */ 825 | name = binder_devices_param; 826 | for (len = strcspn(name, ","); len > 0; len = strcspn(name, ",")) { 827 | if (len > BINDERFS_MAX_NAME) 828 | return -E2BIG; 829 | name += len; 830 | if (*name == ',') 831 | name++; 832 | } 833 | 834 | /* Allocate new major number for binderfs. */ 835 | ret = alloc_chrdev_region(&binderfs_dev, 0, BINDERFS_MAX_MINOR, 836 | "binder"); 837 | if (ret) 838 | return ret; 839 | 840 | ret = register_filesystem(&binder_fs_type); 841 | if (ret) { 842 | unregister_chrdev_region(binderfs_dev, BINDERFS_MAX_MINOR); 843 | return ret; 844 | } 845 | 846 | return ret; 847 | } 848 | 849 | void __exit exit_binderfs(void) 850 | { 851 | unregister_filesystem(&binder_fs_type); 852 | unregister_chrdev_region(binderfs_dev, BINDERFS_MAX_MINOR); 853 | } 854 | 855 | MODULE_LICENSE("GPL v2"); 856 | -------------------------------------------------------------------------------- /binder/deps.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | #include "deps.h" 16 | 17 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)) 18 | 19 | #ifndef CONFIG_KPROBES 20 | # error "Your kernel does not support KProbes, but this is required to compile binder as a kernel module on kernel 5.7 and later" 21 | #endif 22 | 23 | typedef unsigned long (*kallsyms_lookup_name_t)(const char *name); 24 | 25 | static int dummy_kprobe_handler(struct kprobe *p, struct pt_regs *regs) 26 | { 27 | return 0; 28 | } 29 | 30 | static kallsyms_lookup_name_t get_kallsyms_lookup_name_ptr(void) 31 | { 32 | struct kprobe probe; 33 | int ret; 34 | kallsyms_lookup_name_t addr; 35 | 36 | memset(&probe, 0, sizeof(probe)); 37 | probe.pre_handler = dummy_kprobe_handler; 38 | probe.symbol_name = "kallsyms_lookup_name"; 39 | ret = register_kprobe(&probe); 40 | if (ret) 41 | return NULL; 42 | addr = (kallsyms_lookup_name_t) probe.addr; 43 | unregister_kprobe(&probe); 44 | 45 | return addr; 46 | } 47 | #endif 48 | 49 | /* 50 | * On kernel 5.7 and later, kallsyms_lookup_name() can no longer be called from a kernel 51 | * module for reasons described here: https://lwn.net/Articles/813350/ 52 | * As binder really needs to use kallsysms_lookup_name() to access some kernel 53 | * functions that otherwise wouldn't be accessible, KProbes are used on later 54 | * kernels to get the address of kallsysms_lookup_name(). The function is 55 | * afterwards used just as before. This is a very dirty hack though and the much 56 | * better solution would be if all the functions that are currently resolved 57 | * with kallsysms_lookup_name() would get an EXPORT_SYMBOL() annotation to 58 | * make them directly accessible to kernel modules. 59 | */ 60 | static unsigned long kallsyms_lookup_name_wrapper(const char *name) 61 | { 62 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)) 63 | static kallsyms_lookup_name_t func_ptr = NULL; 64 | if (!func_ptr) 65 | func_ptr = get_kallsyms_lookup_name_ptr(); 66 | 67 | return func_ptr(name); 68 | #else 69 | return kallsyms_lookup_name(name); 70 | #endif 71 | } 72 | 73 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,19,0)) 74 | static struct file *(*close_fd_get_file_ptr)(unsigned int fd) 75 | #else 76 | static int (*close_fd_get_file_ptr)(unsigned int fd, struct file **res) 77 | #endif 78 | = NULL; 79 | 80 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,8,0)) 81 | struct file *file_close_fd(unsigned int fd) 82 | #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(5,19,0)) 83 | struct file *close_fd_get_file(unsigned int fd) 84 | #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(5,11,0)) 85 | int close_fd_get_file(unsigned int fd, struct file **res) 86 | #else 87 | int __close_fd_get_file(unsigned int fd, struct file **res) 88 | #endif 89 | { 90 | if (!close_fd_get_file_ptr) 91 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,8,0)) 92 | close_fd_get_file_ptr = kallsyms_lookup_name_wrapper("file_close_fd"); 93 | #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(5,11,0)) 94 | close_fd_get_file_ptr = kallsyms_lookup_name_wrapper("close_fd_get_file"); 95 | #else 96 | close_fd_get_file_ptr = kallsyms_lookup_name_wrapper("__close_fd_get_file"); 97 | #endif 98 | 99 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,19,0)) 100 | return close_fd_get_file_ptr(fd); 101 | #else 102 | return close_fd_get_file_ptr(fd, res); 103 | #endif 104 | } 105 | 106 | static int (*can_nice_ptr)(const struct task_struct *, const int) = NULL; 107 | 108 | int can_nice(const struct task_struct *p, const int nice) 109 | { 110 | if (!can_nice_ptr) 111 | can_nice_ptr = kallsyms_lookup_name_wrapper("can_nice"); 112 | return can_nice_ptr(p, nice); 113 | } 114 | 115 | static void (*mmput_async_ptr)(struct mm_struct *mm) = NULL; 116 | 117 | void mmput_async(struct mm_struct *mm) 118 | { 119 | if (!mmput_async_ptr) 120 | mmput_async_ptr = kallsyms_lookup_name_wrapper("mmput_async"); 121 | return mmput_async_ptr(mm); 122 | } 123 | 124 | static int (*security_binder_set_context_mgr_ptr)(struct task_struct *mgr) = NULL; 125 | 126 | int security_binder_set_context_mgr(struct task_struct *mgr) 127 | { 128 | if (!security_binder_set_context_mgr_ptr) 129 | security_binder_set_context_mgr_ptr = kallsyms_lookup_name_wrapper("security_binder_set_context_mgr"); 130 | return security_binder_set_context_mgr_ptr(mgr); 131 | } 132 | 133 | static int (*security_binder_transaction_ptr)(struct task_struct *from, struct task_struct *to) = NULL; 134 | 135 | int security_binder_transaction(struct task_struct *from, struct task_struct *to) 136 | { 137 | if (!security_binder_transaction_ptr) 138 | security_binder_transaction_ptr = kallsyms_lookup_name_wrapper("security_binder_transaction"); 139 | return security_binder_transaction_ptr(from, to); 140 | } 141 | 142 | static int (*security_binder_transfer_binder_ptr)(struct task_struct *from, struct task_struct *to) = NULL; 143 | 144 | int security_binder_transfer_binder(struct task_struct *from, struct task_struct *to) 145 | { 146 | if (!security_binder_transfer_binder_ptr) 147 | security_binder_transfer_binder_ptr = kallsyms_lookup_name_wrapper("security_binder_transfer_binder"); 148 | return security_binder_transfer_binder_ptr(from, to); 149 | } 150 | 151 | static int (*security_binder_transfer_file_ptr)(struct task_struct *from, struct task_struct *to, struct file *file) = NULL; 152 | 153 | int security_binder_transfer_file(struct task_struct *from, struct task_struct *to, struct file *file) 154 | { 155 | if (!security_binder_transfer_file_ptr) 156 | security_binder_transfer_file_ptr = kallsyms_lookup_name_wrapper("security_binder_transfer_file"); 157 | return security_binder_transfer_file_ptr(from, to, file); 158 | } 159 | 160 | static int (*task_work_add_ptr)(struct task_struct *task, struct callback_head *work, 161 | enum task_work_notify_mode notify) = NULL; 162 | 163 | int task_work_add(struct task_struct *task, struct callback_head *work, 164 | enum task_work_notify_mode notify) 165 | { 166 | if (!task_work_add_ptr) 167 | task_work_add_ptr = kallsyms_lookup_name_wrapper("task_work_add"); 168 | return task_work_add_ptr(task, work, notify); 169 | } 170 | 171 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,3,0)) 172 | static void (*zap_page_range_single_ptr)(struct vm_area_struct *, unsigned long, unsigned long, struct zap_details *) = NULL; 173 | 174 | void zap_page_range_single(struct vm_area_struct *vma, unsigned long address, unsigned long size, struct zap_details *details) 175 | { 176 | if (!zap_page_range_single_ptr) 177 | zap_page_range_single_ptr = kallsyms_lookup_name_wrapper("zap_page_range_single"); 178 | zap_page_range_single_ptr(vma, address, size, details); 179 | } 180 | #else 181 | static void (*zap_page_range_ptr)(struct vm_area_struct *, unsigned long, unsigned long) = NULL; 182 | 183 | void zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size) 184 | { 185 | if (!zap_page_range_ptr) 186 | zap_page_range_ptr = kallsyms_lookup_name_wrapper("zap_page_range"); 187 | zap_page_range_ptr(vma, address, size); 188 | } 189 | #endif 190 | 191 | static void (*put_ipc_ns_ptr)(struct ipc_namespace *ns) = NULL; 192 | 193 | void put_ipc_ns(struct ipc_namespace *ns) 194 | { 195 | if (!put_ipc_ns_ptr) 196 | put_ipc_ns_ptr = kallsyms_lookup_name_wrapper("put_ipc_ns"); 197 | put_ipc_ns_ptr(ns); 198 | } 199 | 200 | static struct ipc_namespace *init_ipc_ns_ptr = NULL; 201 | 202 | struct ipc_namespace *get_init_ipc_ns_ptr(void) 203 | { 204 | if (!init_ipc_ns_ptr) 205 | init_ipc_ns_ptr = kallsyms_lookup_name_wrapper("init_ipc_ns"); 206 | return init_ipc_ns_ptr; 207 | } 208 | -------------------------------------------------------------------------------- /binder/deps.h: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | 3 | #include 4 | 5 | struct ipc_namespace* get_init_ipc_ns_ptr(void); 6 | -------------------------------------------------------------------------------- /binder/dkms.conf: -------------------------------------------------------------------------------- 1 | PACKAGE_NAME="anbox-binder" 2 | PACKAGE_VERSION="1" 3 | CLEAN="make clean" 4 | MAKE[0]="make all KERNEL_SRC=/lib/modules/$kernelver/build" 5 | BUILT_MODULE_NAME[0]="binder_linux" 6 | DEST_MODULE_LOCATION[0]="/updates" 7 | AUTOINSTALL="yes" 8 | -------------------------------------------------------------------------------- /debian/README.Debian: -------------------------------------------------------------------------------- 1 | MODULE_NAME DKMS module for Debian 2 | 3 | This package was automatically generated by the DKMS system, 4 | for distribution on Debian based operating systems. 5 | 6 | -------------------------------------------------------------------------------- /debian/changelog: -------------------------------------------------------------------------------- 1 | -Puspendu Banerjee:2024-04-01T20:41:13-05:00 - Added packaging guide and compat fix 2 | -Puspendu Banerjee:2024-04-01T13:38:37-05:00 - removed obsolete name="%k", added binder symlink (#1) 3 | -ssfdust:2024-01-24T12:45:28+08:00 - Fixes for kernel 6.8 4 | -ssfdust:2024-01-06T10:20:56+08:00 - Fix for kernel 6.7+ 5 | -musover:2023-12-03T18:18:36+01:00 - Added conditional matching for kernels >= 6.3.0 6 | -musover:2023-11-23T18:06:01+01:00 - Fix build for 6.6 7 | -Zhang Hua:2023-11-01T14:49:49+08:00 - Fix build for kernel 6.6 8 | -TheSola10:2023-05-05T22:35:17+02:00 - Fixes for kernel 6.3+ 9 | -@Kumar:2023-04-18T11:16:36+05:30 - Update README.md 10 | -SonarBeserk:2022-10-30T01:07:38-04:00 - Create UNINSTALL.sh script and update README.md 11 | -munix9<44939650+munix9@users.noreply.github.com>:2023-01-30T14:30:02+01:00 - fix for kernel 6.1 12 | -Zhang Hua:2023-01-29T16:01:27+08:00 - Add TASK_FREEZABLE 13 | -Noob Zhang<17194552+zhanghua000@users.noreply.github.com>:2022-12-24T12:50:18+00:00 - Fix build on 6.1 kernel 14 | -zhanghua000:2022-10-15T15:34:44+08:00 - Fix build on 6.0 kernel 15 | -TheSola10:2022-08-09T17:39:31+02:00 - Fixed page fault by matching in-tree binder behavior 16 | -TheSola10:2022-08-08T10:32:25+02:00 - Patches for kernel 5.19 17 | -Etaash Mathamsetty<45927311+Etaash-mathamsetty@users.noreply.github.com>:2022-05-24T21:52:05-04:00 - patches for 5.18 kernel 18 | -Dhiego Cassiano Fogaça Barbosa:2021-11-23T17:49:24-03:00 - sync with android binder.c 19 | -Dhiego Cassiano Fogaça Barbosa:2021-11-13T13:51:20-03:00 - compilation fix for kernel 5.15.2 20 | -Christian Hoff:2021-10-29T17:50:10+02:00 - Compile fixes for kernels 5.11, 5.12 & 5.13 21 | -Christian Hoff:2021-09-26T18:54:51+02:00 - Update binder to the latest version 22 | -Christian Hoff:2021-03-08T21:23:59+01:00 - Another compile fix for kernel >= 5.8 23 | -Christian Hoff:2021-03-08T20:44:14+01:00 - Compile fixes for kernel >= 5.8 24 | -Christian Hoff:2021-03-08T20:19:16+01:00 - Fix compilation of binder and ashmem on kernel 5.7 and later 25 | -Simon Fels:2020-06-14T10:15:28+02:00 - Merge pull request #43 from pevik/kernel-v5.4 26 | -Simon Fels:2020-06-14T10:15:07+02:00 - Merge pull request #45 from devhammed/feat/add_script_that_automates_install_steps 27 | -Hammed Oyedele:2020-03-04T10:25:36+01:00 - docs: describe the purpose of the INSTALL.sh 28 | -Hammed Oyedele:2020-03-04T10:23:32+01:00 - feat: add script that automates install steps 29 | -Petr Vorel:2020-01-01T20:29:30+01:00 - travis: Add kernel v5.4 30 | -Simon Fels:2019-11-14T15:13:46-08:00 - Merge pull request #38 from bentolor/fix/kernel-5.3 31 | -Benjamin Schmid:2019-10-29T12:12:20+01:00 - Add KVER=5.2 and KVER=5.3 to travis configuration 32 | -Simon Fels:2019-07-13T11:32:21+02:00 - Merge pull request #19 from 86423355844265459587182778/master 33 | -Anonymous<>:2019-07-04T19:02:11+09:00 - Update Travis configuration 34 | -Anonymous<>:2019-07-04T18:54:57+09:00 - Update Travis configuration 35 | -Anonymous<>:2019-07-04T18:44:11+09:00 - Update Travis configuration 36 | -Anonymous<>:2019-07-04T18:37:54+09:00 - Update Travis configuration to use GCC 8 37 | -Anonymous<>:2019-06-04T15:43:43+09:00 - Add KVER=5.0 and KVER=5.1 to travis configuration 38 | -Anonymous<>:2019-05-12T12:45:44+09:00 - Fix compilation on kernels >= 5.1 39 | -Simon Fels:2018-09-08T14:57:43+02:00 - Merge pull request #10 from mkhon/linux-3.10 40 | -Max Khon:2018-09-06T07:30:17-04:00 - Fix build on vzkernel 3.10 41 | -Simon Fels:2018-09-02T13:45:47+02:00 - Update changelog for release 12 and 13 42 | -Simon Fels:2018-09-02T12:20:21+02:00 - Merge pull request #9 from morphis/fix-sigbus-with-newer-kernels 43 | -Simon Fels:2018-09-02T12:10:07+02:00 - ashmem: account for older kernel which don't have vma_set_anonymous 44 | -John Stultz:2018-07-31T10:17:04-07:00 - staging: ashmem: Fix SIGBUS crash when traversing mmaped ashmem pages 45 | -Simon Fels:2018-07-13T08:04:06+02:00 - Merge pull request #5 from megies/patch-1 46 | -Tobias Megies:2018-07-12T15:38:08+02:00 - Update README.md 47 | -Simon Fels:2018-06-23T14:21:13+02:00 - Merge pull request #3 from zhsj/add-install-doc 48 | -Shengjing Zhu:2018-06-22T20:56:45+08:00 - add install instruction 49 | -Simon Fels:2018-06-12T08:44:22+02:00 - Merge pull request #2 from zhsj/travis 50 | -Shengjing Zhu:2018-06-09T15:35:18+08:00 - add travis to test module build 51 | -Simon Fels:2018-06-08T18:25:50+02:00 - Merge pull request #1 from zhsj/master 52 | -Shengjing Zhu:2018-06-08T23:55:30+08:00 - improve debian package 53 | -Simon Fels:2018-06-08T17:29:53+02:00 - Add travis CI build status to our README 54 | -Simon Fels:2018-06-08T17:24:42+02:00 - scripts: add travis build support 55 | -Simon Fels:2018-06-08T17:17:43+02:00 - Import kernel module source from original Anbox repository 56 | -------------------------------------------------------------------------------- /debian/compat: -------------------------------------------------------------------------------- 1 | 10 2 | -------------------------------------------------------------------------------- /debian/control: -------------------------------------------------------------------------------- 1 | Source: anbox-modules 2 | Section: kernel 3 | Priority: optional 4 | Maintainer: Simon Fels 5 | Uploaders: 6 | Shengjing Zhu , 7 | Build-Depends: 8 | debhelper (>= 9), 9 | dkms, 10 | Standards-Version: 4.1.4 11 | Homepage: https://anbox.io 12 | Vcs-Browser: https://github.com/anbox/anbox-modules 13 | Vcs-Git: https://github.com/anbox/anbox-modules.git 14 | 15 | Package: anbox-modules-dkms 16 | Architecture: all 17 | Depends: 18 | ${misc:Depends}, 19 | Description: Android kernel driver (binder, ashmem) in DKMS format. 20 | . 21 | This package contains a out-of-tree version of the core Android 22 | kernel functionalities binder and ashmem. 23 | -------------------------------------------------------------------------------- /debian/copyright: -------------------------------------------------------------------------------- 1 | Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ 2 | Upstream-Name: anbox-modules 3 | Source: http://github.com/anbox/anbox-modules 4 | 5 | Files: * 6 | Copyright: 2008-2012 Google Inc. 7 | License: GPL-2 8 | 9 | Files: debian/* 10 | Copyright: 2016-2018, Simon Fels 11 | 2018, Shengjing Zhu 12 | License: GPL-3 13 | 14 | License: GPL-2 15 | This package is free software; you can redistribute it and/or modify 16 | it under the terms of the GNU General Public License as published by 17 | the Free Software Foundation; version 2 of the License. 18 | . 19 | This package is distributed in the hope that it will be useful, 20 | but WITHOUT ANY WARRANTY; without even the implied warranty of 21 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 22 | GNU General Public License for more details. 23 | . 24 | You should have received a copy of the GNU General Public License 25 | along with this program. If not, see 26 | . 27 | On Debian systems, the complete text of the GNU General 28 | Public License version 2 can be found in "/usr/share/common-licenses/GPL-2". 29 | 30 | License: GPL-3 31 | This program is free software: you can redistribute it and/or modify 32 | it under the terms of the GNU General Public License as published by 33 | the Free Software Foundation, version 3 of the License. 34 | . 35 | This package is distributed in the hope that it will be useful, 36 | but WITHOUT ANY WARRANTY; without even the implied warranty of 37 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 38 | GNU General Public License for more details. 39 | . 40 | You should have received a copy of the GNU General Public License 41 | along with this program. If not, see . 42 | . 43 | On Debian systems, the complete text of the GNU General 44 | Public License version 3 can be found in "/usr/share/common-licenses/GPL-3". 45 | -------------------------------------------------------------------------------- /debian/dirs: -------------------------------------------------------------------------------- 1 | usr/src/anbox-1 2 | etc/modules-load.d/ 3 | -------------------------------------------------------------------------------- /debian/dkms: -------------------------------------------------------------------------------- 1 | PACKAGE_NAME="anbox" 2 | PACKAGE_VERSION="1" 3 | CLEAN="make -C ashmem clean && make -C binder clean" 4 | MAKE[0]="'make' -j$parallel_jobs -C ashmem KERNEL_SRC=$kernel_source_dir && make -j$parallel_jobs -C binder KERNEL_SRC=$kernel_source_dir" 5 | BUILT_MODULE_NAME[0]="ashmem_linux" 6 | BUILT_MODULE_LOCATION[0]="ashmem" 7 | DEST_MODULE_LOCATION[0]="/updates" 8 | BUILT_MODULE_NAME[1]="binder_linux" 9 | BUILT_MODULE_LOCATION[1]="binder" 10 | DEST_MODULE_LOCATION[1]="/updates" 11 | AUTOINSTALL="yes" 12 | -------------------------------------------------------------------------------- /debian/install: -------------------------------------------------------------------------------- 1 | ashmem usr/src/anbox-1 2 | binder usr/src/anbox-1 3 | anbox.conf /etc/modules-load.d/ 4 | -------------------------------------------------------------------------------- /debian/rules: -------------------------------------------------------------------------------- 1 | #!/usr/bin/make -f 2 | # -*- makefile -*- 3 | 4 | %: 5 | dh $@ --with dkms 6 | -------------------------------------------------------------------------------- /debian/source/format: -------------------------------------------------------------------------------- 1 | 3.0 (native) 2 | -------------------------------------------------------------------------------- /debian/source/options: -------------------------------------------------------------------------------- 1 | tar-ignore = ".git" 2 | tar-ignore = "*.swp" 3 | -------------------------------------------------------------------------------- /debian/udev: -------------------------------------------------------------------------------- 1 | ../99-anbox.rules -------------------------------------------------------------------------------- /scripts/build-against-kernel.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -ex 4 | 5 | KVER=${1:-master} 6 | CC=${2:-gcc} 7 | 8 | src_dir="../linux-${KVER}" 9 | 10 | if [ "${KVER}" = "master" ]; then 11 | archive=master.tar.gz 12 | else 13 | archive="v${KVER}.tar.gz" 14 | fi 15 | 16 | if [ ! -d "${src_dir}" ]; then 17 | wget -O - "https://github.com/torvalds/linux/archive/${archive}" | tar -C ../ -xz 18 | fi 19 | 20 | ( 21 | cd "$src_dir" || exit 1 22 | make allmodconfig CC=${CC} HOSTCC=${CC} 23 | make prepare CC=${CC} HOSTCC=${CC} 24 | make scripts CC=${CC} HOSTCC=${CC} 25 | ) 26 | 27 | ( 28 | cd ashmem || exit 1 29 | make KERNEL_SRC="../${src_dir}" CC=${CC} HOSTCC=${CC} 30 | ) 31 | 32 | ( 33 | cd binder || exit 1 34 | make KERNEL_SRC="../${src_dir}" CC=${CC} HOSTCC=${CC} 35 | ) 36 | -------------------------------------------------------------------------------- /scripts/build-with-docker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | docker pull ubuntu:16.04 3 | docker run -i -t -v $PWD:/anbox ubuntu:16.04 /anbox/scripts/clean-build.sh 4 | -------------------------------------------------------------------------------- /scripts/clean-build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -ex 4 | 5 | apt-get update -qq 6 | apt-get install -qq -y \ 7 | build-essential \ 8 | debhelper \ 9 | git 10 | 11 | apt-get clean 12 | 13 | cd /anbox 14 | 15 | cleanup() { 16 | # In cases where anbox comes directly from a checked out Android 17 | # build environment we miss some symlinks which are present on 18 | # the host and don't have a valid git repository in that case. 19 | if [ -d .git ] ; then 20 | git clean -fdx . 21 | git reset --hard 22 | fi 23 | } 24 | 25 | cleanup 26 | 27 | apt-get install -y build-essential curl devscripts gdebi-core dkms dh-systemd 28 | apt-get install -y $(gdebi --quiet --apt-line ./debian/control) 29 | debuild -us -uc 30 | --------------------------------------------------------------------------------