├── .travis.yml
├── 99-anbox.rules
├── INSTALL.sh
├── README.md
├── anbox.conf
├── ashmem
│   ├── Makefile
│   ├── ashmem.c
│   ├── ashmem.h
│   ├── deps.c
│   ├── dkms.conf
│   └── uapi
│       └── ashmem.h
├── binder
│   ├── Makefile
│   ├── binder.c
│   ├── binder.h
│   ├── binder_trace.h
│   ├── deps.c
│   └── dkms.conf
├── debian
│   ├── README.Debian
│   ├── changelog
│   ├── compat
│   ├── control
│   ├── copyright
│   ├── dirs
│   ├── dkms
│   ├── install
│   ├── rules
│   ├── source
│   │   ├── format
│   │   └── options
│   └── udev
└── scripts
    ├── build-against-kernel.sh
    ├── build-with-docker.sh
    └── clean-build.sh

/.travis.yml:
--------------------------------------------------------------------------------
1 | language: c
2 | os: linux
3 | sudo: false
4 | 
5 | addons:
6 |   apt:
7 |     sources:
8 |       - ubuntu-toolchain-r-test
9 |     packages:
10 |       - bison
11 |       - flex
12 |       - libelf-dev
13 |       - dpkg-dev
14 |       - debhelper
15 |       - dkms
16 |       - fakeroot
17 |       - gcc-8
18 | 
19 | env:
20 |   - KVER=4.4
21 |   - KVER=4.8
22 |   - KVER=4.9
23 |   - KVER=4.13
24 |   - KVER=4.14
25 |   - KVER=4.15
26 |   - KVER=4.16
27 |   - KVER=4.17
28 |   - KVER=5.0 && CC=gcc-8
29 |   - KVER=5.1 && CC=gcc-8
30 |   - KVER=5.2 && CC=gcc-8
31 |   - KVER=5.3 && CC=gcc-8
32 |   - KVER=5.4 && CC=gcc-8
33 |   - KVER=master && CC=gcc-8
34 | 
35 | matrix:
36 |   allow_failures:
37 |     - env: KVER=master
38 |   include:
39 |     - script:
40 |         - dpkg-buildpackage -us -uc
41 |       env: KVER="Debian Package Building"
42 | 
43 | script:
44 |   - ./scripts/build-against-kernel.sh ${KVER} ${CC}
45 | 
--------------------------------------------------------------------------------
/99-anbox.rules:
--------------------------------------------------------------------------------
1 | KERNEL=="ashmem", NAME="%k", MODE="0666"
2 | KERNEL=="binder*", NAME="%k", MODE="0666"
3 | 
--------------------------------------------------------------------------------
/INSTALL.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | # First install the configuration files:
4 | sudo cp anbox.conf /etc/modules-load.d/
5 | sudo cp 99-anbox.rules /lib/udev/rules.d/
6 | 
7 | # Then copy the module sources to /usr/src/:
8 | sudo cp -rT ashmem /usr/src/anbox-ashmem-1
9 | sudo cp -rT binder /usr/src/anbox-binder-1
10 | 
11 | # Finally use dkms to build and install:
12 | sudo dkms install anbox-ashmem/1
13 | sudo dkms install anbox-binder/1
14 | 
15 | # Verify by loading these modules and checking the created devices:
16 | sudo modprobe ashmem_linux
17 | sudo modprobe binder_linux
18 | lsmod | grep -e ashmem_linux -e binder_linux
19 | ls -alh /dev/binder /dev/ashmem
20 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [![Build Status](https://travis-ci.org/anbox/anbox-modules.svg?branch=master)](https://travis-ci.org/anbox/anbox-modules)
2 | 
3 | # Anbox Kernel Modules
4 | 
5 | This repository contains the kernel modules necessary to run the Anbox
6 | Android container runtime. They're split out of the original Anbox
7 | repository to make packaging in various Linux distributions easier.
8 | 
9 | # Install Instructions
10 | 
11 | You need `dkms` and the kernel headers installed on your system. You can install `dkms` with
12 | `sudo apt install dkms` or `sudo yum install dkms` (`dkms` is available in the EPEL
13 | repository for CentOS).
14 | 
15 | The package name for the kernel headers varies between distributions, e.g.
16 | `linux-headers-generic` (Ubuntu), `linux-headers-amd64` (Debian),
17 | `kernel-devel` (CentOS, Fedora), `kernel-default-devel` (openSUSE).
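For example, on Ubuntu or Debian the headers matching the running kernel can usually be installed with the command below (adjust the package name for your distribution, as noted above):

```
$ sudo apt install linux-headers-$(uname -r)
```

On Fedora or recent CentOS releases the equivalent is typically `sudo dnf install kernel-devel-$(uname -r)`.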
18 | 19 | 20 | You can either run `./INSTALL.sh` script to automate the installation steps or follow them manually below: 21 | 22 | * First install the configuration files: 23 | 24 | ``` 25 | $ sudo cp anbox.conf /etc/modules-load.d/ 26 | $ sudo cp 99-anbox.rules /lib/udev/rules.d/ 27 | ``` 28 | 29 | * Then copy the module sources to `/usr/src/`: 30 | 31 | ``` 32 | $ sudo cp -rT ashmem /usr/src/anbox-ashmem-1 33 | $ sudo cp -rT binder /usr/src/anbox-binder-1 34 | ``` 35 | 36 | * Finally use `dkms` to build and install: 37 | 38 | ``` 39 | $ sudo dkms install anbox-ashmem/1 40 | $ sudo dkms install anbox-binder/1 41 | ``` 42 | 43 | You can verify by loading these modules and checking the created devices: 44 | 45 | ``` 46 | $ sudo modprobe ashmem_linux 47 | $ sudo modprobe binder_linux 48 | $ lsmod | grep -e ashmem_linux -e binder_linux 49 | $ ls -alh /dev/binder /dev/ashmem 50 | ``` 51 | 52 | You are expected to see output like: 53 | 54 | ``` 55 | binder_linux 114688 0 56 | ashmem_linux 16384 0 57 | crw-rw-rw- 1 root root 10, 55 Jun 19 16:30 /dev/ashmem 58 | crw-rw-rw- 1 root root 511, 0 Jun 19 16:30 /dev/binder 59 | ``` -------------------------------------------------------------------------------- /anbox.conf: -------------------------------------------------------------------------------- 1 | ashmem_linux 2 | binder_linux 3 | -------------------------------------------------------------------------------- /ashmem/Makefile: -------------------------------------------------------------------------------- 1 | ccflags-y += -I$(src) -Wno-error=implicit-int -Wno-int-conversion 2 | obj-m := ashmem_linux.o 3 | ashmem_linux-y := deps.o ashmem.o 4 | 5 | KERNEL_SRC ?= /lib/modules/$(shell uname -r)/build 6 | VZ= $(shell uname -r | grep vz) 7 | ifneq ($(VZ),) 8 | ccflags-y += -DVZKERNEL 9 | endif 10 | 11 | all: 12 | $(MAKE) -C $(KERNEL_SRC) V=0 M=$$PWD 13 | 14 | install: 15 | cp ashmem_linux.ko $(DESTDIR)/ 16 | 17 | clean: 18 | rm -rf deps.h *.o *.ko *.mod.c *.symvers *.order .*.cmd .tmp_versions 19 | -------------------------------------------------------------------------------- /ashmem/ashmem.c: -------------------------------------------------------------------------------- 1 | /* mm/ashmem.c 2 | * 3 | * Anonymous Shared Memory Subsystem, ashmem 4 | * 5 | * Copyright (C) 2008 Google, Inc. 6 | * 7 | * Robert Love 8 | * 9 | * This software is licensed under the terms of the GNU General Public 10 | * License version 2, as published by the Free Software Foundation, and 11 | * may be copied, distributed, and modified under those terms. 12 | * 13 | * This program is distributed in the hope that it will be useful, 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 | * GNU General Public License for more details. 
17 | */ 18 | 19 | #define pr_fmt(fmt) "ashmem: " fmt 20 | 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include 28 | #include 29 | #include 30 | #include 31 | #include 32 | #include 33 | #include 34 | #include 35 | #include 36 | #include 37 | #include "ashmem.h" 38 | 39 | #define ASHMEM_NAME_PREFIX "dev/ashmem/" 40 | #define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1) 41 | #define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN) 42 | 43 | /** 44 | * struct ashmem_area - The anonymous shared memory area 45 | * @name: The optional name in /proc/pid/maps 46 | * @unpinned_list: The list of all ashmem areas 47 | * @file: The shmem-based backing file 48 | * @size: The size of the mapping, in bytes 49 | * @prot_mask: The allowed protection bits, as vm_flags 50 | * 51 | * The lifecycle of this structure is from our parent file's open() until 52 | * its release(). It is also protected by 'ashmem_mutex' 53 | * 54 | * Warning: Mappings do NOT pin this structure; It dies on close() 55 | */ 56 | struct ashmem_area { 57 | char name[ASHMEM_FULL_NAME_LEN]; 58 | struct list_head unpinned_list; 59 | struct file *file; 60 | size_t size; 61 | unsigned long prot_mask; 62 | }; 63 | 64 | /** 65 | * struct ashmem_range - A range of unpinned/evictable pages 66 | * @lru: The entry in the LRU list 67 | * @unpinned: The entry in its area's unpinned list 68 | * @asma: The associated anonymous shared memory area. 69 | * @pgstart: The starting page (inclusive) 70 | * @pgend: The ending page (inclusive) 71 | * @purged: The purge status (ASHMEM_NOT or ASHMEM_WAS_PURGED) 72 | * 73 | * The lifecycle of this structure is from unpin to pin. 74 | * It is protected by 'ashmem_mutex' 75 | */ 76 | struct ashmem_range { 77 | struct list_head lru; 78 | struct list_head unpinned; 79 | struct ashmem_area *asma; 80 | size_t pgstart; 81 | size_t pgend; 82 | unsigned int purged; 83 | }; 84 | 85 | /* LRU list of unpinned pages, protected by ashmem_mutex */ 86 | static LIST_HEAD(ashmem_lru_list); 87 | 88 | /* 89 | * long lru_count - The count of pages on our LRU list. 90 | * 91 | * This is protected by ashmem_mutex. 
92 | */ 93 | static unsigned long lru_count; 94 | 95 | /* 96 | * ashmem_mutex - protects the list of and each individual ashmem_area 97 | * 98 | * Lock Ordering: ashmex_mutex -> i_mutex -> i_alloc_sem 99 | */ 100 | static DEFINE_MUTEX(ashmem_mutex); 101 | 102 | static struct kmem_cache *ashmem_area_cachep __read_mostly; 103 | static struct kmem_cache *ashmem_range_cachep __read_mostly; 104 | 105 | #define range_size(range) \ 106 | ((range)->pgend - (range)->pgstart + 1) 107 | 108 | #define range_on_lru(range) \ 109 | ((range)->purged == ASHMEM_NOT_PURGED) 110 | 111 | #define page_range_subsumes_range(range, start, end) \ 112 | (((range)->pgstart >= (start)) && ((range)->pgend <= (end))) 113 | 114 | #define page_range_subsumed_by_range(range, start, end) \ 115 | (((range)->pgstart <= (start)) && ((range)->pgend >= (end))) 116 | 117 | #define page_in_range(range, page) \ 118 | (((range)->pgstart <= (page)) && ((range)->pgend >= (page))) 119 | 120 | #define page_range_in_range(range, start, end) \ 121 | (page_in_range(range, start) || page_in_range(range, end) || \ 122 | page_range_subsumes_range(range, start, end)) 123 | 124 | #define range_before_page(range, page) \ 125 | ((range)->pgend < (page)) 126 | 127 | #define PROT_MASK (PROT_EXEC | PROT_READ | PROT_WRITE) 128 | 129 | /** 130 | * lru_add() - Adds a range of memory to the LRU list 131 | * @range: The memory range being added. 132 | * 133 | * The range is first added to the end (tail) of the LRU list. 134 | * After this, the size of the range is added to @lru_count 135 | */ 136 | static inline void lru_add(struct ashmem_range *range) 137 | { 138 | list_add_tail(&range->lru, &ashmem_lru_list); 139 | lru_count += range_size(range); 140 | } 141 | 142 | /** 143 | * lru_del() - Removes a range of memory from the LRU list 144 | * @range: The memory range being removed 145 | * 146 | * The range is first deleted from the LRU list. 147 | * After this, the size of the range is removed from @lru_count 148 | */ 149 | static inline void lru_del(struct ashmem_range *range) 150 | { 151 | list_del(&range->lru); 152 | lru_count -= range_size(range); 153 | } 154 | 155 | /** 156 | * range_alloc() - Allocates and initializes a new ashmem_range structure 157 | * @asma: The associated ashmem_area 158 | * @prev_range: The previous ashmem_range in the sorted asma->unpinned list 159 | * @purged: Initial purge status (ASMEM_NOT_PURGED or ASHMEM_WAS_PURGED) 160 | * @start: The starting page (inclusive) 161 | * @end: The ending page (inclusive) 162 | * 163 | * This function is protected by ashmem_mutex. 
164 | * 165 | * Return: 0 if successful, or -ENOMEM if there is an error 166 | */ 167 | static int range_alloc(struct ashmem_area *asma, 168 | struct ashmem_range *prev_range, unsigned int purged, 169 | size_t start, size_t end) 170 | { 171 | struct ashmem_range *range; 172 | 173 | range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL); 174 | if (unlikely(!range)) 175 | return -ENOMEM; 176 | 177 | range->asma = asma; 178 | range->pgstart = start; 179 | range->pgend = end; 180 | range->purged = purged; 181 | 182 | list_add_tail(&range->unpinned, &prev_range->unpinned); 183 | 184 | if (range_on_lru(range)) 185 | lru_add(range); 186 | 187 | return 0; 188 | } 189 | 190 | /** 191 | * range_del() - Deletes and dealloctes an ashmem_range structure 192 | * @range: The associated ashmem_range that has previously been allocated 193 | */ 194 | static void range_del(struct ashmem_range *range) 195 | { 196 | list_del(&range->unpinned); 197 | if (range_on_lru(range)) 198 | lru_del(range); 199 | kmem_cache_free(ashmem_range_cachep, range); 200 | } 201 | 202 | /** 203 | * range_shrink() - Shrinks an ashmem_range 204 | * @range: The associated ashmem_range being shrunk 205 | * @start: The starting byte of the new range 206 | * @end: The ending byte of the new range 207 | * 208 | * This does not modify the data inside the existing range in any way - It 209 | * simply shrinks the boundaries of the range. 210 | * 211 | * Theoretically, with a little tweaking, this could eventually be changed 212 | * to range_resize, and expand the lru_count if the new range is larger. 213 | */ 214 | static inline void range_shrink(struct ashmem_range *range, 215 | size_t start, size_t end) 216 | { 217 | size_t pre = range_size(range); 218 | 219 | range->pgstart = start; 220 | range->pgend = end; 221 | 222 | if (range_on_lru(range)) 223 | lru_count -= pre - range_size(range); 224 | } 225 | 226 | /** 227 | * ashmem_open() - Opens an Anonymous Shared Memory structure 228 | * @inode: The backing file's index node(?) 229 | * @file: The backing file 230 | * 231 | * Please note that the ashmem_area is not returned by this function - It is 232 | * instead written to "file->private_data". 233 | * 234 | * Return: 0 if successful, or another code if unsuccessful. 235 | */ 236 | static int ashmem_open(struct inode *inode, struct file *file) 237 | { 238 | struct ashmem_area *asma; 239 | int ret; 240 | 241 | ret = generic_file_open(inode, file); 242 | if (unlikely(ret)) 243 | return ret; 244 | 245 | asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL); 246 | if (unlikely(!asma)) 247 | return -ENOMEM; 248 | 249 | INIT_LIST_HEAD(&asma->unpinned_list); 250 | memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN); 251 | asma->prot_mask = PROT_MASK; 252 | file->private_data = asma; 253 | 254 | return 0; 255 | } 256 | 257 | /** 258 | * ashmem_release() - Releases an Anonymous Shared Memory structure 259 | * @ignored: The backing file's Index Node(?) - It is ignored here. 260 | * @file: The backing file 261 | * 262 | * Return: 0 if successful. If it is anything else, go have a coffee and 263 | * try again. 
264 | */ 265 | static int ashmem_release(struct inode *ignored, struct file *file) 266 | { 267 | struct ashmem_area *asma = file->private_data; 268 | struct ashmem_range *range, *next; 269 | 270 | mutex_lock(&ashmem_mutex); 271 | list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) 272 | range_del(range); 273 | mutex_unlock(&ashmem_mutex); 274 | 275 | if (asma->file) 276 | fput(asma->file); 277 | kmem_cache_free(ashmem_area_cachep, asma); 278 | 279 | return 0; 280 | } 281 | 282 | /** 283 | * ashmem_read() - Reads a set of bytes from an Ashmem-enabled file 284 | * @file: The associated backing file. 285 | * @buf: The buffer of data being written to 286 | * @len: The number of bytes being read 287 | * @pos: The position of the first byte to read. 288 | * 289 | * Return: 0 if successful, or another return code if not. 290 | */ 291 | static ssize_t ashmem_read(struct file *file, char __user *buf, 292 | size_t len, loff_t *pos) 293 | { 294 | struct ashmem_area *asma = file->private_data; 295 | int ret = 0; 296 | 297 | mutex_lock(&ashmem_mutex); 298 | 299 | /* If size is not set, or set to 0, always return EOF. */ 300 | if (asma->size == 0) 301 | goto out_unlock; 302 | 303 | if (!asma->file) { 304 | ret = -EBADF; 305 | goto out_unlock; 306 | } 307 | 308 | mutex_unlock(&ashmem_mutex); 309 | 310 | /* 311 | * asma and asma->file are used outside the lock here. We assume 312 | * once asma->file is set it will never be changed, and will not 313 | * be destroyed until all references to the file are dropped and 314 | * ashmem_release is called. 315 | * 316 | * kernel_read supersedes vfs_read from kernel version 3.9 317 | */ 318 | #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0) 319 | ret = __vfs_read(asma->file, buf, len, pos); 320 | #else 321 | ret = kernel_read(asma->file, buf, len, pos); 322 | #endif 323 | if (ret >= 0) 324 | /** Update backing file pos, since f_ops->read() doesn't */ 325 | asma->file->f_pos = *pos; 326 | return ret; 327 | 328 | out_unlock: 329 | mutex_unlock(&ashmem_mutex); 330 | return ret; 331 | } 332 | 333 | static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin) 334 | { 335 | struct ashmem_area *asma = file->private_data; 336 | int ret; 337 | 338 | mutex_lock(&ashmem_mutex); 339 | 340 | if (asma->size == 0) { 341 | ret = -EINVAL; 342 | goto out; 343 | } 344 | 345 | if (!asma->file) { 346 | ret = -EBADF; 347 | goto out; 348 | } 349 | 350 | ret = vfs_llseek(asma->file, offset, origin); 351 | if (ret < 0) 352 | goto out; 353 | 354 | /** Copy f_pos from backing file, since f_ops->llseek() sets it */ 355 | file->f_pos = asma->file->f_pos; 356 | 357 | out: 358 | mutex_unlock(&ashmem_mutex); 359 | return ret; 360 | } 361 | 362 | static inline vm_flags_t calc_vm_may_flags(unsigned long prot) 363 | { 364 | return _calc_vm_trans(prot, PROT_READ, VM_MAYREAD) | 365 | _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) | 366 | _calc_vm_trans(prot, PROT_EXEC, VM_MAYEXEC); 367 | } 368 | 369 | static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) 370 | { 371 | struct ashmem_area *asma = file->private_data; 372 | int ret = 0; 373 | 374 | mutex_lock(&ashmem_mutex); 375 | 376 | /* user needs to SET_SIZE before mapping */ 377 | if (unlikely(!asma->size)) { 378 | ret = -EINVAL; 379 | goto out; 380 | } 381 | 382 | /* requested protection bits must match our allowed protection mask */ 383 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) || defined(VZKERNEL) 384 | if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask, 0)) & 385 | 
calc_vm_prot_bits(PROT_MASK, 0))) { 386 | #else 387 | if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) & 388 | calc_vm_prot_bits(PROT_MASK))) { 389 | #endif 390 | ret = -EPERM; 391 | goto out; 392 | } 393 | vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask); 394 | 395 | if (!asma->file) { 396 | char *name = ASHMEM_NAME_DEF; 397 | struct file *vmfile; 398 | 399 | if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') 400 | name = asma->name; 401 | 402 | /* ... and allocate the backing shmem file */ 403 | vmfile = shmem_file_setup(name, asma->size, vma->vm_flags); 404 | if (IS_ERR(vmfile)) { 405 | ret = PTR_ERR(vmfile); 406 | goto out; 407 | } 408 | asma->file = vmfile; 409 | } 410 | get_file(asma->file); 411 | 412 | /* 413 | * XXX - Reworked to use shmem_zero_setup() instead of 414 | * shmem_set_file while we're in staging. -jstultz 415 | */ 416 | if (vma->vm_flags & VM_SHARED) { 417 | ret = shmem_zero_setup(vma); 418 | if (ret) { 419 | fput(asma->file); 420 | goto out; 421 | } 422 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0) 423 | } else { 424 | vma_set_anonymous(vma); 425 | } 426 | #else 427 | } 428 | #endif 429 | 430 | if (vma->vm_file) 431 | fput(vma->vm_file); 432 | vma->vm_file = asma->file; 433 | 434 | out: 435 | mutex_unlock(&ashmem_mutex); 436 | return ret; 437 | } 438 | 439 | /* 440 | * ashmem_shrink - our cache shrinker, called from mm/vmscan.c 441 | * 442 | * 'nr_to_scan' is the number of objects to scan for freeing. 443 | * 444 | * 'gfp_mask' is the mask of the allocation that got us into this mess. 445 | * 446 | * Return value is the number of objects freed or -1 if we cannot 447 | * proceed without risk of deadlock (due to gfp_mask). 448 | * 449 | * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial 450 | * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan' 451 | * pages freed. 452 | */ 453 | static unsigned long 454 | ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) 455 | { 456 | struct ashmem_range *range, *next; 457 | unsigned long freed = 0; 458 | 459 | /* We might recurse into filesystem code, so bail out if necessary */ 460 | if (!(sc->gfp_mask & __GFP_FS)) 461 | return SHRINK_STOP; 462 | 463 | mutex_lock(&ashmem_mutex); 464 | list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) { 465 | loff_t start = range->pgstart * PAGE_SIZE; 466 | loff_t end = (range->pgend + 1) * PAGE_SIZE; 467 | 468 | vfs_fallocate(range->asma->file, 469 | FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 470 | start, end - start); 471 | range->purged = ASHMEM_WAS_PURGED; 472 | lru_del(range); 473 | 474 | freed += range_size(range); 475 | if (--sc->nr_to_scan <= 0) 476 | break; 477 | } 478 | mutex_unlock(&ashmem_mutex); 479 | return freed; 480 | } 481 | 482 | static unsigned long 483 | ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc) 484 | { 485 | /* 486 | * note that lru_count is count of pages on the lru, not a count of 487 | * objects on the list. This means the scan function needs to return the 488 | * number of pages freed, not the number of objects scanned. 
489 | */ 490 | return lru_count; 491 | } 492 | 493 | static struct shrinker ashmem_shrinker = { 494 | .count_objects = ashmem_shrink_count, 495 | .scan_objects = ashmem_shrink_scan, 496 | /* 497 | * XXX (dchinner): I wish people would comment on why they need on 498 | * significant changes to the default value here 499 | */ 500 | .seeks = DEFAULT_SEEKS * 4, 501 | }; 502 | 503 | static int set_prot_mask(struct ashmem_area *asma, unsigned long prot) 504 | { 505 | int ret = 0; 506 | 507 | mutex_lock(&ashmem_mutex); 508 | 509 | /* the user can only remove, not add, protection bits */ 510 | if (unlikely((asma->prot_mask & prot) != prot)) { 511 | ret = -EINVAL; 512 | goto out; 513 | } 514 | 515 | /* does the application expect PROT_READ to imply PROT_EXEC? */ 516 | if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) 517 | prot |= PROT_EXEC; 518 | 519 | asma->prot_mask = prot; 520 | 521 | out: 522 | mutex_unlock(&ashmem_mutex); 523 | return ret; 524 | } 525 | 526 | static int set_name(struct ashmem_area *asma, void __user *name) 527 | { 528 | int len; 529 | int ret = 0; 530 | char local_name[ASHMEM_NAME_LEN]; 531 | 532 | /* 533 | * Holding the ashmem_mutex while doing a copy_from_user might cause 534 | * an data abort which would try to access mmap_sem. If another 535 | * thread has invoked ashmem_mmap then it will be holding the 536 | * semaphore and will be waiting for ashmem_mutex, there by leading to 537 | * deadlock. We'll release the mutex and take the name to a local 538 | * variable that does not need protection and later copy the local 539 | * variable to the structure member with lock held. 540 | */ 541 | len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN); 542 | if (len < 0) 543 | return len; 544 | if (len == ASHMEM_NAME_LEN) 545 | local_name[ASHMEM_NAME_LEN - 1] = '\0'; 546 | mutex_lock(&ashmem_mutex); 547 | /* cannot change an existing mapping's name */ 548 | if (unlikely(asma->file)) 549 | ret = -EINVAL; 550 | else 551 | strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name); 552 | 553 | mutex_unlock(&ashmem_mutex); 554 | return ret; 555 | } 556 | 557 | static int get_name(struct ashmem_area *asma, void __user *name) 558 | { 559 | int ret = 0; 560 | size_t len; 561 | /* 562 | * Have a local variable to which we'll copy the content 563 | * from asma with the lock held. Later we can copy this to the user 564 | * space safely without holding any locks. So even if we proceed to 565 | * wait for mmap_sem, it won't lead to deadlock. 566 | */ 567 | char local_name[ASHMEM_NAME_LEN]; 568 | 569 | mutex_lock(&ashmem_mutex); 570 | if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') { 571 | /* 572 | * Copying only `len', instead of ASHMEM_NAME_LEN, bytes 573 | * prevents us from revealing one user's stack to another. 574 | */ 575 | len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1; 576 | memcpy(local_name, asma->name + ASHMEM_NAME_PREFIX_LEN, len); 577 | } else { 578 | len = sizeof(ASHMEM_NAME_DEF); 579 | memcpy(local_name, ASHMEM_NAME_DEF, len); 580 | } 581 | mutex_unlock(&ashmem_mutex); 582 | 583 | /* 584 | * Now we are just copying from the stack variable to userland 585 | * No lock held 586 | */ 587 | if (unlikely(copy_to_user(name, local_name, len))) 588 | ret = -EFAULT; 589 | return ret; 590 | } 591 | 592 | /* 593 | * ashmem_pin - pin the given ashmem region, returning whether it was 594 | * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED). 595 | * 596 | * Caller must hold ashmem_mutex. 
597 | */ 598 | static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend) 599 | { 600 | struct ashmem_range *range, *next; 601 | int ret = ASHMEM_NOT_PURGED; 602 | 603 | list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) { 604 | /* moved past last applicable page; we can short circuit */ 605 | if (range_before_page(range, pgstart)) 606 | break; 607 | 608 | /* 609 | * The user can ask us to pin pages that span multiple ranges, 610 | * or to pin pages that aren't even unpinned, so this is messy. 611 | * 612 | * Four cases: 613 | * 1. The requested range subsumes an existing range, so we 614 | * just remove the entire matching range. 615 | * 2. The requested range overlaps the start of an existing 616 | * range, so we just update that range. 617 | * 3. The requested range overlaps the end of an existing 618 | * range, so we just update that range. 619 | * 4. The requested range punches a hole in an existing range, 620 | * so we have to update one side of the range and then 621 | * create a new range for the other side. 622 | */ 623 | if (page_range_in_range(range, pgstart, pgend)) { 624 | ret |= range->purged; 625 | 626 | /* Case #1: Easy. Just nuke the whole thing. */ 627 | if (page_range_subsumes_range(range, pgstart, pgend)) { 628 | range_del(range); 629 | continue; 630 | } 631 | 632 | /* Case #2: We overlap from the start, so adjust it */ 633 | if (range->pgstart >= pgstart) { 634 | range_shrink(range, pgend + 1, range->pgend); 635 | continue; 636 | } 637 | 638 | /* Case #3: We overlap from the rear, so adjust it */ 639 | if (range->pgend <= pgend) { 640 | range_shrink(range, range->pgstart, 641 | pgstart - 1); 642 | continue; 643 | } 644 | 645 | /* 646 | * Case #4: We eat a chunk out of the middle. A bit 647 | * more complicated, we allocate a new range for the 648 | * second half and adjust the first chunk's endpoint. 649 | */ 650 | range_alloc(asma, range, range->purged, 651 | pgend + 1, range->pgend); 652 | range_shrink(range, range->pgstart, pgstart - 1); 653 | break; 654 | } 655 | } 656 | 657 | return ret; 658 | } 659 | 660 | /* 661 | * ashmem_unpin - unpin the given range of pages. Returns zero on success. 662 | * 663 | * Caller must hold ashmem_mutex. 664 | */ 665 | static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend) 666 | { 667 | struct ashmem_range *range, *next; 668 | unsigned int purged = ASHMEM_NOT_PURGED; 669 | 670 | restart: 671 | list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) { 672 | /* short circuit: this is our insertion point */ 673 | if (range_before_page(range, pgstart)) 674 | break; 675 | 676 | /* 677 | * The user can ask us to unpin pages that are already entirely 678 | * or partially pinned. We handle those two cases here. 679 | */ 680 | if (page_range_subsumed_by_range(range, pgstart, pgend)) 681 | return 0; 682 | if (page_range_in_range(range, pgstart, pgend)) { 683 | pgstart = min_t(size_t, range->pgstart, pgstart); 684 | pgend = max_t(size_t, range->pgend, pgend); 685 | purged |= range->purged; 686 | range_del(range); 687 | goto restart; 688 | } 689 | } 690 | 691 | return range_alloc(asma, range, purged, pgstart, pgend); 692 | } 693 | 694 | /* 695 | * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the 696 | * given interval are unpinned and ASHMEM_IS_PINNED otherwise. 697 | * 698 | * Caller must hold ashmem_mutex. 
699 | */ 700 | static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart, 701 | size_t pgend) 702 | { 703 | struct ashmem_range *range; 704 | int ret = ASHMEM_IS_PINNED; 705 | 706 | list_for_each_entry(range, &asma->unpinned_list, unpinned) { 707 | if (range_before_page(range, pgstart)) 708 | break; 709 | if (page_range_in_range(range, pgstart, pgend)) { 710 | ret = ASHMEM_IS_UNPINNED; 711 | break; 712 | } 713 | } 714 | 715 | return ret; 716 | } 717 | 718 | static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd, 719 | void __user *p) 720 | { 721 | struct ashmem_pin pin; 722 | size_t pgstart, pgend; 723 | int ret = -EINVAL; 724 | 725 | if (unlikely(!asma->file)) 726 | return -EINVAL; 727 | 728 | if (unlikely(copy_from_user(&pin, p, sizeof(pin)))) 729 | return -EFAULT; 730 | 731 | /* per custom, you can pass zero for len to mean "everything onward" */ 732 | if (!pin.len) 733 | pin.len = PAGE_ALIGN(asma->size) - pin.offset; 734 | 735 | if (unlikely((pin.offset | pin.len) & ~PAGE_MASK)) 736 | return -EINVAL; 737 | 738 | if (unlikely(((__u32)-1) - pin.offset < pin.len)) 739 | return -EINVAL; 740 | 741 | if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len)) 742 | return -EINVAL; 743 | 744 | pgstart = pin.offset / PAGE_SIZE; 745 | pgend = pgstart + (pin.len / PAGE_SIZE) - 1; 746 | 747 | mutex_lock(&ashmem_mutex); 748 | 749 | switch (cmd) { 750 | case ASHMEM_PIN: 751 | ret = ashmem_pin(asma, pgstart, pgend); 752 | break; 753 | case ASHMEM_UNPIN: 754 | ret = ashmem_unpin(asma, pgstart, pgend); 755 | break; 756 | case ASHMEM_GET_PIN_STATUS: 757 | ret = ashmem_get_pin_status(asma, pgstart, pgend); 758 | break; 759 | } 760 | 761 | mutex_unlock(&ashmem_mutex); 762 | 763 | return ret; 764 | } 765 | 766 | static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 767 | { 768 | struct ashmem_area *asma = file->private_data; 769 | long ret = -ENOTTY; 770 | 771 | switch (cmd) { 772 | case ASHMEM_SET_NAME: 773 | ret = set_name(asma, (void __user *)arg); 774 | break; 775 | case ASHMEM_GET_NAME: 776 | ret = get_name(asma, (void __user *)arg); 777 | break; 778 | case ASHMEM_SET_SIZE: 779 | ret = -EINVAL; 780 | if (!asma->file) { 781 | ret = 0; 782 | asma->size = (size_t)arg; 783 | } 784 | break; 785 | case ASHMEM_GET_SIZE: 786 | ret = asma->size; 787 | break; 788 | case ASHMEM_SET_PROT_MASK: 789 | ret = set_prot_mask(asma, arg); 790 | break; 791 | case ASHMEM_GET_PROT_MASK: 792 | ret = asma->prot_mask; 793 | break; 794 | case ASHMEM_PIN: 795 | case ASHMEM_UNPIN: 796 | case ASHMEM_GET_PIN_STATUS: 797 | ret = ashmem_pin_unpin(asma, cmd, (void __user *)arg); 798 | break; 799 | case ASHMEM_PURGE_ALL_CACHES: 800 | ret = -EPERM; 801 | if (capable(CAP_SYS_ADMIN)) { 802 | struct shrink_control sc = { 803 | .gfp_mask = GFP_KERNEL, 804 | .nr_to_scan = LONG_MAX, 805 | }; 806 | ret = ashmem_shrink_count(&ashmem_shrinker, &sc); 807 | ashmem_shrink_scan(&ashmem_shrinker, &sc); 808 | } 809 | break; 810 | } 811 | 812 | return ret; 813 | } 814 | 815 | /* support of 32bit userspace on 64bit platforms */ 816 | #ifdef CONFIG_COMPAT 817 | static long compat_ashmem_ioctl(struct file *file, unsigned int cmd, 818 | unsigned long arg) 819 | { 820 | switch (cmd) { 821 | case COMPAT_ASHMEM_SET_SIZE: 822 | cmd = ASHMEM_SET_SIZE; 823 | break; 824 | case COMPAT_ASHMEM_SET_PROT_MASK: 825 | cmd = ASHMEM_SET_PROT_MASK; 826 | break; 827 | } 828 | return ashmem_ioctl(file, cmd, arg); 829 | } 830 | #endif 831 | 832 | static const struct file_operations ashmem_fops = { 833 | .owner = 
THIS_MODULE, 834 | .open = ashmem_open, 835 | .release = ashmem_release, 836 | .read = ashmem_read, 837 | .llseek = ashmem_llseek, 838 | .mmap = ashmem_mmap, 839 | .unlocked_ioctl = ashmem_ioctl, 840 | #ifdef CONFIG_COMPAT 841 | .compat_ioctl = compat_ashmem_ioctl, 842 | #endif 843 | }; 844 | 845 | static struct miscdevice ashmem_misc = { 846 | .minor = MISC_DYNAMIC_MINOR, 847 | .name = "ashmem", 848 | .fops = &ashmem_fops, 849 | }; 850 | 851 | static int __init ashmem_init(void) 852 | { 853 | int ret; 854 | 855 | ashmem_area_cachep = kmem_cache_create("ashmem_area_cache", 856 | sizeof(struct ashmem_area), 857 | 0, 0, NULL); 858 | if (unlikely(!ashmem_area_cachep)) { 859 | pr_err("failed to create slab cache\n"); 860 | return -ENOMEM; 861 | } 862 | 863 | ashmem_range_cachep = kmem_cache_create("ashmem_range_cache", 864 | sizeof(struct ashmem_range), 865 | 0, 0, NULL); 866 | if (unlikely(!ashmem_range_cachep)) { 867 | pr_err("failed to create slab cache\n"); 868 | return -ENOMEM; 869 | } 870 | 871 | ret = misc_register(&ashmem_misc); 872 | if (unlikely(ret)) { 873 | pr_err("failed to register misc device!\n"); 874 | return ret; 875 | } 876 | 877 | register_shrinker(&ashmem_shrinker); 878 | 879 | return 0; 880 | } 881 | 882 | static void __exit ashmem_exit(void) 883 | { 884 | unregister_shrinker(&ashmem_shrinker); 885 | 886 | misc_deregister(&ashmem_misc); 887 | 888 | kmem_cache_destroy(ashmem_range_cachep); 889 | kmem_cache_destroy(ashmem_area_cachep); 890 | } 891 | 892 | module_init(ashmem_init); 893 | module_exit(ashmem_exit); 894 | 895 | MODULE_LICENSE("GPL"); 896 | -------------------------------------------------------------------------------- /ashmem/ashmem.h: -------------------------------------------------------------------------------- 1 | /* 2 | * include/linux/ashmem.h 3 | * 4 | * Copyright 2008 Google Inc. 5 | * Author: Robert Love 6 | * 7 | * This file is dual licensed. It may be redistributed and/or modified 8 | * under the terms of the Apache 2.0 License OR version 2 of the GNU 9 | * General Public License. 
10 | */ 11 | 12 | #ifndef _LINUX_ASHMEM_H 13 | #define _LINUX_ASHMEM_H 14 | 15 | #include 16 | #include 17 | #include 18 | 19 | #include "uapi/ashmem.h" 20 | 21 | /* support of 32bit userspace on 64bit platforms */ 22 | #ifdef CONFIG_COMPAT 23 | #define COMPAT_ASHMEM_SET_SIZE _IOW(__ASHMEMIOC, 3, compat_size_t) 24 | #define COMPAT_ASHMEM_SET_PROT_MASK _IOW(__ASHMEMIOC, 5, unsigned int) 25 | #endif 26 | 27 | #endif /* _LINUX_ASHMEM_H */ 28 | -------------------------------------------------------------------------------- /ashmem/deps.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | static int (*shmem_zero_setup_ptr)(struct vm_area_struct *) = NULL; 5 | 6 | int shmem_zero_setup(struct vm_area_struct *vma) 7 | { 8 | if (!shmem_zero_setup_ptr) 9 | shmem_zero_setup_ptr = kallsyms_lookup_name("shmem_zero_setup"); 10 | return shmem_zero_setup_ptr(vma); 11 | } 12 | -------------------------------------------------------------------------------- /ashmem/dkms.conf: -------------------------------------------------------------------------------- 1 | PACKAGE_NAME="anbox-ashmem" 2 | PACKAGE_VERSION="1" 3 | CLEAN="make clean" 4 | MAKE[0]="make all KERNEL_SRC=/lib/modules/$kernelver/build" 5 | BUILT_MODULE_NAME[0]="ashmem_linux" 6 | DEST_MODULE_LOCATION[0]="/updates" 7 | AUTOINSTALL="yes" 8 | -------------------------------------------------------------------------------- /ashmem/uapi/ashmem.h: -------------------------------------------------------------------------------- 1 | /* 2 | * drivers/staging/android/uapi/ashmem.h 3 | * 4 | * Copyright 2008 Google Inc. 5 | * Author: Robert Love 6 | * 7 | * This file is dual licensed. It may be redistributed and/or modified 8 | * under the terms of the Apache 2.0 License OR version 2 of the GNU 9 | * General Public License. 10 | */ 11 | 12 | #ifndef _UAPI_LINUX_ASHMEM_H 13 | #define _UAPI_LINUX_ASHMEM_H 14 | 15 | #include 16 | 17 | #define ASHMEM_NAME_LEN 256 18 | 19 | #define ASHMEM_NAME_DEF "dev/ashmem" 20 | 21 | /* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */ 22 | #define ASHMEM_NOT_PURGED 0 23 | #define ASHMEM_WAS_PURGED 1 24 | 25 | /* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? 
*/ 26 | #define ASHMEM_IS_UNPINNED 0 27 | #define ASHMEM_IS_PINNED 1 28 | 29 | struct ashmem_pin { 30 | __u32 offset; /* offset into region, in bytes, page-aligned */ 31 | __u32 len; /* length forward from offset, in bytes, page-aligned */ 32 | }; 33 | 34 | #define __ASHMEMIOC 0x77 35 | 36 | #define ASHMEM_SET_NAME _IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN]) 37 | #define ASHMEM_GET_NAME _IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN]) 38 | #define ASHMEM_SET_SIZE _IOW(__ASHMEMIOC, 3, size_t) 39 | #define ASHMEM_GET_SIZE _IO(__ASHMEMIOC, 4) 40 | #define ASHMEM_SET_PROT_MASK _IOW(__ASHMEMIOC, 5, unsigned long) 41 | #define ASHMEM_GET_PROT_MASK _IO(__ASHMEMIOC, 6) 42 | #define ASHMEM_PIN _IOW(__ASHMEMIOC, 7, struct ashmem_pin) 43 | #define ASHMEM_UNPIN _IOW(__ASHMEMIOC, 8, struct ashmem_pin) 44 | #define ASHMEM_GET_PIN_STATUS _IO(__ASHMEMIOC, 9) 45 | #define ASHMEM_PURGE_ALL_CACHES _IO(__ASHMEMIOC, 10) 46 | 47 | #endif /* _UAPI_LINUX_ASHMEM_H */ 48 | -------------------------------------------------------------------------------- /binder/Makefile: -------------------------------------------------------------------------------- 1 | ccflags-y += -I$(src) -Wno-int-conversion -DCONFIG_ANDROID_BINDER_DEVICES="\"binder\"" 2 | obj-m := binder_linux.o 3 | binder_linux-y := deps.o binder.o 4 | 5 | KERNEL_SRC ?= /lib/modules/$(shell uname -r)/build 6 | 7 | all: 8 | $(MAKE) -C $(KERNEL_SRC) V=0 M=$$PWD 9 | 10 | install: 11 | cp binder_linux.ko $(DESTDIR)/ 12 | 13 | clean: 14 | rm -rf deps.h *.o *.ko *.mod.c *.symvers *.order .*.cmd .tmp_versions 15 | -------------------------------------------------------------------------------- /binder/binder.c: -------------------------------------------------------------------------------- 1 | /* binder.c 2 | * 3 | * Android IPC Subsystem 4 | * 5 | * Copyright (C) 2007-2008 Google, Inc. 6 | * 7 | * This software is licensed under the terms of the GNU General Public 8 | * License version 2, as published by the Free Software Foundation, and 9 | * may be copied, distributed, and modified under those terms. 10 | * 11 | * This program is distributed in the hope that it will be useful, 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | * GNU General Public License for more details. 15 | * 16 | */ 17 | 18 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 19 | 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include 28 | #include 29 | #include 30 | #include 31 | #include 32 | #include 33 | #include 34 | #include 35 | #include 36 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) 37 | #include 38 | #include 39 | #else 40 | #include 41 | #include 42 | #endif 43 | #include 44 | #include 45 | #include 46 | #include 47 | #include 48 | #include 49 | 50 | #ifdef CONFIG_ANDROID_BINDER_IPC_32BIT 51 | #define BINDER_IPC_32BIT 1 52 | #endif 53 | 54 | /* Until I upstream a better version of this patch choose an arbitrary major 55 | * number in the high end spectrum that has not yet been given away and is 56 | * unlikely to be given away in the near future. 
57 | */ 58 | #define BINDER_DKMS_MAJOR 511 59 | #define BINDER_DKMS_MAX_MINOR 1024 60 | 61 | #include "binder.h" 62 | #include "binder_trace.h" 63 | 64 | #ifndef MAX_NICE 65 | #define MAX_NICE 19 66 | #endif 67 | 68 | static DEFINE_MUTEX(binder_main_lock); 69 | static DEFINE_MUTEX(binder_deferred_lock); 70 | static DEFINE_MUTEX(binder_mmap_lock); 71 | static DEFINE_MUTEX(binder_devices_mtx); 72 | 73 | static HLIST_HEAD(binder_devices); 74 | static HLIST_HEAD(binder_procs); 75 | static HLIST_HEAD(binder_deferred_list); 76 | static HLIST_HEAD(binder_dead_nodes); 77 | 78 | static struct dentry *binder_debugfs_dir_entry_root; 79 | static struct dentry *binder_debugfs_dir_entry_proc; 80 | static int binder_last_id; 81 | 82 | #define BINDER_DEBUG_ENTRY(name) \ 83 | static int binder_##name##_open(struct inode *inode, struct file *file) \ 84 | { \ 85 | return single_open(file, binder_##name##_show, inode->i_private); \ 86 | } \ 87 | \ 88 | static const struct file_operations binder_##name##_fops = { \ 89 | .owner = THIS_MODULE, \ 90 | .open = binder_##name##_open, \ 91 | .read = seq_read, \ 92 | .llseek = seq_lseek, \ 93 | .release = single_release, \ 94 | } 95 | 96 | static int binder_proc_show(struct seq_file *m, void *unused); 97 | BINDER_DEBUG_ENTRY(proc); 98 | 99 | /* This is only defined in include/asm-arm/sizes.h */ 100 | #ifndef SZ_1K 101 | #define SZ_1K 0x400 102 | #endif 103 | 104 | #ifndef SZ_4M 105 | #define SZ_4M 0x400000 106 | #endif 107 | 108 | #define FORBIDDEN_MMAP_FLAGS (VM_WRITE) 109 | 110 | #define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64) 111 | 112 | enum { 113 | BINDER_DEBUG_USER_ERROR = 1U << 0, 114 | BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1, 115 | BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2, 116 | BINDER_DEBUG_OPEN_CLOSE = 1U << 3, 117 | BINDER_DEBUG_DEAD_BINDER = 1U << 4, 118 | BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5, 119 | BINDER_DEBUG_READ_WRITE = 1U << 6, 120 | BINDER_DEBUG_USER_REFS = 1U << 7, 121 | BINDER_DEBUG_THREADS = 1U << 8, 122 | BINDER_DEBUG_TRANSACTION = 1U << 9, 123 | BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10, 124 | BINDER_DEBUG_FREE_BUFFER = 1U << 11, 125 | BINDER_DEBUG_INTERNAL_REFS = 1U << 12, 126 | BINDER_DEBUG_BUFFER_ALLOC = 1U << 13, 127 | BINDER_DEBUG_PRIORITY_CAP = 1U << 14, 128 | BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15, 129 | }; 130 | static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR | 131 | BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION; 132 | module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO); 133 | 134 | static bool binder_debug_no_lock; 135 | module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO); 136 | 137 | static int binder_devices_param = 1; 138 | module_param_named(num_devices, binder_devices_param, int, 0444); 139 | 140 | static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait); 141 | static int binder_stop_on_user_error; 142 | 143 | static int binder_set_stop_on_user_error(const char *val, 144 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) 145 | const struct kernel_param *kp) 146 | #else 147 | struct kernel_param *kp) 148 | #endif 149 | { 150 | int ret; 151 | 152 | ret = param_set_int(val, kp); 153 | if (binder_stop_on_user_error < 2) 154 | wake_up(&binder_user_error_wait); 155 | return ret; 156 | } 157 | module_param_call(stop_on_user_error, binder_set_stop_on_user_error, 158 | param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO); 159 | 160 | #define binder_debug(mask, x...) 
\ 161 | do { \ 162 | if (binder_debug_mask & mask) \ 163 | pr_info(x); \ 164 | } while (0) 165 | 166 | #define binder_user_error(x...) \ 167 | do { \ 168 | if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \ 169 | pr_info(x); \ 170 | if (binder_stop_on_user_error) \ 171 | binder_stop_on_user_error = 2; \ 172 | } while (0) 173 | 174 | #define to_flat_binder_object(hdr) \ 175 | container_of(hdr, struct flat_binder_object, hdr) 176 | 177 | #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr) 178 | 179 | #define to_binder_buffer_object(hdr) \ 180 | container_of(hdr, struct binder_buffer_object, hdr) 181 | 182 | #define to_binder_fd_array_object(hdr) \ 183 | container_of(hdr, struct binder_fd_array_object, hdr) 184 | 185 | enum binder_stat_types { 186 | BINDER_STAT_PROC, 187 | BINDER_STAT_THREAD, 188 | BINDER_STAT_NODE, 189 | BINDER_STAT_REF, 190 | BINDER_STAT_DEATH, 191 | BINDER_STAT_TRANSACTION, 192 | BINDER_STAT_TRANSACTION_COMPLETE, 193 | BINDER_STAT_COUNT 194 | }; 195 | 196 | struct binder_stats { 197 | int br[_IOC_NR(BR_FAILED_REPLY) + 1]; 198 | int bc[_IOC_NR(BC_REPLY_SG) + 1]; 199 | int obj_created[BINDER_STAT_COUNT]; 200 | int obj_deleted[BINDER_STAT_COUNT]; 201 | }; 202 | 203 | static struct binder_stats binder_stats; 204 | 205 | static inline void binder_stats_deleted(enum binder_stat_types type) 206 | { 207 | binder_stats.obj_deleted[type]++; 208 | } 209 | 210 | static inline void binder_stats_created(enum binder_stat_types type) 211 | { 212 | binder_stats.obj_created[type]++; 213 | } 214 | 215 | struct binder_transaction_log_entry { 216 | int debug_id; 217 | int call_type; 218 | int from_proc; 219 | int from_thread; 220 | int target_handle; 221 | int to_proc; 222 | int to_thread; 223 | int to_node; 224 | int data_size; 225 | int offsets_size; 226 | const char *context_name; 227 | }; 228 | struct binder_transaction_log { 229 | int next; 230 | int full; 231 | struct binder_transaction_log_entry entry[32]; 232 | }; 233 | static struct binder_transaction_log binder_transaction_log; 234 | static struct binder_transaction_log binder_transaction_log_failed; 235 | 236 | static struct binder_transaction_log_entry *binder_transaction_log_add( 237 | struct binder_transaction_log *log) 238 | { 239 | struct binder_transaction_log_entry *e; 240 | 241 | e = &log->entry[log->next]; 242 | memset(e, 0, sizeof(*e)); 243 | log->next++; 244 | if (log->next == ARRAY_SIZE(log->entry)) { 245 | log->next = 0; 246 | log->full = 1; 247 | } 248 | return e; 249 | } 250 | 251 | struct binder_context { 252 | struct binder_node *binder_context_mgr_node; 253 | kuid_t binder_context_mgr_uid; 254 | const char *name; 255 | }; 256 | 257 | struct binder_device { 258 | struct hlist_node hlist; 259 | struct cdev cdev; 260 | struct device class_dev; 261 | struct binder_context context; 262 | }; 263 | 264 | struct binder_work { 265 | struct list_head entry; 266 | enum { 267 | BINDER_WORK_TRANSACTION = 1, 268 | BINDER_WORK_TRANSACTION_COMPLETE, 269 | BINDER_WORK_NODE, 270 | BINDER_WORK_DEAD_BINDER, 271 | BINDER_WORK_DEAD_BINDER_AND_CLEAR, 272 | BINDER_WORK_CLEAR_DEATH_NOTIFICATION, 273 | } type; 274 | }; 275 | 276 | struct binder_node { 277 | int debug_id; 278 | struct binder_work work; 279 | union { 280 | struct rb_node rb_node; 281 | struct hlist_node dead_node; 282 | }; 283 | struct binder_proc *proc; 284 | struct hlist_head refs; 285 | int internal_strong_refs; 286 | int local_weak_refs; 287 | int local_strong_refs; 288 | binder_uintptr_t ptr; 289 | binder_uintptr_t cookie; 290 | unsigned 
has_strong_ref:1; 291 | unsigned pending_strong_ref:1; 292 | unsigned has_weak_ref:1; 293 | unsigned pending_weak_ref:1; 294 | unsigned has_async_transaction:1; 295 | unsigned accept_fds:1; 296 | unsigned min_priority:8; 297 | struct list_head async_todo; 298 | }; 299 | 300 | struct binder_ref_death { 301 | struct binder_work work; 302 | binder_uintptr_t cookie; 303 | }; 304 | 305 | struct binder_ref { 306 | /* Lookups needed: */ 307 | /* node + proc => ref (transaction) */ 308 | /* desc + proc => ref (transaction, inc/dec ref) */ 309 | /* node => refs + procs (proc exit) */ 310 | int debug_id; 311 | struct rb_node rb_node_desc; 312 | struct rb_node rb_node_node; 313 | struct hlist_node node_entry; 314 | struct binder_proc *proc; 315 | struct binder_node *node; 316 | uint32_t desc; 317 | int strong; 318 | int weak; 319 | struct binder_ref_death *death; 320 | }; 321 | 322 | struct binder_buffer { 323 | struct list_head entry; /* free and allocated entries by address */ 324 | struct rb_node rb_node; /* free entry by size or allocated entry */ 325 | /* by address */ 326 | unsigned free:1; 327 | unsigned allow_user_free:1; 328 | unsigned async_transaction:1; 329 | unsigned debug_id:29; 330 | 331 | struct binder_transaction *transaction; 332 | 333 | struct binder_node *target_node; 334 | size_t data_size; 335 | size_t offsets_size; 336 | size_t extra_buffers_size; 337 | uint8_t data[0]; 338 | }; 339 | 340 | enum binder_deferred_state { 341 | BINDER_DEFERRED_PUT_FILES = 0x01, 342 | BINDER_DEFERRED_FLUSH = 0x02, 343 | BINDER_DEFERRED_RELEASE = 0x04, 344 | }; 345 | 346 | struct binder_proc { 347 | struct hlist_node proc_node; 348 | struct rb_root threads; 349 | struct rb_root nodes; 350 | struct rb_root refs_by_desc; 351 | struct rb_root refs_by_node; 352 | int pid; 353 | struct vm_area_struct *vma; 354 | struct mm_struct *vma_vm_mm; 355 | struct task_struct *tsk; 356 | struct files_struct *files; 357 | struct hlist_node deferred_work_node; 358 | int deferred_work; 359 | void *buffer; 360 | ptrdiff_t user_buffer_offset; 361 | 362 | struct list_head buffers; 363 | struct rb_root free_buffers; 364 | struct rb_root allocated_buffers; 365 | size_t free_async_space; 366 | 367 | struct page **pages; 368 | size_t buffer_size; 369 | uint32_t buffer_free; 370 | struct list_head todo; 371 | wait_queue_head_t wait; 372 | struct binder_stats stats; 373 | struct list_head delivered_death; 374 | int max_threads; 375 | int requested_threads; 376 | int requested_threads_started; 377 | int ready_threads; 378 | long default_priority; 379 | struct dentry *debugfs_entry; 380 | struct binder_context *context; 381 | }; 382 | 383 | enum { 384 | BINDER_LOOPER_STATE_REGISTERED = 0x01, 385 | BINDER_LOOPER_STATE_ENTERED = 0x02, 386 | BINDER_LOOPER_STATE_EXITED = 0x04, 387 | BINDER_LOOPER_STATE_INVALID = 0x08, 388 | BINDER_LOOPER_STATE_WAITING = 0x10, 389 | BINDER_LOOPER_STATE_NEED_RETURN = 0x20 390 | }; 391 | 392 | struct binder_thread { 393 | struct binder_proc *proc; 394 | struct rb_node rb_node; 395 | int pid; 396 | int looper; 397 | struct binder_transaction *transaction_stack; 398 | struct list_head todo; 399 | uint32_t return_error; /* Write failed, return error code in read buf */ 400 | uint32_t return_error2; /* Write failed, return error code in read */ 401 | /* buffer. 
Used when sending a reply to a dead process that */ 402 | /* we are also waiting on */ 403 | wait_queue_head_t wait; 404 | struct binder_stats stats; 405 | }; 406 | 407 | struct binder_transaction { 408 | int debug_id; 409 | struct binder_work work; 410 | struct binder_thread *from; 411 | struct binder_transaction *from_parent; 412 | struct binder_proc *to_proc; 413 | struct binder_thread *to_thread; 414 | struct binder_transaction *to_parent; 415 | unsigned need_reply:1; 416 | /* unsigned is_dead:1; */ /* not used at the moment */ 417 | 418 | struct binder_buffer *buffer; 419 | unsigned int code; 420 | unsigned int flags; 421 | long priority; 422 | long saved_priority; 423 | kuid_t sender_euid; 424 | }; 425 | 426 | static void 427 | binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer); 428 | 429 | static int task_get_unused_fd_flags(struct binder_proc *proc, int flags) 430 | { 431 | struct files_struct *files = proc->files; 432 | unsigned long rlim_cur; 433 | unsigned long irqs; 434 | 435 | if (files == NULL) 436 | return -ESRCH; 437 | 438 | if (!lock_task_sighand(proc->tsk, &irqs)) 439 | return -EMFILE; 440 | 441 | rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE); 442 | unlock_task_sighand(proc->tsk, &irqs); 443 | 444 | return __alloc_fd(files, 0, rlim_cur, flags); 445 | } 446 | 447 | /* 448 | * copied from fd_install 449 | */ 450 | static void task_fd_install( 451 | struct binder_proc *proc, unsigned int fd, struct file *file) 452 | { 453 | if (proc->files) 454 | __fd_install(proc->files, fd, file); 455 | } 456 | 457 | /* 458 | * copied from sys_close 459 | */ 460 | static long task_close_fd(struct binder_proc *proc, unsigned int fd) 461 | { 462 | int retval; 463 | 464 | if (proc->files == NULL) 465 | return -ESRCH; 466 | 467 | retval = __close_fd(proc->files, fd); 468 | /* can't restart close syscall because file table entry was cleared */ 469 | if (unlikely(retval == -ERESTARTSYS || 470 | retval == -ERESTARTNOINTR || 471 | retval == -ERESTARTNOHAND || 472 | retval == -ERESTART_RESTARTBLOCK)) 473 | retval = -EINTR; 474 | 475 | return retval; 476 | } 477 | 478 | static inline void binder_lock(const char *tag) 479 | { 480 | trace_binder_lock(tag); 481 | mutex_lock(&binder_main_lock); 482 | trace_binder_locked(tag); 483 | } 484 | 485 | static inline void binder_unlock(const char *tag) 486 | { 487 | trace_binder_unlock(tag); 488 | mutex_unlock(&binder_main_lock); 489 | } 490 | 491 | #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0) 492 | /* 493 | * Convert rlimit style value [1,40] to nice value [-20, 19]. 
494 | */ 495 | static inline long rlimit_to_nice(long prio) 496 | { 497 | return (MAX_NICE - prio + 1); 498 | } 499 | #endif 500 | 501 | static void binder_set_nice(long nice) 502 | { 503 | long min_nice; 504 | 505 | if (can_nice(current, nice)) { 506 | set_user_nice(current, nice); 507 | return; 508 | } 509 | min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur); 510 | binder_debug(BINDER_DEBUG_PRIORITY_CAP, 511 | "%d: nice value %ld not allowed use %ld instead\n", 512 | current->pid, nice, min_nice); 513 | set_user_nice(current, min_nice); 514 | if (min_nice <= MAX_NICE) 515 | return; 516 | } 517 | 518 | static size_t binder_buffer_size(struct binder_proc *proc, 519 | struct binder_buffer *buffer) 520 | { 521 | if (list_is_last(&buffer->entry, &proc->buffers)) 522 | return proc->buffer + proc->buffer_size - (void *)buffer->data; 523 | return (size_t)list_entry(buffer->entry.next, 524 | struct binder_buffer, entry) - (size_t)buffer->data; 525 | } 526 | 527 | static void binder_insert_free_buffer(struct binder_proc *proc, 528 | struct binder_buffer *new_buffer) 529 | { 530 | struct rb_node **p = &proc->free_buffers.rb_node; 531 | struct rb_node *parent = NULL; 532 | struct binder_buffer *buffer; 533 | size_t buffer_size; 534 | size_t new_buffer_size; 535 | 536 | BUG_ON(!new_buffer->free); 537 | 538 | new_buffer_size = binder_buffer_size(proc, new_buffer); 539 | 540 | binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 541 | "%d: add free buffer, size %zd, at %p\n", 542 | proc->pid, new_buffer_size, new_buffer); 543 | 544 | while (*p) { 545 | parent = *p; 546 | buffer = rb_entry(parent, struct binder_buffer, rb_node); 547 | BUG_ON(!buffer->free); 548 | 549 | buffer_size = binder_buffer_size(proc, buffer); 550 | 551 | if (new_buffer_size < buffer_size) 552 | p = &parent->rb_left; 553 | else 554 | p = &parent->rb_right; 555 | } 556 | rb_link_node(&new_buffer->rb_node, parent, p); 557 | rb_insert_color(&new_buffer->rb_node, &proc->free_buffers); 558 | } 559 | 560 | static void binder_insert_allocated_buffer(struct binder_proc *proc, 561 | struct binder_buffer *new_buffer) 562 | { 563 | struct rb_node **p = &proc->allocated_buffers.rb_node; 564 | struct rb_node *parent = NULL; 565 | struct binder_buffer *buffer; 566 | 567 | BUG_ON(new_buffer->free); 568 | 569 | while (*p) { 570 | parent = *p; 571 | buffer = rb_entry(parent, struct binder_buffer, rb_node); 572 | BUG_ON(buffer->free); 573 | 574 | if (new_buffer < buffer) 575 | p = &parent->rb_left; 576 | else if (new_buffer > buffer) 577 | p = &parent->rb_right; 578 | else 579 | BUG(); 580 | } 581 | rb_link_node(&new_buffer->rb_node, parent, p); 582 | rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers); 583 | } 584 | 585 | static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc, 586 | uintptr_t user_ptr) 587 | { 588 | struct rb_node *n = proc->allocated_buffers.rb_node; 589 | struct binder_buffer *buffer; 590 | struct binder_buffer *kern_ptr; 591 | 592 | kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset 593 | - offsetof(struct binder_buffer, data)); 594 | 595 | while (n) { 596 | buffer = rb_entry(n, struct binder_buffer, rb_node); 597 | BUG_ON(buffer->free); 598 | 599 | if (kern_ptr < buffer) 600 | n = n->rb_left; 601 | else if (kern_ptr > buffer) 602 | n = n->rb_right; 603 | else 604 | return buffer; 605 | } 606 | return NULL; 607 | } 608 | 609 | static int binder_update_page_range(struct binder_proc *proc, int allocate, 610 | void *start, void *end, 611 | struct vm_area_struct *vma) 612 
| { 613 | void *page_addr; 614 | unsigned long user_page_addr; 615 | struct page **page; 616 | struct mm_struct *mm; 617 | 618 | binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 619 | "%d: %s pages %p-%p\n", proc->pid, 620 | allocate ? "allocate" : "free", start, end); 621 | 622 | if (end <= start) 623 | return 0; 624 | 625 | trace_binder_update_page_range(proc, allocate, start, end); 626 | 627 | if (vma) 628 | mm = NULL; 629 | else 630 | mm = get_task_mm(proc->tsk); 631 | 632 | if (mm) { 633 | down_write(&mm->mmap_sem); 634 | vma = proc->vma; 635 | if (vma && mm != proc->vma_vm_mm) { 636 | pr_err("%d: vma mm and task mm mismatch\n", 637 | proc->pid); 638 | vma = NULL; 639 | } 640 | } 641 | 642 | if (allocate == 0) 643 | goto free_range; 644 | 645 | if (vma == NULL) { 646 | pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n", 647 | proc->pid); 648 | goto err_no_vma; 649 | } 650 | 651 | for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { 652 | int ret; 653 | 654 | page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; 655 | 656 | BUG_ON(*page); 657 | *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); 658 | if (*page == NULL) { 659 | pr_err("%d: binder_alloc_buf failed for page at %p\n", 660 | proc->pid, page_addr); 661 | goto err_alloc_page_failed; 662 | } 663 | ret = map_kernel_range_noflush((unsigned long)page_addr, 664 | PAGE_SIZE, PAGE_KERNEL, page); 665 | flush_cache_vmap((unsigned long)page_addr, 666 | (unsigned long)page_addr + PAGE_SIZE); 667 | if (ret != 1) { 668 | pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n", 669 | proc->pid, page_addr); 670 | goto err_map_kernel_failed; 671 | } 672 | user_page_addr = 673 | (uintptr_t)page_addr + proc->user_buffer_offset; 674 | ret = vm_insert_page(vma, user_page_addr, page[0]); 675 | if (ret) { 676 | pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n", 677 | proc->pid, user_page_addr); 678 | goto err_vm_insert_page_failed; 679 | } 680 | /* vm_insert_page does not seem to increment the refcount */ 681 | } 682 | if (mm) { 683 | up_write(&mm->mmap_sem); 684 | mmput(mm); 685 | } 686 | return 0; 687 | 688 | free_range: 689 | for (page_addr = end - PAGE_SIZE; page_addr >= start; 690 | page_addr -= PAGE_SIZE) { 691 | page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; 692 | if (vma) 693 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) 694 | zap_page_range(vma, (uintptr_t)page_addr + 695 | proc->user_buffer_offset, PAGE_SIZE); 696 | #else 697 | zap_page_range(vma, (uintptr_t)page_addr + 698 | proc->user_buffer_offset, PAGE_SIZE, NULL); 699 | #endif 700 | err_vm_insert_page_failed: 701 | unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); 702 | err_map_kernel_failed: 703 | __free_page(*page); 704 | *page = NULL; 705 | err_alloc_page_failed: 706 | ; 707 | } 708 | err_no_vma: 709 | if (mm) { 710 | up_write(&mm->mmap_sem); 711 | mmput(mm); 712 | } 713 | return -ENOMEM; 714 | } 715 | 716 | static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc, 717 | size_t data_size, 718 | size_t offsets_size, 719 | size_t extra_buffers_size, 720 | int is_async) 721 | { 722 | struct rb_node *n = proc->free_buffers.rb_node; 723 | struct binder_buffer *buffer; 724 | size_t buffer_size; 725 | struct rb_node *best_fit = NULL; 726 | void *has_page_addr; 727 | void *end_page_addr; 728 | size_t size, data_offsets_size; 729 | 730 | if (proc->vma == NULL) { 731 | pr_err("%d: binder_alloc_buf, no vma\n", 732 | proc->pid); 733 | return NULL; 734 | } 735 | 736 | 
data_offsets_size = ALIGN(data_size, sizeof(void *)) + 737 | ALIGN(offsets_size, sizeof(void *)); 738 | 739 | if (data_offsets_size < data_size || data_offsets_size < offsets_size) { 740 | binder_user_error("%d: got transaction with invalid size %zd-%zd\n", 741 | proc->pid, data_size, offsets_size); 742 | return NULL; 743 | } 744 | size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *)); 745 | if (size < data_offsets_size || size < extra_buffers_size) { 746 | binder_user_error("%d: got transaction with invalid extra_buffers_size %zd\n", 747 | proc->pid, extra_buffers_size); 748 | return NULL; 749 | } 750 | if (is_async && 751 | proc->free_async_space < size + sizeof(struct binder_buffer)) { 752 | binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 753 | "%d: binder_alloc_buf size %zd failed, no async space left\n", 754 | proc->pid, size); 755 | return NULL; 756 | } 757 | 758 | while (n) { 759 | buffer = rb_entry(n, struct binder_buffer, rb_node); 760 | BUG_ON(!buffer->free); 761 | buffer_size = binder_buffer_size(proc, buffer); 762 | 763 | if (size < buffer_size) { 764 | best_fit = n; 765 | n = n->rb_left; 766 | } else if (size > buffer_size) 767 | n = n->rb_right; 768 | else { 769 | best_fit = n; 770 | break; 771 | } 772 | } 773 | if (best_fit == NULL) { 774 | pr_err("%d: binder_alloc_buf size %zd failed, no address space\n", 775 | proc->pid, size); 776 | return NULL; 777 | } 778 | if (n == NULL) { 779 | buffer = rb_entry(best_fit, struct binder_buffer, rb_node); 780 | buffer_size = binder_buffer_size(proc, buffer); 781 | } 782 | 783 | binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 784 | "%d: binder_alloc_buf size %zd got buffer %p size %zd\n", 785 | proc->pid, size, buffer, buffer_size); 786 | 787 | has_page_addr = 788 | (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK); 789 | if (n == NULL) { 790 | if (size + sizeof(struct binder_buffer) + 4 >= buffer_size) 791 | buffer_size = size; /* no room for other buffers */ 792 | else 793 | buffer_size = size + sizeof(struct binder_buffer); 794 | } 795 | end_page_addr = 796 | (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size); 797 | if (end_page_addr > has_page_addr) 798 | end_page_addr = has_page_addr; 799 | if (binder_update_page_range(proc, 1, 800 | (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL)) 801 | return NULL; 802 | 803 | rb_erase(best_fit, &proc->free_buffers); 804 | buffer->free = 0; 805 | binder_insert_allocated_buffer(proc, buffer); 806 | if (buffer_size != size) { 807 | struct binder_buffer *new_buffer = (void *)buffer->data + size; 808 | 809 | list_add(&new_buffer->entry, &buffer->entry); 810 | new_buffer->free = 1; 811 | binder_insert_free_buffer(proc, new_buffer); 812 | } 813 | binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 814 | "%d: binder_alloc_buf size %zd got %p\n", 815 | proc->pid, size, buffer); 816 | buffer->data_size = data_size; 817 | buffer->offsets_size = offsets_size; 818 | buffer->extra_buffers_size = extra_buffers_size; 819 | buffer->async_transaction = is_async; 820 | if (is_async) { 821 | proc->free_async_space -= size + sizeof(struct binder_buffer); 822 | binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, 823 | "%d: binder_alloc_buf size %zd async free %zd\n", 824 | proc->pid, size, proc->free_async_space); 825 | } 826 | 827 | return buffer; 828 | } 829 | 830 | static void *buffer_start_page(struct binder_buffer *buffer) 831 | { 832 | return (void *)((uintptr_t)buffer & PAGE_MASK); 833 | } 834 | 835 | static void *buffer_end_page(struct binder_buffer *buffer) 836 | { 837 | return (void 
*)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK); 838 | } 839 | 840 | static void binder_delete_free_buffer(struct binder_proc *proc, 841 | struct binder_buffer *buffer) 842 | { 843 | struct binder_buffer *prev, *next = NULL; 844 | int free_page_end = 1; 845 | int free_page_start = 1; 846 | 847 | BUG_ON(proc->buffers.next == &buffer->entry); 848 | prev = list_entry(buffer->entry.prev, struct binder_buffer, entry); 849 | BUG_ON(!prev->free); 850 | if (buffer_end_page(prev) == buffer_start_page(buffer)) { 851 | free_page_start = 0; 852 | if (buffer_end_page(prev) == buffer_end_page(buffer)) 853 | free_page_end = 0; 854 | binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 855 | "%d: merge free, buffer %p share page with %p\n", 856 | proc->pid, buffer, prev); 857 | } 858 | 859 | if (!list_is_last(&buffer->entry, &proc->buffers)) { 860 | next = list_entry(buffer->entry.next, 861 | struct binder_buffer, entry); 862 | if (buffer_start_page(next) == buffer_end_page(buffer)) { 863 | free_page_end = 0; 864 | if (buffer_start_page(next) == 865 | buffer_start_page(buffer)) 866 | free_page_start = 0; 867 | binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 868 | "%d: merge free, buffer %p share page with %p\n", 869 | proc->pid, buffer, prev); 870 | } 871 | } 872 | list_del(&buffer->entry); 873 | if (free_page_start || free_page_end) { 874 | binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 875 | "%d: merge free, buffer %p do not share page%s%s with %p or %p\n", 876 | proc->pid, buffer, free_page_start ? "" : " end", 877 | free_page_end ? "" : " start", prev, next); 878 | binder_update_page_range(proc, 0, free_page_start ? 879 | buffer_start_page(buffer) : buffer_end_page(buffer), 880 | (free_page_end ? buffer_end_page(buffer) : 881 | buffer_start_page(buffer)) + PAGE_SIZE, NULL); 882 | } 883 | } 884 | 885 | static void binder_free_buf(struct binder_proc *proc, 886 | struct binder_buffer *buffer) 887 | { 888 | size_t size, buffer_size; 889 | 890 | buffer_size = binder_buffer_size(proc, buffer); 891 | 892 | size = ALIGN(buffer->data_size, sizeof(void *)) + 893 | ALIGN(buffer->offsets_size, sizeof(void *)) + 894 | ALIGN(buffer->extra_buffers_size, sizeof(void *)); 895 | 896 | binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 897 | "%d: binder_free_buf %p size %zd buffer_size %zd\n", 898 | proc->pid, buffer, size, buffer_size); 899 | 900 | BUG_ON(buffer->free); 901 | BUG_ON(size > buffer_size); 902 | BUG_ON(buffer->transaction != NULL); 903 | BUG_ON((void *)buffer < proc->buffer); 904 | BUG_ON((void *)buffer > proc->buffer + proc->buffer_size); 905 | 906 | if (buffer->async_transaction) { 907 | proc->free_async_space += size + sizeof(struct binder_buffer); 908 | 909 | binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, 910 | "%d: binder_free_buf size %zd async free %zd\n", 911 | proc->pid, size, proc->free_async_space); 912 | } 913 | 914 | binder_update_page_range(proc, 0, 915 | (void *)PAGE_ALIGN((uintptr_t)buffer->data), 916 | (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK), 917 | NULL); 918 | rb_erase(&buffer->rb_node, &proc->allocated_buffers); 919 | buffer->free = 1; 920 | if (!list_is_last(&buffer->entry, &proc->buffers)) { 921 | struct binder_buffer *next = list_entry(buffer->entry.next, 922 | struct binder_buffer, entry); 923 | 924 | if (next->free) { 925 | rb_erase(&next->rb_node, &proc->free_buffers); 926 | binder_delete_free_buffer(proc, next); 927 | } 928 | } 929 | if (proc->buffers.next != &buffer->entry) { 930 | struct binder_buffer *prev = list_entry(buffer->entry.prev, 931 | struct binder_buffer, entry); 932 | 933 | if 
(prev->free) { 934 | binder_delete_free_buffer(proc, buffer); 935 | rb_erase(&prev->rb_node, &proc->free_buffers); 936 | buffer = prev; 937 | } 938 | } 939 | binder_insert_free_buffer(proc, buffer); 940 | } 941 | 942 | static struct binder_node *binder_get_node(struct binder_proc *proc, 943 | binder_uintptr_t ptr) 944 | { 945 | struct rb_node *n = proc->nodes.rb_node; 946 | struct binder_node *node; 947 | 948 | while (n) { 949 | node = rb_entry(n, struct binder_node, rb_node); 950 | 951 | if (ptr < node->ptr) 952 | n = n->rb_left; 953 | else if (ptr > node->ptr) 954 | n = n->rb_right; 955 | else 956 | return node; 957 | } 958 | return NULL; 959 | } 960 | 961 | static struct binder_node *binder_new_node(struct binder_proc *proc, 962 | binder_uintptr_t ptr, 963 | binder_uintptr_t cookie) 964 | { 965 | struct rb_node **p = &proc->nodes.rb_node; 966 | struct rb_node *parent = NULL; 967 | struct binder_node *node; 968 | 969 | while (*p) { 970 | parent = *p; 971 | node = rb_entry(parent, struct binder_node, rb_node); 972 | 973 | if (ptr < node->ptr) 974 | p = &(*p)->rb_left; 975 | else if (ptr > node->ptr) 976 | p = &(*p)->rb_right; 977 | else 978 | return NULL; 979 | } 980 | 981 | node = kzalloc(sizeof(*node), GFP_KERNEL); 982 | if (node == NULL) 983 | return NULL; 984 | binder_stats_created(BINDER_STAT_NODE); 985 | rb_link_node(&node->rb_node, parent, p); 986 | rb_insert_color(&node->rb_node, &proc->nodes); 987 | node->debug_id = ++binder_last_id; 988 | node->proc = proc; 989 | node->ptr = ptr; 990 | node->cookie = cookie; 991 | node->work.type = BINDER_WORK_NODE; 992 | INIT_LIST_HEAD(&node->work.entry); 993 | INIT_LIST_HEAD(&node->async_todo); 994 | binder_debug(BINDER_DEBUG_INTERNAL_REFS, 995 | "%d:%d node %d u%016llx c%016llx created\n", 996 | proc->pid, current->pid, node->debug_id, 997 | (u64)node->ptr, (u64)node->cookie); 998 | return node; 999 | } 1000 | 1001 | static int binder_inc_node(struct binder_node *node, int strong, int internal, 1002 | struct list_head *target_list) 1003 | { 1004 | if (strong) { 1005 | if (internal) { 1006 | if (target_list == NULL && 1007 | node->internal_strong_refs == 0 && 1008 | !(node->proc && 1009 | node == node->proc->context->binder_context_mgr_node && 1010 | node->has_strong_ref)) { 1011 | pr_err("invalid inc strong node for %d\n", 1012 | node->debug_id); 1013 | return -EINVAL; 1014 | } 1015 | node->internal_strong_refs++; 1016 | } else 1017 | node->local_strong_refs++; 1018 | if (!node->has_strong_ref && target_list) { 1019 | list_del_init(&node->work.entry); 1020 | list_add_tail(&node->work.entry, target_list); 1021 | } 1022 | } else { 1023 | if (!internal) 1024 | node->local_weak_refs++; 1025 | if (!node->has_weak_ref && list_empty(&node->work.entry)) { 1026 | if (target_list == NULL) { 1027 | pr_err("invalid inc weak node for %d\n", 1028 | node->debug_id); 1029 | return -EINVAL; 1030 | } 1031 | list_add_tail(&node->work.entry, target_list); 1032 | } 1033 | } 1034 | return 0; 1035 | } 1036 | 1037 | static int binder_dec_node(struct binder_node *node, int strong, int internal) 1038 | { 1039 | if (strong) { 1040 | if (internal) 1041 | node->internal_strong_refs--; 1042 | else 1043 | node->local_strong_refs--; 1044 | if (node->local_strong_refs || node->internal_strong_refs) 1045 | return 0; 1046 | } else { 1047 | if (!internal) 1048 | node->local_weak_refs--; 1049 | if (node->local_weak_refs || !hlist_empty(&node->refs)) 1050 | return 0; 1051 | } 1052 | if (node->proc && (node->has_strong_ref || node->has_weak_ref)) { 1053 | if 
(list_empty(&node->work.entry)) { 1054 | list_add_tail(&node->work.entry, &node->proc->todo); 1055 | wake_up_interruptible(&node->proc->wait); 1056 | } 1057 | } else { 1058 | if (hlist_empty(&node->refs) && !node->local_strong_refs && 1059 | !node->local_weak_refs) { 1060 | list_del_init(&node->work.entry); 1061 | if (node->proc) { 1062 | rb_erase(&node->rb_node, &node->proc->nodes); 1063 | binder_debug(BINDER_DEBUG_INTERNAL_REFS, 1064 | "refless node %d deleted\n", 1065 | node->debug_id); 1066 | } else { 1067 | hlist_del(&node->dead_node); 1068 | binder_debug(BINDER_DEBUG_INTERNAL_REFS, 1069 | "dead node %d deleted\n", 1070 | node->debug_id); 1071 | } 1072 | kfree(node); 1073 | binder_stats_deleted(BINDER_STAT_NODE); 1074 | } 1075 | } 1076 | 1077 | return 0; 1078 | } 1079 | 1080 | 1081 | static struct binder_ref *binder_get_ref(struct binder_proc *proc, 1082 | u32 desc, bool need_strong_ref) 1083 | { 1084 | struct rb_node *n = proc->refs_by_desc.rb_node; 1085 | struct binder_ref *ref; 1086 | 1087 | while (n) { 1088 | ref = rb_entry(n, struct binder_ref, rb_node_desc); 1089 | 1090 | if (desc < ref->desc) { 1091 | n = n->rb_left; 1092 | } else if (desc > ref->desc) { 1093 | n = n->rb_right; 1094 | } else if (need_strong_ref && !ref->strong) { 1095 | binder_user_error("tried to use weak ref as strong ref\n"); 1096 | return NULL; 1097 | } else { 1098 | return ref; 1099 | } 1100 | } 1101 | return NULL; 1102 | } 1103 | 1104 | static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc, 1105 | struct binder_node *node) 1106 | { 1107 | struct rb_node *n; 1108 | struct rb_node **p = &proc->refs_by_node.rb_node; 1109 | struct rb_node *parent = NULL; 1110 | struct binder_ref *ref, *new_ref; 1111 | struct binder_context *context = proc->context; 1112 | 1113 | while (*p) { 1114 | parent = *p; 1115 | ref = rb_entry(parent, struct binder_ref, rb_node_node); 1116 | 1117 | if (node < ref->node) 1118 | p = &(*p)->rb_left; 1119 | else if (node > ref->node) 1120 | p = &(*p)->rb_right; 1121 | else 1122 | return ref; 1123 | } 1124 | new_ref = kzalloc(sizeof(*ref), GFP_KERNEL); 1125 | if (new_ref == NULL) 1126 | return NULL; 1127 | binder_stats_created(BINDER_STAT_REF); 1128 | new_ref->debug_id = ++binder_last_id; 1129 | new_ref->proc = proc; 1130 | new_ref->node = node; 1131 | rb_link_node(&new_ref->rb_node_node, parent, p); 1132 | rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node); 1133 | 1134 | new_ref->desc = (node == context->binder_context_mgr_node) ? 
0 : 1; 1135 | for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { 1136 | ref = rb_entry(n, struct binder_ref, rb_node_desc); 1137 | if (ref->desc > new_ref->desc) 1138 | break; 1139 | new_ref->desc = ref->desc + 1; 1140 | } 1141 | 1142 | p = &proc->refs_by_desc.rb_node; 1143 | while (*p) { 1144 | parent = *p; 1145 | ref = rb_entry(parent, struct binder_ref, rb_node_desc); 1146 | 1147 | if (new_ref->desc < ref->desc) 1148 | p = &(*p)->rb_left; 1149 | else if (new_ref->desc > ref->desc) 1150 | p = &(*p)->rb_right; 1151 | else 1152 | BUG(); 1153 | } 1154 | rb_link_node(&new_ref->rb_node_desc, parent, p); 1155 | rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc); 1156 | if (node) { 1157 | hlist_add_head(&new_ref->node_entry, &node->refs); 1158 | 1159 | binder_debug(BINDER_DEBUG_INTERNAL_REFS, 1160 | "%d new ref %d desc %d for node %d\n", 1161 | proc->pid, new_ref->debug_id, new_ref->desc, 1162 | node->debug_id); 1163 | } else { 1164 | binder_debug(BINDER_DEBUG_INTERNAL_REFS, 1165 | "%d new ref %d desc %d for dead node\n", 1166 | proc->pid, new_ref->debug_id, new_ref->desc); 1167 | } 1168 | return new_ref; 1169 | } 1170 | 1171 | static void binder_delete_ref(struct binder_ref *ref) 1172 | { 1173 | binder_debug(BINDER_DEBUG_INTERNAL_REFS, 1174 | "%d delete ref %d desc %d for node %d\n", 1175 | ref->proc->pid, ref->debug_id, ref->desc, 1176 | ref->node->debug_id); 1177 | 1178 | rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc); 1179 | rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node); 1180 | if (ref->strong) 1181 | binder_dec_node(ref->node, 1, 1); 1182 | hlist_del(&ref->node_entry); 1183 | binder_dec_node(ref->node, 0, 1); 1184 | if (ref->death) { 1185 | binder_debug(BINDER_DEBUG_DEAD_BINDER, 1186 | "%d delete ref %d desc %d has death notification\n", 1187 | ref->proc->pid, ref->debug_id, ref->desc); 1188 | list_del(&ref->death->work.entry); 1189 | kfree(ref->death); 1190 | binder_stats_deleted(BINDER_STAT_DEATH); 1191 | } 1192 | kfree(ref); 1193 | binder_stats_deleted(BINDER_STAT_REF); 1194 | } 1195 | 1196 | static int binder_inc_ref(struct binder_ref *ref, int strong, 1197 | struct list_head *target_list) 1198 | { 1199 | int ret; 1200 | 1201 | if (strong) { 1202 | if (ref->strong == 0) { 1203 | ret = binder_inc_node(ref->node, 1, 1, target_list); 1204 | if (ret) 1205 | return ret; 1206 | } 1207 | ref->strong++; 1208 | } else { 1209 | if (ref->weak == 0) { 1210 | ret = binder_inc_node(ref->node, 0, 1, target_list); 1211 | if (ret) 1212 | return ret; 1213 | } 1214 | ref->weak++; 1215 | } 1216 | return 0; 1217 | } 1218 | 1219 | 1220 | static int binder_dec_ref(struct binder_ref *ref, int strong) 1221 | { 1222 | if (strong) { 1223 | if (ref->strong == 0) { 1224 | binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n", 1225 | ref->proc->pid, ref->debug_id, 1226 | ref->desc, ref->strong, ref->weak); 1227 | return -EINVAL; 1228 | } 1229 | ref->strong--; 1230 | if (ref->strong == 0) { 1231 | int ret; 1232 | 1233 | ret = binder_dec_node(ref->node, strong, 1); 1234 | if (ret) 1235 | return ret; 1236 | } 1237 | } else { 1238 | if (ref->weak == 0) { 1239 | binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n", 1240 | ref->proc->pid, ref->debug_id, 1241 | ref->desc, ref->strong, ref->weak); 1242 | return -EINVAL; 1243 | } 1244 | ref->weak--; 1245 | } 1246 | if (ref->strong == 0 && ref->weak == 0) 1247 | binder_delete_ref(ref); 1248 | return 0; 1249 | } 1250 | 1251 | static void binder_pop_transaction(struct binder_thread *target_thread, 1252 
| struct binder_transaction *t) 1253 | { 1254 | if (target_thread) { 1255 | BUG_ON(target_thread->transaction_stack != t); 1256 | BUG_ON(target_thread->transaction_stack->from != target_thread); 1257 | target_thread->transaction_stack = 1258 | target_thread->transaction_stack->from_parent; 1259 | t->from = NULL; 1260 | } 1261 | t->need_reply = 0; 1262 | if (t->buffer) 1263 | t->buffer->transaction = NULL; 1264 | kfree(t); 1265 | binder_stats_deleted(BINDER_STAT_TRANSACTION); 1266 | } 1267 | 1268 | static void binder_send_failed_reply(struct binder_transaction *t, 1269 | uint32_t error_code) 1270 | { 1271 | struct binder_thread *target_thread; 1272 | struct binder_transaction *next; 1273 | 1274 | BUG_ON(t->flags & TF_ONE_WAY); 1275 | while (1) { 1276 | target_thread = t->from; 1277 | if (target_thread) { 1278 | if (target_thread->return_error != BR_OK && 1279 | target_thread->return_error2 == BR_OK) { 1280 | target_thread->return_error2 = 1281 | target_thread->return_error; 1282 | target_thread->return_error = BR_OK; 1283 | } 1284 | if (target_thread->return_error == BR_OK) { 1285 | binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 1286 | "send failed reply for transaction %d to %d:%d\n", 1287 | t->debug_id, 1288 | target_thread->proc->pid, 1289 | target_thread->pid); 1290 | 1291 | binder_pop_transaction(target_thread, t); 1292 | target_thread->return_error = error_code; 1293 | wake_up_interruptible(&target_thread->wait); 1294 | } else { 1295 | pr_err("reply failed, target thread, %d:%d, has error code %d already\n", 1296 | target_thread->proc->pid, 1297 | target_thread->pid, 1298 | target_thread->return_error); 1299 | } 1300 | return; 1301 | } 1302 | next = t->from_parent; 1303 | 1304 | binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 1305 | "send failed reply for transaction %d, target dead\n", 1306 | t->debug_id); 1307 | 1308 | binder_pop_transaction(target_thread, t); 1309 | if (next == NULL) { 1310 | binder_debug(BINDER_DEBUG_DEAD_BINDER, 1311 | "reply failed, no target thread at root\n"); 1312 | return; 1313 | } 1314 | t = next; 1315 | binder_debug(BINDER_DEBUG_DEAD_BINDER, 1316 | "reply failed, no target thread -- retry %d\n", 1317 | t->debug_id); 1318 | } 1319 | } 1320 | 1321 | /** 1322 | * binder_validate_object() - checks for a valid metadata object in a buffer. 1323 | * @buffer: binder_buffer that we're parsing. 1324 | * @offset: offset in the buffer at which to validate an object. 1325 | * 1326 | * Return: If there's a valid metadata object at @offset in @buffer, the 1327 | * size of that object. Otherwise, it returns zero. 1328 | */ 1329 | static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset) 1330 | { 1331 | /* Check if we can read a header first */ 1332 | struct binder_object_header *hdr; 1333 | size_t object_size = 0; 1334 | 1335 | if (offset > buffer->data_size - sizeof(*hdr) || 1336 | buffer->data_size < sizeof(*hdr) || 1337 | !IS_ALIGNED(offset, sizeof(u32))) 1338 | return 0; 1339 | 1340 | /* Ok, now see if we can read a complete object. 
*/ 1341 | hdr = (struct binder_object_header *)(buffer->data + offset); 1342 | switch (hdr->type) { 1343 | case BINDER_TYPE_BINDER: 1344 | case BINDER_TYPE_WEAK_BINDER: 1345 | case BINDER_TYPE_HANDLE: 1346 | case BINDER_TYPE_WEAK_HANDLE: 1347 | object_size = sizeof(struct flat_binder_object); 1348 | break; 1349 | case BINDER_TYPE_FD: 1350 | object_size = sizeof(struct binder_fd_object); 1351 | break; 1352 | case BINDER_TYPE_PTR: 1353 | object_size = sizeof(struct binder_buffer_object); 1354 | break; 1355 | case BINDER_TYPE_FDA: 1356 | object_size = sizeof(struct binder_fd_array_object); 1357 | break; 1358 | default: 1359 | return 0; 1360 | } 1361 | if (offset <= buffer->data_size - object_size && 1362 | buffer->data_size >= object_size) 1363 | return object_size; 1364 | else 1365 | return 0; 1366 | } 1367 | 1368 | /** 1369 | * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer. 1370 | * @b: binder_buffer containing the object 1371 | * @index: index in offset array at which the binder_buffer_object is 1372 | * located 1373 | * @start: points to the start of the offset array 1374 | * @num_valid: the number of valid offsets in the offset array 1375 | * 1376 | * Return: If @index is within the valid range of the offset array 1377 | * described by @start and @num_valid, and if there's a valid 1378 | * binder_buffer_object at the offset found in index @index 1379 | * of the offset array, that object is returned. Otherwise, 1380 | * %NULL is returned. 1381 | * Note that the offset found in index @index itself is not 1382 | * verified; this function assumes that @num_valid elements 1383 | * from @start were previously verified to have valid offsets. 1384 | */ 1385 | static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b, 1386 | binder_size_t index, 1387 | binder_size_t *start, 1388 | binder_size_t num_valid) 1389 | { 1390 | struct binder_buffer_object *buffer_obj; 1391 | binder_size_t *offp; 1392 | 1393 | if (index >= num_valid) 1394 | return NULL; 1395 | 1396 | offp = start + index; 1397 | buffer_obj = (struct binder_buffer_object *)(b->data + *offp); 1398 | if (buffer_obj->hdr.type != BINDER_TYPE_PTR) 1399 | return NULL; 1400 | 1401 | return buffer_obj; 1402 | } 1403 | 1404 | /** 1405 | * binder_validate_fixup() - validates pointer/fd fixups happen in order. 1406 | * @b: transaction buffer 1407 | * @objects_start start of objects buffer 1408 | * @buffer: binder_buffer_object in which to fix up 1409 | * @offset: start offset in @buffer to fix up 1410 | * @last_obj: last binder_buffer_object that we fixed up in 1411 | * @last_min_offset: minimum fixup offset in @last_obj 1412 | * 1413 | * Return: %true if a fixup in buffer @buffer at offset @offset is 1414 | * allowed. 1415 | * 1416 | * For safety reasons, we only allow fixups inside a buffer to happen 1417 | * at increasing offsets; additionally, we only allow fixup on the last 1418 | * buffer object that was verified, or one of its parents. 
1419 | * 1420 | * Example of what is allowed: 1421 | * 1422 | * A 1423 | * B (parent = A, offset = 0) 1424 | * C (parent = A, offset = 16) 1425 | * D (parent = C, offset = 0) 1426 | * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset) 1427 | * 1428 | * Examples of what is not allowed: 1429 | * 1430 | * Decreasing offsets within the same parent: 1431 | * A 1432 | * C (parent = A, offset = 16) 1433 | * B (parent = A, offset = 0) // decreasing offset within A 1434 | * 1435 | * Referring to a parent that wasn't the last object or any of its parents: 1436 | * A 1437 | * B (parent = A, offset = 0) 1438 | * C (parent = A, offset = 0) 1439 | * C (parent = A, offset = 16) 1440 | * D (parent = B, offset = 0) // B is not A or any of A's parents 1441 | */ 1442 | static bool binder_validate_fixup(struct binder_buffer *b, 1443 | binder_size_t *objects_start, 1444 | struct binder_buffer_object *buffer, 1445 | binder_size_t fixup_offset, 1446 | struct binder_buffer_object *last_obj, 1447 | binder_size_t last_min_offset) 1448 | { 1449 | if (!last_obj) { 1450 | /* Nothing to fix up in */ 1451 | return false; 1452 | } 1453 | 1454 | while (last_obj != buffer) { 1455 | /* 1456 | * Safe to retrieve the parent of last_obj, since it 1457 | * was already previously verified by the driver. 1458 | */ 1459 | if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0) 1460 | return false; 1461 | last_min_offset = last_obj->parent_offset + sizeof(uintptr_t); 1462 | last_obj = (struct binder_buffer_object *) 1463 | (b->data + *(objects_start + last_obj->parent)); 1464 | } 1465 | return (fixup_offset >= last_min_offset); 1466 | } 1467 | 1468 | static void binder_transaction_buffer_release(struct binder_proc *proc, 1469 | struct binder_buffer *buffer, 1470 | binder_size_t *failed_at) 1471 | { 1472 | binder_size_t *offp, *off_start, *off_end; 1473 | int debug_id = buffer->debug_id; 1474 | 1475 | binder_debug(BINDER_DEBUG_TRANSACTION, 1476 | "%d buffer release %d, size %zd-%zd, failed at %p\n", 1477 | proc->pid, buffer->debug_id, 1478 | buffer->data_size, buffer->offsets_size, failed_at); 1479 | 1480 | if (buffer->target_node) 1481 | binder_dec_node(buffer->target_node, 1, 0); 1482 | 1483 | off_start = (binder_size_t *)(buffer->data + 1484 | ALIGN(buffer->data_size, sizeof(void *))); 1485 | if (failed_at) 1486 | off_end = failed_at; 1487 | else 1488 | off_end = (void *)off_start + buffer->offsets_size; 1489 | for (offp = off_start; offp < off_end; offp++) { 1490 | struct binder_object_header *hdr; 1491 | size_t object_size = binder_validate_object(buffer, *offp); 1492 | 1493 | if (object_size == 0) { 1494 | pr_err("transaction release %d bad object at offset %lld, size %zd\n", 1495 | debug_id, (u64)*offp, buffer->data_size); 1496 | continue; 1497 | } 1498 | hdr = (struct binder_object_header *)(buffer->data + *offp); 1499 | switch (hdr->type) { 1500 | case BINDER_TYPE_BINDER: 1501 | case BINDER_TYPE_WEAK_BINDER: { 1502 | struct flat_binder_object *fp; 1503 | struct binder_node *node; 1504 | 1505 | fp = to_flat_binder_object(hdr); 1506 | node = binder_get_node(proc, fp->binder); 1507 | if (node == NULL) { 1508 | pr_err("transaction release %d bad node %016llx\n", 1509 | debug_id, (u64)fp->binder); 1510 | break; 1511 | } 1512 | binder_debug(BINDER_DEBUG_TRANSACTION, 1513 | " node %d u%016llx\n", 1514 | node->debug_id, (u64)node->ptr); 1515 | binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER, 1516 | 0); 1517 | } break; 1518 | case BINDER_TYPE_HANDLE: 1519 | case BINDER_TYPE_WEAK_HANDLE: { 1520 | struct 
flat_binder_object *fp; 1521 | struct binder_ref *ref; 1522 | 1523 | fp = to_flat_binder_object(hdr); 1524 | ref = binder_get_ref(proc, fp->handle, 1525 | hdr->type == BINDER_TYPE_HANDLE); 1526 | if (ref == NULL) { 1527 | pr_err("transaction release %d bad handle %d\n", 1528 | debug_id, fp->handle); 1529 | break; 1530 | } 1531 | binder_debug(BINDER_DEBUG_TRANSACTION, 1532 | " ref %d desc %d (node %d)\n", 1533 | ref->debug_id, ref->desc, ref->node->debug_id); 1534 | binder_dec_ref(ref, hdr->type == BINDER_TYPE_HANDLE); 1535 | } break; 1536 | 1537 | case BINDER_TYPE_FD: { 1538 | struct binder_fd_object *fp = to_binder_fd_object(hdr); 1539 | 1540 | binder_debug(BINDER_DEBUG_TRANSACTION, 1541 | " fd %d\n", fp->fd); 1542 | if (failed_at) 1543 | task_close_fd(proc, fp->fd); 1544 | } break; 1545 | case BINDER_TYPE_PTR: 1546 | /* 1547 | * Nothing to do here, this will get cleaned up when the 1548 | * transaction buffer gets freed 1549 | */ 1550 | break; 1551 | case BINDER_TYPE_FDA: { 1552 | struct binder_fd_array_object *fda; 1553 | struct binder_buffer_object *parent; 1554 | uintptr_t parent_buffer; 1555 | u32 *fd_array; 1556 | size_t fd_index; 1557 | binder_size_t fd_buf_size; 1558 | 1559 | fda = to_binder_fd_array_object(hdr); 1560 | parent = binder_validate_ptr(buffer, fda->parent, 1561 | off_start, 1562 | offp - off_start); 1563 | if (!parent) { 1564 | pr_err("transaction release %d bad parent offset", 1565 | debug_id); 1566 | continue; 1567 | } 1568 | /* 1569 | * Since the parent was already fixed up, convert it 1570 | * back to kernel address space to access it 1571 | */ 1572 | parent_buffer = parent->buffer - 1573 | proc->user_buffer_offset; 1574 | 1575 | fd_buf_size = sizeof(u32) * fda->num_fds; 1576 | if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { 1577 | pr_err("transaction release %d invalid number of fds (%lld)\n", 1578 | debug_id, (u64)fda->num_fds); 1579 | continue; 1580 | } 1581 | if (fd_buf_size > parent->length || 1582 | fda->parent_offset > parent->length - fd_buf_size) { 1583 | /* No space for all file descriptors here. 
*/ 1584 | pr_err("transaction release %d not enough space for %lld fds in buffer\n", 1585 | debug_id, (u64)fda->num_fds); 1586 | continue; 1587 | } 1588 | fd_array = (u32 *)(parent_buffer + fda->parent_offset); 1589 | for (fd_index = 0; fd_index < fda->num_fds; fd_index++) 1590 | task_close_fd(proc, fd_array[fd_index]); 1591 | } break; 1592 | default: 1593 | pr_err("transaction release %d bad object type %x\n", 1594 | debug_id, hdr->type); 1595 | break; 1596 | } 1597 | } 1598 | } 1599 | 1600 | static int binder_translate_binder(struct flat_binder_object *fp, 1601 | struct binder_transaction *t, 1602 | struct binder_thread *thread) 1603 | { 1604 | struct binder_node *node; 1605 | struct binder_ref *ref; 1606 | struct binder_proc *proc = thread->proc; 1607 | struct binder_proc *target_proc = t->to_proc; 1608 | 1609 | node = binder_get_node(proc, fp->binder); 1610 | if (!node) { 1611 | node = binder_new_node(proc, fp->binder, fp->cookie); 1612 | if (!node) 1613 | return -ENOMEM; 1614 | 1615 | node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK; 1616 | node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); 1617 | } 1618 | if (fp->cookie != node->cookie) { 1619 | binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n", 1620 | proc->pid, thread->pid, (u64)fp->binder, 1621 | node->debug_id, (u64)fp->cookie, 1622 | (u64)node->cookie); 1623 | return -EINVAL; 1624 | } 1625 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0) 1626 | if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) 1627 | return -EPERM; 1628 | #endif 1629 | 1630 | ref = binder_get_ref_for_node(target_proc, node); 1631 | if (!ref) 1632 | return -EINVAL; 1633 | 1634 | if (fp->hdr.type == BINDER_TYPE_BINDER) 1635 | fp->hdr.type = BINDER_TYPE_HANDLE; 1636 | else 1637 | fp->hdr.type = BINDER_TYPE_WEAK_HANDLE; 1638 | fp->binder = 0; 1639 | fp->handle = ref->desc; 1640 | fp->cookie = 0; 1641 | binder_inc_ref(ref, fp->hdr.type == BINDER_TYPE_HANDLE, &thread->todo); 1642 | 1643 | trace_binder_transaction_node_to_ref(t, node, ref); 1644 | binder_debug(BINDER_DEBUG_TRANSACTION, 1645 | " node %d u%016llx -> ref %d desc %d\n", 1646 | node->debug_id, (u64)node->ptr, 1647 | ref->debug_id, ref->desc); 1648 | 1649 | return 0; 1650 | } 1651 | 1652 | static int binder_translate_handle(struct flat_binder_object *fp, 1653 | struct binder_transaction *t, 1654 | struct binder_thread *thread) 1655 | { 1656 | struct binder_ref *ref; 1657 | struct binder_proc *proc = thread->proc; 1658 | struct binder_proc *target_proc = t->to_proc; 1659 | 1660 | ref = binder_get_ref(proc, fp->handle, 1661 | fp->hdr.type == BINDER_TYPE_HANDLE); 1662 | if (!ref) { 1663 | binder_user_error("%d:%d got transaction with invalid handle, %d\n", 1664 | proc->pid, thread->pid, fp->handle); 1665 | return -EINVAL; 1666 | } 1667 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0) 1668 | if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) 1669 | return -EPERM; 1670 | #endif 1671 | 1672 | if (ref->node->proc == target_proc) { 1673 | if (fp->hdr.type == BINDER_TYPE_HANDLE) 1674 | fp->hdr.type = BINDER_TYPE_BINDER; 1675 | else 1676 | fp->hdr.type = BINDER_TYPE_WEAK_BINDER; 1677 | fp->binder = ref->node->ptr; 1678 | fp->cookie = ref->node->cookie; 1679 | binder_inc_node(ref->node, fp->hdr.type == BINDER_TYPE_BINDER, 1680 | 0, NULL); 1681 | trace_binder_transaction_ref_to_node(t, ref); 1682 | binder_debug(BINDER_DEBUG_TRANSACTION, 1683 | " ref %d desc %d -> node %d u%016llx\n", 1684 | ref->debug_id, ref->desc, 
ref->node->debug_id, 1685 | (u64)ref->node->ptr); 1686 | } else { 1687 | struct binder_ref *new_ref; 1688 | 1689 | new_ref = binder_get_ref_for_node(target_proc, ref->node); 1690 | if (!new_ref) 1691 | return -EINVAL; 1692 | 1693 | fp->binder = 0; 1694 | fp->handle = new_ref->desc; 1695 | fp->cookie = 0; 1696 | binder_inc_ref(new_ref, fp->hdr.type == BINDER_TYPE_HANDLE, 1697 | NULL); 1698 | trace_binder_transaction_ref_to_ref(t, ref, new_ref); 1699 | binder_debug(BINDER_DEBUG_TRANSACTION, 1700 | " ref %d desc %d -> ref %d desc %d (node %d)\n", 1701 | ref->debug_id, ref->desc, new_ref->debug_id, 1702 | new_ref->desc, ref->node->debug_id); 1703 | } 1704 | return 0; 1705 | } 1706 | 1707 | static int binder_translate_fd(int fd, 1708 | struct binder_transaction *t, 1709 | struct binder_thread *thread, 1710 | struct binder_transaction *in_reply_to) 1711 | { 1712 | struct binder_proc *proc = thread->proc; 1713 | struct binder_proc *target_proc = t->to_proc; 1714 | int target_fd; 1715 | struct file *file; 1716 | int ret; 1717 | bool target_allows_fd; 1718 | 1719 | if (in_reply_to) 1720 | target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS); 1721 | else 1722 | target_allows_fd = t->buffer->target_node->accept_fds; 1723 | if (!target_allows_fd) { 1724 | binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n", 1725 | proc->pid, thread->pid, 1726 | in_reply_to ? "reply" : "transaction", 1727 | fd); 1728 | ret = -EPERM; 1729 | goto err_fd_not_accepted; 1730 | } 1731 | 1732 | file = fget(fd); 1733 | if (!file) { 1734 | binder_user_error("%d:%d got transaction with invalid fd, %d\n", 1735 | proc->pid, thread->pid, fd); 1736 | ret = -EBADF; 1737 | goto err_fget; 1738 | } 1739 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0) 1740 | ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file); 1741 | if (ret < 0) { 1742 | ret = -EPERM; 1743 | goto err_security; 1744 | } 1745 | #endif 1746 | 1747 | target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC); 1748 | if (target_fd < 0) { 1749 | ret = -ENOMEM; 1750 | goto err_get_unused_fd; 1751 | } 1752 | task_fd_install(target_proc, target_fd, file); 1753 | trace_binder_transaction_fd(t, fd, target_fd); 1754 | binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n", 1755 | fd, target_fd); 1756 | 1757 | return target_fd; 1758 | 1759 | err_get_unused_fd: 1760 | err_security: 1761 | fput(file); 1762 | err_fget: 1763 | err_fd_not_accepted: 1764 | return ret; 1765 | } 1766 | 1767 | static int binder_translate_fd_array(struct binder_fd_array_object *fda, 1768 | struct binder_buffer_object *parent, 1769 | struct binder_transaction *t, 1770 | struct binder_thread *thread, 1771 | struct binder_transaction *in_reply_to) 1772 | { 1773 | binder_size_t fdi, fd_buf_size, num_installed_fds; 1774 | int target_fd; 1775 | uintptr_t parent_buffer; 1776 | u32 *fd_array; 1777 | struct binder_proc *proc = thread->proc; 1778 | struct binder_proc *target_proc = t->to_proc; 1779 | 1780 | fd_buf_size = sizeof(u32) * fda->num_fds; 1781 | if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { 1782 | binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n", 1783 | proc->pid, thread->pid, (u64)fda->num_fds); 1784 | return -EINVAL; 1785 | } 1786 | if (fd_buf_size > parent->length || 1787 | fda->parent_offset > parent->length - fd_buf_size) { 1788 | /* No space for all file descriptors here. 
*/ 1789 | binder_user_error("%d:%d not enough space to store %lld fds in buffer\n", 1790 | proc->pid, thread->pid, (u64)fda->num_fds); 1791 | return -EINVAL; 1792 | } 1793 | /* 1794 | * Since the parent was already fixed up, convert it 1795 | * back to the kernel address space to access it 1796 | */ 1797 | parent_buffer = parent->buffer - target_proc->user_buffer_offset; 1798 | fd_array = (u32 *)(parent_buffer + fda->parent_offset); 1799 | if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) { 1800 | binder_user_error("%d:%d parent offset not aligned correctly.\n", 1801 | proc->pid, thread->pid); 1802 | return -EINVAL; 1803 | } 1804 | for (fdi = 0; fdi < fda->num_fds; fdi++) { 1805 | target_fd = binder_translate_fd(fd_array[fdi], t, thread, 1806 | in_reply_to); 1807 | if (target_fd < 0) 1808 | goto err_translate_fd_failed; 1809 | fd_array[fdi] = target_fd; 1810 | } 1811 | return 0; 1812 | 1813 | err_translate_fd_failed: 1814 | /* 1815 | * Failed to allocate fd or security error, free fds 1816 | * installed so far. 1817 | */ 1818 | num_installed_fds = fdi; 1819 | for (fdi = 0; fdi < num_installed_fds; fdi++) 1820 | task_close_fd(target_proc, fd_array[fdi]); 1821 | return target_fd; 1822 | } 1823 | 1824 | static int binder_fixup_parent(struct binder_transaction *t, 1825 | struct binder_thread *thread, 1826 | struct binder_buffer_object *bp, 1827 | binder_size_t *off_start, 1828 | binder_size_t num_valid, 1829 | struct binder_buffer_object *last_fixup_obj, 1830 | binder_size_t last_fixup_min_off) 1831 | { 1832 | struct binder_buffer_object *parent; 1833 | u8 *parent_buffer; 1834 | struct binder_buffer *b = t->buffer; 1835 | struct binder_proc *proc = thread->proc; 1836 | struct binder_proc *target_proc = t->to_proc; 1837 | 1838 | if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT)) 1839 | return 0; 1840 | 1841 | parent = binder_validate_ptr(b, bp->parent, off_start, num_valid); 1842 | if (!parent) { 1843 | binder_user_error("%d:%d got transaction with invalid parent offset or type\n", 1844 | proc->pid, thread->pid); 1845 | return -EINVAL; 1846 | } 1847 | 1848 | if (!binder_validate_fixup(b, off_start, 1849 | parent, bp->parent_offset, 1850 | last_fixup_obj, 1851 | last_fixup_min_off)) { 1852 | binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n", 1853 | proc->pid, thread->pid); 1854 | return -EINVAL; 1855 | } 1856 | 1857 | if (parent->length < sizeof(binder_uintptr_t) || 1858 | bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) { 1859 | /* No space for a pointer here! 
*/ 1860 | binder_user_error("%d:%d got transaction with invalid parent offset\n", 1861 | proc->pid, thread->pid); 1862 | return -EINVAL; 1863 | } 1864 | parent_buffer = (u8 *)(parent->buffer - 1865 | target_proc->user_buffer_offset); 1866 | *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer; 1867 | 1868 | return 0; 1869 | } 1870 | 1871 | static void binder_transaction(struct binder_proc *proc, 1872 | struct binder_thread *thread, 1873 | struct binder_transaction_data *tr, int reply, 1874 | binder_size_t extra_buffers_size) 1875 | { 1876 | int ret; 1877 | struct binder_transaction *t; 1878 | struct binder_work *tcomplete; 1879 | binder_size_t *offp, *off_end, *off_start; 1880 | binder_size_t off_min; 1881 | u8 *sg_bufp, *sg_buf_end; 1882 | struct binder_proc *target_proc; 1883 | struct binder_thread *target_thread = NULL; 1884 | struct binder_node *target_node = NULL; 1885 | struct list_head *target_list; 1886 | wait_queue_head_t *target_wait; 1887 | struct binder_transaction *in_reply_to = NULL; 1888 | struct binder_transaction_log_entry *e; 1889 | uint32_t return_error; 1890 | struct binder_buffer_object *last_fixup_obj = NULL; 1891 | binder_size_t last_fixup_min_off = 0; 1892 | struct binder_context *context = proc->context; 1893 | 1894 | e = binder_transaction_log_add(&binder_transaction_log); 1895 | e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY); 1896 | e->from_proc = proc->pid; 1897 | e->from_thread = thread->pid; 1898 | e->target_handle = tr->target.handle; 1899 | e->data_size = tr->data_size; 1900 | e->offsets_size = tr->offsets_size; 1901 | e->context_name = proc->context->name; 1902 | 1903 | if (reply) { 1904 | in_reply_to = thread->transaction_stack; 1905 | if (in_reply_to == NULL) { 1906 | binder_user_error("%d:%d got reply transaction with no transaction stack\n", 1907 | proc->pid, thread->pid); 1908 | return_error = BR_FAILED_REPLY; 1909 | goto err_empty_call_stack; 1910 | } 1911 | binder_set_nice(in_reply_to->saved_priority); 1912 | if (in_reply_to->to_thread != thread) { 1913 | binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n", 1914 | proc->pid, thread->pid, in_reply_to->debug_id, 1915 | in_reply_to->to_proc ? 1916 | in_reply_to->to_proc->pid : 0, 1917 | in_reply_to->to_thread ? 1918 | in_reply_to->to_thread->pid : 0); 1919 | return_error = BR_FAILED_REPLY; 1920 | in_reply_to = NULL; 1921 | goto err_bad_call_stack; 1922 | } 1923 | thread->transaction_stack = in_reply_to->to_parent; 1924 | target_thread = in_reply_to->from; 1925 | if (target_thread == NULL) { 1926 | return_error = BR_DEAD_REPLY; 1927 | goto err_dead_binder; 1928 | } 1929 | if (target_thread->transaction_stack != in_reply_to) { 1930 | binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n", 1931 | proc->pid, thread->pid, 1932 | target_thread->transaction_stack ? 
1933 | target_thread->transaction_stack->debug_id : 0, 1934 | in_reply_to->debug_id); 1935 | return_error = BR_FAILED_REPLY; 1936 | in_reply_to = NULL; 1937 | target_thread = NULL; 1938 | goto err_dead_binder; 1939 | } 1940 | target_proc = target_thread->proc; 1941 | } else { 1942 | if (tr->target.handle) { 1943 | struct binder_ref *ref; 1944 | 1945 | ref = binder_get_ref(proc, tr->target.handle, true); 1946 | if (ref == NULL) { 1947 | binder_user_error("%d:%d got transaction to invalid handle\n", 1948 | proc->pid, thread->pid); 1949 | return_error = BR_FAILED_REPLY; 1950 | goto err_invalid_target_handle; 1951 | } 1952 | target_node = ref->node; 1953 | } else { 1954 | target_node = context->binder_context_mgr_node; 1955 | if (target_node == NULL) { 1956 | return_error = BR_DEAD_REPLY; 1957 | goto err_no_context_mgr_node; 1958 | } 1959 | } 1960 | e->to_node = target_node->debug_id; 1961 | target_proc = target_node->proc; 1962 | if (target_proc == NULL) { 1963 | return_error = BR_DEAD_REPLY; 1964 | goto err_dead_binder; 1965 | } 1966 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0) 1967 | if (security_binder_transaction(proc->tsk, 1968 | target_proc->tsk) < 0) { 1969 | return_error = BR_FAILED_REPLY; 1970 | goto err_invalid_target_handle; 1971 | } 1972 | #endif 1973 | if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { 1974 | struct binder_transaction *tmp; 1975 | 1976 | tmp = thread->transaction_stack; 1977 | if (tmp->to_thread != thread) { 1978 | binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n", 1979 | proc->pid, thread->pid, tmp->debug_id, 1980 | tmp->to_proc ? tmp->to_proc->pid : 0, 1981 | tmp->to_thread ? 1982 | tmp->to_thread->pid : 0); 1983 | return_error = BR_FAILED_REPLY; 1984 | goto err_bad_call_stack; 1985 | } 1986 | while (tmp) { 1987 | if (tmp->from && tmp->from->proc == target_proc) 1988 | target_thread = tmp->from; 1989 | tmp = tmp->from_parent; 1990 | } 1991 | } 1992 | } 1993 | if (target_thread) { 1994 | e->to_thread = target_thread->pid; 1995 | target_list = &target_thread->todo; 1996 | target_wait = &target_thread->wait; 1997 | } else { 1998 | target_list = &target_proc->todo; 1999 | target_wait = &target_proc->wait; 2000 | } 2001 | e->to_proc = target_proc->pid; 2002 | 2003 | /* TODO: reuse incoming transaction for reply */ 2004 | t = kzalloc(sizeof(*t), GFP_KERNEL); 2005 | if (t == NULL) { 2006 | return_error = BR_FAILED_REPLY; 2007 | goto err_alloc_t_failed; 2008 | } 2009 | binder_stats_created(BINDER_STAT_TRANSACTION); 2010 | 2011 | tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); 2012 | if (tcomplete == NULL) { 2013 | return_error = BR_FAILED_REPLY; 2014 | goto err_alloc_tcomplete_failed; 2015 | } 2016 | binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE); 2017 | 2018 | t->debug_id = ++binder_last_id; 2019 | e->debug_id = t->debug_id; 2020 | 2021 | if (reply) 2022 | binder_debug(BINDER_DEBUG_TRANSACTION, 2023 | "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n", 2024 | proc->pid, thread->pid, t->debug_id, 2025 | target_proc->pid, target_thread->pid, 2026 | (u64)tr->data.ptr.buffer, 2027 | (u64)tr->data.ptr.offsets, 2028 | (u64)tr->data_size, (u64)tr->offsets_size, 2029 | (u64)extra_buffers_size); 2030 | else 2031 | binder_debug(BINDER_DEBUG_TRANSACTION, 2032 | "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n", 2033 | proc->pid, thread->pid, t->debug_id, 2034 | target_proc->pid, target_node->debug_id, 2035 | (u64)tr->data.ptr.buffer, 
2036 | (u64)tr->data.ptr.offsets, 2037 | (u64)tr->data_size, (u64)tr->offsets_size, 2038 | (u64)extra_buffers_size); 2039 | 2040 | if (!reply && !(tr->flags & TF_ONE_WAY)) 2041 | t->from = thread; 2042 | else 2043 | t->from = NULL; 2044 | t->sender_euid = task_euid(proc->tsk); 2045 | t->to_proc = target_proc; 2046 | t->to_thread = target_thread; 2047 | t->code = tr->code; 2048 | t->flags = tr->flags; 2049 | t->priority = task_nice(current); 2050 | 2051 | trace_binder_transaction(reply, t, target_node); 2052 | 2053 | t->buffer = binder_alloc_buf(target_proc, tr->data_size, 2054 | tr->offsets_size, extra_buffers_size, 2055 | !reply && (t->flags & TF_ONE_WAY)); 2056 | if (t->buffer == NULL) { 2057 | return_error = BR_FAILED_REPLY; 2058 | goto err_binder_alloc_buf_failed; 2059 | } 2060 | t->buffer->allow_user_free = 0; 2061 | t->buffer->debug_id = t->debug_id; 2062 | t->buffer->transaction = t; 2063 | t->buffer->target_node = target_node; 2064 | trace_binder_transaction_alloc_buf(t->buffer); 2065 | if (target_node) 2066 | binder_inc_node(target_node, 1, 0, NULL); 2067 | 2068 | off_start = (binder_size_t *)(t->buffer->data + 2069 | ALIGN(tr->data_size, sizeof(void *))); 2070 | offp = off_start; 2071 | 2072 | if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t) 2073 | tr->data.ptr.buffer, tr->data_size)) { 2074 | binder_user_error("%d:%d got transaction with invalid data ptr\n", 2075 | proc->pid, thread->pid); 2076 | return_error = BR_FAILED_REPLY; 2077 | goto err_copy_data_failed; 2078 | } 2079 | if (copy_from_user(offp, (const void __user *)(uintptr_t) 2080 | tr->data.ptr.offsets, tr->offsets_size)) { 2081 | binder_user_error("%d:%d got transaction with invalid offsets ptr\n", 2082 | proc->pid, thread->pid); 2083 | return_error = BR_FAILED_REPLY; 2084 | goto err_copy_data_failed; 2085 | } 2086 | if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) { 2087 | binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n", 2088 | proc->pid, thread->pid, (u64)tr->offsets_size); 2089 | return_error = BR_FAILED_REPLY; 2090 | goto err_bad_offset; 2091 | } 2092 | if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) { 2093 | binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n", 2094 | proc->pid, thread->pid, 2095 | (u64)extra_buffers_size); 2096 | return_error = BR_FAILED_REPLY; 2097 | goto err_bad_offset; 2098 | } 2099 | off_end = (void *)off_start + tr->offsets_size; 2100 | sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *))); 2101 | sg_buf_end = sg_bufp + extra_buffers_size; 2102 | off_min = 0; 2103 | for (; offp < off_end; offp++) { 2104 | struct binder_object_header *hdr; 2105 | size_t object_size = binder_validate_object(t->buffer, *offp); 2106 | 2107 | if (object_size == 0 || *offp < off_min) { 2108 | binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n", 2109 | proc->pid, thread->pid, (u64)*offp, 2110 | (u64)off_min, 2111 | (u64)t->buffer->data_size); 2112 | return_error = BR_FAILED_REPLY; 2113 | goto err_bad_offset; 2114 | } 2115 | 2116 | hdr = (struct binder_object_header *)(t->buffer->data + *offp); 2117 | off_min = *offp + object_size; 2118 | switch (hdr->type) { 2119 | case BINDER_TYPE_BINDER: 2120 | case BINDER_TYPE_WEAK_BINDER: { 2121 | struct flat_binder_object *fp; 2122 | 2123 | fp = to_flat_binder_object(hdr); 2124 | ret = binder_translate_binder(fp, t, thread); 2125 | if (ret < 0) { 2126 | return_error = BR_FAILED_REPLY; 2127 | goto err_translate_failed; 2128 | } 2129 | } break; 2130 
| case BINDER_TYPE_HANDLE: 2131 | case BINDER_TYPE_WEAK_HANDLE: { 2132 | struct flat_binder_object *fp; 2133 | 2134 | fp = to_flat_binder_object(hdr); 2135 | ret = binder_translate_handle(fp, t, thread); 2136 | if (ret < 0) { 2137 | return_error = BR_FAILED_REPLY; 2138 | goto err_translate_failed; 2139 | } 2140 | } break; 2141 | 2142 | case BINDER_TYPE_FD: { 2143 | struct binder_fd_object *fp = to_binder_fd_object(hdr); 2144 | int target_fd = binder_translate_fd(fp->fd, t, thread, 2145 | in_reply_to); 2146 | 2147 | if (target_fd < 0) { 2148 | return_error = BR_FAILED_REPLY; 2149 | goto err_translate_failed; 2150 | } 2151 | fp->pad_binder = 0; 2152 | fp->fd = target_fd; 2153 | } break; 2154 | case BINDER_TYPE_FDA: { 2155 | struct binder_fd_array_object *fda = 2156 | to_binder_fd_array_object(hdr); 2157 | struct binder_buffer_object *parent = 2158 | binder_validate_ptr(t->buffer, fda->parent, 2159 | off_start, 2160 | offp - off_start); 2161 | if (!parent) { 2162 | binder_user_error("%d:%d got transaction with invalid parent offset or type\n", 2163 | proc->pid, thread->pid); 2164 | return_error = BR_FAILED_REPLY; 2165 | goto err_bad_parent; 2166 | } 2167 | if (!binder_validate_fixup(t->buffer, off_start, 2168 | parent, fda->parent_offset, 2169 | last_fixup_obj, 2170 | last_fixup_min_off)) { 2171 | binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n", 2172 | proc->pid, thread->pid); 2173 | return_error = BR_FAILED_REPLY; 2174 | goto err_bad_parent; 2175 | } 2176 | ret = binder_translate_fd_array(fda, parent, t, thread, 2177 | in_reply_to); 2178 | if (ret < 0) { 2179 | return_error = BR_FAILED_REPLY; 2180 | goto err_translate_failed; 2181 | } 2182 | last_fixup_obj = parent; 2183 | last_fixup_min_off = 2184 | fda->parent_offset + sizeof(u32) * fda->num_fds; 2185 | } break; 2186 | case BINDER_TYPE_PTR: { 2187 | struct binder_buffer_object *bp = 2188 | to_binder_buffer_object(hdr); 2189 | size_t buf_left = sg_buf_end - sg_bufp; 2190 | 2191 | if (bp->length > buf_left) { 2192 | binder_user_error("%d:%d got transaction with too large buffer\n", 2193 | proc->pid, thread->pid); 2194 | return_error = BR_FAILED_REPLY; 2195 | goto err_bad_offset; 2196 | } 2197 | if (copy_from_user(sg_bufp, 2198 | (const void __user *)(uintptr_t) 2199 | bp->buffer, bp->length)) { 2200 | binder_user_error("%d:%d got transaction with invalid offsets ptr\n", 2201 | proc->pid, thread->pid); 2202 | return_error = BR_FAILED_REPLY; 2203 | goto err_copy_data_failed; 2204 | } 2205 | /* Fixup buffer pointer to target proc address space */ 2206 | bp->buffer = (uintptr_t)sg_bufp + 2207 | target_proc->user_buffer_offset; 2208 | sg_bufp += ALIGN(bp->length, sizeof(u64)); 2209 | 2210 | ret = binder_fixup_parent(t, thread, bp, off_start, 2211 | offp - off_start, 2212 | last_fixup_obj, 2213 | last_fixup_min_off); 2214 | if (ret < 0) { 2215 | return_error = BR_FAILED_REPLY; 2216 | goto err_translate_failed; 2217 | } 2218 | last_fixup_obj = bp; 2219 | last_fixup_min_off = 0; 2220 | } break; 2221 | default: 2222 | binder_user_error("%d:%d got transaction with invalid object type, %x\n", 2223 | proc->pid, thread->pid, hdr->type); 2224 | return_error = BR_FAILED_REPLY; 2225 | goto err_bad_object_type; 2226 | } 2227 | } 2228 | if (reply) { 2229 | BUG_ON(t->buffer->async_transaction != 0); 2230 | binder_pop_transaction(target_thread, in_reply_to); 2231 | } else if (!(t->flags & TF_ONE_WAY)) { 2232 | BUG_ON(t->buffer->async_transaction != 0); 2233 | t->need_reply = 1; 2234 | t->from_parent = thread->transaction_stack; 2235 | 
thread->transaction_stack = t; 2236 | } else { 2237 | BUG_ON(target_node == NULL); 2238 | BUG_ON(t->buffer->async_transaction != 1); 2239 | if (target_node->has_async_transaction) { 2240 | target_list = &target_node->async_todo; 2241 | target_wait = NULL; 2242 | } else 2243 | target_node->has_async_transaction = 1; 2244 | } 2245 | t->work.type = BINDER_WORK_TRANSACTION; 2246 | list_add_tail(&t->work.entry, target_list); 2247 | tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; 2248 | list_add_tail(&tcomplete->entry, &thread->todo); 2249 | if (target_wait) { 2250 | if (reply || !(t->flags & TF_ONE_WAY)) 2251 | wake_up_interruptible_sync(target_wait); 2252 | else 2253 | wake_up_interruptible(target_wait); 2254 | } 2255 | return; 2256 | 2257 | err_translate_failed: 2258 | err_bad_object_type: 2259 | err_bad_offset: 2260 | err_bad_parent: 2261 | err_copy_data_failed: 2262 | trace_binder_transaction_failed_buffer_release(t->buffer); 2263 | binder_transaction_buffer_release(target_proc, t->buffer, offp); 2264 | t->buffer->transaction = NULL; 2265 | binder_free_buf(target_proc, t->buffer); 2266 | err_binder_alloc_buf_failed: 2267 | kfree(tcomplete); 2268 | binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 2269 | err_alloc_tcomplete_failed: 2270 | kfree(t); 2271 | binder_stats_deleted(BINDER_STAT_TRANSACTION); 2272 | err_alloc_t_failed: 2273 | err_bad_call_stack: 2274 | err_empty_call_stack: 2275 | err_dead_binder: 2276 | err_invalid_target_handle: 2277 | err_no_context_mgr_node: 2278 | binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 2279 | "%d:%d transaction failed %d, size %lld-%lld\n", 2280 | proc->pid, thread->pid, return_error, 2281 | (u64)tr->data_size, (u64)tr->offsets_size); 2282 | 2283 | { 2284 | struct binder_transaction_log_entry *fe; 2285 | 2286 | fe = binder_transaction_log_add(&binder_transaction_log_failed); 2287 | *fe = *e; 2288 | } 2289 | 2290 | BUG_ON(thread->return_error != BR_OK); 2291 | if (in_reply_to) { 2292 | thread->return_error = BR_TRANSACTION_COMPLETE; 2293 | binder_send_failed_reply(in_reply_to, return_error); 2294 | } else 2295 | thread->return_error = return_error; 2296 | } 2297 | 2298 | static int binder_thread_write(struct binder_proc *proc, 2299 | struct binder_thread *thread, 2300 | binder_uintptr_t binder_buffer, size_t size, 2301 | binder_size_t *consumed) 2302 | { 2303 | uint32_t cmd; 2304 | struct binder_context *context = proc->context; 2305 | void __user *buffer = (void __user *)(uintptr_t)binder_buffer; 2306 | void __user *ptr = buffer + *consumed; 2307 | void __user *end = buffer + size; 2308 | 2309 | while (ptr < end && thread->return_error == BR_OK) { 2310 | if (get_user(cmd, (uint32_t __user *)ptr)) 2311 | return -EFAULT; 2312 | ptr += sizeof(uint32_t); 2313 | trace_binder_command(cmd); 2314 | if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) { 2315 | binder_stats.bc[_IOC_NR(cmd)]++; 2316 | proc->stats.bc[_IOC_NR(cmd)]++; 2317 | thread->stats.bc[_IOC_NR(cmd)]++; 2318 | } 2319 | switch (cmd) { 2320 | case BC_INCREFS: 2321 | case BC_ACQUIRE: 2322 | case BC_RELEASE: 2323 | case BC_DECREFS: { 2324 | uint32_t target; 2325 | struct binder_ref *ref; 2326 | const char *debug_string; 2327 | 2328 | if (get_user(target, (uint32_t __user *)ptr)) 2329 | return -EFAULT; 2330 | ptr += sizeof(uint32_t); 2331 | if (target == 0 && context->binder_context_mgr_node && 2332 | (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) { 2333 | ref = binder_get_ref_for_node(proc, 2334 | context->binder_context_mgr_node); 2335 | if (ref->desc != target) { 2336 | binder_user_error("%d:%d 
tried to acquire reference to desc 0, got %d instead\n", 2337 | proc->pid, thread->pid, 2338 | ref->desc); 2339 | } 2340 | } else 2341 | ref = binder_get_ref(proc, target, 2342 | cmd == BC_ACQUIRE || 2343 | cmd == BC_RELEASE); 2344 | if (ref == NULL) { 2345 | binder_user_error("%d:%d refcount change on invalid ref %d\n", 2346 | proc->pid, thread->pid, target); 2347 | break; 2348 | } 2349 | switch (cmd) { 2350 | case BC_INCREFS: 2351 | debug_string = "IncRefs"; 2352 | binder_inc_ref(ref, 0, NULL); 2353 | break; 2354 | case BC_ACQUIRE: 2355 | debug_string = "Acquire"; 2356 | binder_inc_ref(ref, 1, NULL); 2357 | break; 2358 | case BC_RELEASE: 2359 | debug_string = "Release"; 2360 | binder_dec_ref(ref, 1); 2361 | break; 2362 | case BC_DECREFS: 2363 | default: 2364 | debug_string = "DecRefs"; 2365 | binder_dec_ref(ref, 0); 2366 | break; 2367 | } 2368 | binder_debug(BINDER_DEBUG_USER_REFS, 2369 | "%d:%d %s ref %d desc %d s %d w %d for node %d\n", 2370 | proc->pid, thread->pid, debug_string, ref->debug_id, 2371 | ref->desc, ref->strong, ref->weak, ref->node->debug_id); 2372 | break; 2373 | } 2374 | case BC_INCREFS_DONE: 2375 | case BC_ACQUIRE_DONE: { 2376 | binder_uintptr_t node_ptr; 2377 | binder_uintptr_t cookie; 2378 | struct binder_node *node; 2379 | 2380 | if (get_user(node_ptr, (binder_uintptr_t __user *)ptr)) 2381 | return -EFAULT; 2382 | ptr += sizeof(binder_uintptr_t); 2383 | if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 2384 | return -EFAULT; 2385 | ptr += sizeof(binder_uintptr_t); 2386 | node = binder_get_node(proc, node_ptr); 2387 | if (node == NULL) { 2388 | binder_user_error("%d:%d %s u%016llx no match\n", 2389 | proc->pid, thread->pid, 2390 | cmd == BC_INCREFS_DONE ? 2391 | "BC_INCREFS_DONE" : 2392 | "BC_ACQUIRE_DONE", 2393 | (u64)node_ptr); 2394 | break; 2395 | } 2396 | if (cookie != node->cookie) { 2397 | binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n", 2398 | proc->pid, thread->pid, 2399 | cmd == BC_INCREFS_DONE ? 2400 | "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", 2401 | (u64)node_ptr, node->debug_id, 2402 | (u64)cookie, (u64)node->cookie); 2403 | break; 2404 | } 2405 | if (cmd == BC_ACQUIRE_DONE) { 2406 | if (node->pending_strong_ref == 0) { 2407 | binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n", 2408 | proc->pid, thread->pid, 2409 | node->debug_id); 2410 | break; 2411 | } 2412 | node->pending_strong_ref = 0; 2413 | } else { 2414 | if (node->pending_weak_ref == 0) { 2415 | binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n", 2416 | proc->pid, thread->pid, 2417 | node->debug_id); 2418 | break; 2419 | } 2420 | node->pending_weak_ref = 0; 2421 | } 2422 | binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0); 2423 | binder_debug(BINDER_DEBUG_USER_REFS, 2424 | "%d:%d %s node %d ls %d lw %d\n", 2425 | proc->pid, thread->pid, 2426 | cmd == BC_INCREFS_DONE ? 
"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", 2427 | node->debug_id, node->local_strong_refs, node->local_weak_refs); 2428 | break; 2429 | } 2430 | case BC_ATTEMPT_ACQUIRE: 2431 | pr_err("BC_ATTEMPT_ACQUIRE not supported\n"); 2432 | return -EINVAL; 2433 | case BC_ACQUIRE_RESULT: 2434 | pr_err("BC_ACQUIRE_RESULT not supported\n"); 2435 | return -EINVAL; 2436 | 2437 | case BC_FREE_BUFFER: { 2438 | binder_uintptr_t data_ptr; 2439 | struct binder_buffer *buffer; 2440 | 2441 | if (get_user(data_ptr, (binder_uintptr_t __user *)ptr)) 2442 | return -EFAULT; 2443 | ptr += sizeof(binder_uintptr_t); 2444 | 2445 | buffer = binder_buffer_lookup(proc, data_ptr); 2446 | if (buffer == NULL) { 2447 | binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n", 2448 | proc->pid, thread->pid, (u64)data_ptr); 2449 | break; 2450 | } 2451 | if (!buffer->allow_user_free) { 2452 | binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n", 2453 | proc->pid, thread->pid, (u64)data_ptr); 2454 | break; 2455 | } 2456 | binder_debug(BINDER_DEBUG_FREE_BUFFER, 2457 | "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n", 2458 | proc->pid, thread->pid, (u64)data_ptr, 2459 | buffer->debug_id, 2460 | buffer->transaction ? "active" : "finished"); 2461 | 2462 | if (buffer->transaction) { 2463 | buffer->transaction->buffer = NULL; 2464 | buffer->transaction = NULL; 2465 | } 2466 | if (buffer->async_transaction && buffer->target_node) { 2467 | BUG_ON(!buffer->target_node->has_async_transaction); 2468 | if (list_empty(&buffer->target_node->async_todo)) 2469 | buffer->target_node->has_async_transaction = 0; 2470 | else 2471 | list_move_tail(buffer->target_node->async_todo.next, &thread->todo); 2472 | } 2473 | trace_binder_transaction_buffer_release(buffer); 2474 | binder_transaction_buffer_release(proc, buffer, NULL); 2475 | binder_free_buf(proc, buffer); 2476 | break; 2477 | } 2478 | 2479 | case BC_TRANSACTION_SG: 2480 | case BC_REPLY_SG: { 2481 | struct binder_transaction_data_sg tr; 2482 | 2483 | if (copy_from_user(&tr, ptr, sizeof(tr))) 2484 | return -EFAULT; 2485 | ptr += sizeof(tr); 2486 | binder_transaction(proc, thread, &tr.transaction_data, 2487 | cmd == BC_REPLY_SG, tr.buffers_size); 2488 | break; 2489 | } 2490 | case BC_TRANSACTION: 2491 | case BC_REPLY: { 2492 | struct binder_transaction_data tr; 2493 | 2494 | if (copy_from_user(&tr, ptr, sizeof(tr))) 2495 | return -EFAULT; 2496 | ptr += sizeof(tr); 2497 | binder_transaction(proc, thread, &tr, 2498 | cmd == BC_REPLY, 0); 2499 | break; 2500 | } 2501 | 2502 | case BC_REGISTER_LOOPER: 2503 | binder_debug(BINDER_DEBUG_THREADS, 2504 | "%d:%d BC_REGISTER_LOOPER\n", 2505 | proc->pid, thread->pid); 2506 | if (thread->looper & BINDER_LOOPER_STATE_ENTERED) { 2507 | thread->looper |= BINDER_LOOPER_STATE_INVALID; 2508 | binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n", 2509 | proc->pid, thread->pid); 2510 | } else if (proc->requested_threads == 0) { 2511 | thread->looper |= BINDER_LOOPER_STATE_INVALID; 2512 | binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n", 2513 | proc->pid, thread->pid); 2514 | } else { 2515 | proc->requested_threads--; 2516 | proc->requested_threads_started++; 2517 | } 2518 | thread->looper |= BINDER_LOOPER_STATE_REGISTERED; 2519 | break; 2520 | case BC_ENTER_LOOPER: 2521 | binder_debug(BINDER_DEBUG_THREADS, 2522 | "%d:%d BC_ENTER_LOOPER\n", 2523 | proc->pid, thread->pid); 2524 | if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) { 2525 | thread->looper |= 
BINDER_LOOPER_STATE_INVALID; 2526 | binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n", 2527 | proc->pid, thread->pid); 2528 | } 2529 | thread->looper |= BINDER_LOOPER_STATE_ENTERED; 2530 | break; 2531 | case BC_EXIT_LOOPER: 2532 | binder_debug(BINDER_DEBUG_THREADS, 2533 | "%d:%d BC_EXIT_LOOPER\n", 2534 | proc->pid, thread->pid); 2535 | thread->looper |= BINDER_LOOPER_STATE_EXITED; 2536 | break; 2537 | 2538 | case BC_REQUEST_DEATH_NOTIFICATION: 2539 | case BC_CLEAR_DEATH_NOTIFICATION: { 2540 | uint32_t target; 2541 | binder_uintptr_t cookie; 2542 | struct binder_ref *ref; 2543 | struct binder_ref_death *death; 2544 | 2545 | if (get_user(target, (uint32_t __user *)ptr)) 2546 | return -EFAULT; 2547 | ptr += sizeof(uint32_t); 2548 | if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 2549 | return -EFAULT; 2550 | ptr += sizeof(binder_uintptr_t); 2551 | ref = binder_get_ref(proc, target, false); 2552 | if (ref == NULL) { 2553 | binder_user_error("%d:%d %s invalid ref %d\n", 2554 | proc->pid, thread->pid, 2555 | cmd == BC_REQUEST_DEATH_NOTIFICATION ? 2556 | "BC_REQUEST_DEATH_NOTIFICATION" : 2557 | "BC_CLEAR_DEATH_NOTIFICATION", 2558 | target); 2559 | break; 2560 | } 2561 | 2562 | binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, 2563 | "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n", 2564 | proc->pid, thread->pid, 2565 | cmd == BC_REQUEST_DEATH_NOTIFICATION ? 2566 | "BC_REQUEST_DEATH_NOTIFICATION" : 2567 | "BC_CLEAR_DEATH_NOTIFICATION", 2568 | (u64)cookie, ref->debug_id, ref->desc, 2569 | ref->strong, ref->weak, ref->node->debug_id); 2570 | 2571 | if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { 2572 | if (ref->death) { 2573 | binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n", 2574 | proc->pid, thread->pid); 2575 | break; 2576 | } 2577 | death = kzalloc(sizeof(*death), GFP_KERNEL); 2578 | if (death == NULL) { 2579 | thread->return_error = BR_ERROR; 2580 | binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 2581 | "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n", 2582 | proc->pid, thread->pid); 2583 | break; 2584 | } 2585 | binder_stats_created(BINDER_STAT_DEATH); 2586 | INIT_LIST_HEAD(&death->work.entry); 2587 | death->cookie = cookie; 2588 | ref->death = death; 2589 | if (ref->node->proc == NULL) { 2590 | ref->death->work.type = BINDER_WORK_DEAD_BINDER; 2591 | if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { 2592 | list_add_tail(&ref->death->work.entry, &thread->todo); 2593 | } else { 2594 | list_add_tail(&ref->death->work.entry, &proc->todo); 2595 | wake_up_interruptible(&proc->wait); 2596 | } 2597 | } 2598 | } else { 2599 | if (ref->death == NULL) { 2600 | binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n", 2601 | proc->pid, thread->pid); 2602 | break; 2603 | } 2604 | death = ref->death; 2605 | if (death->cookie != cookie) { 2606 | binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n", 2607 | proc->pid, thread->pid, 2608 | (u64)death->cookie, 2609 | (u64)cookie); 2610 | break; 2611 | } 2612 | ref->death = NULL; 2613 | if (list_empty(&death->work.entry)) { 2614 | death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; 2615 | if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { 2616 | list_add_tail(&death->work.entry, &thread->todo); 2617 | } else { 2618 | list_add_tail(&death->work.entry, &proc->todo); 2619 | wake_up_interruptible(&proc->wait); 2620 | } 2621 | } else 
{ 2622 | BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER); 2623 | death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR; 2624 | } 2625 | } 2626 | } break; 2627 | case BC_DEAD_BINDER_DONE: { 2628 | struct binder_work *w; 2629 | binder_uintptr_t cookie; 2630 | struct binder_ref_death *death = NULL; 2631 | 2632 | if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 2633 | return -EFAULT; 2634 | 2635 | ptr += sizeof(cookie); 2636 | list_for_each_entry(w, &proc->delivered_death, entry) { 2637 | struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work); 2638 | 2639 | if (tmp_death->cookie == cookie) { 2640 | death = tmp_death; 2641 | break; 2642 | } 2643 | } 2644 | binder_debug(BINDER_DEBUG_DEAD_BINDER, 2645 | "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n", 2646 | proc->pid, thread->pid, (u64)cookie, 2647 | death); 2648 | if (death == NULL) { 2649 | binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n", 2650 | proc->pid, thread->pid, (u64)cookie); 2651 | break; 2652 | } 2653 | 2654 | list_del_init(&death->work.entry); 2655 | if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) { 2656 | death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; 2657 | if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { 2658 | list_add_tail(&death->work.entry, &thread->todo); 2659 | } else { 2660 | list_add_tail(&death->work.entry, &proc->todo); 2661 | wake_up_interruptible(&proc->wait); 2662 | } 2663 | } 2664 | } break; 2665 | 2666 | default: 2667 | pr_err("%d:%d unknown command %d\n", 2668 | proc->pid, thread->pid, cmd); 2669 | return -EINVAL; 2670 | } 2671 | *consumed = ptr - buffer; 2672 | } 2673 | return 0; 2674 | } 2675 | 2676 | static void binder_stat_br(struct binder_proc *proc, 2677 | struct binder_thread *thread, uint32_t cmd) 2678 | { 2679 | trace_binder_return(cmd); 2680 | if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) { 2681 | binder_stats.br[_IOC_NR(cmd)]++; 2682 | proc->stats.br[_IOC_NR(cmd)]++; 2683 | thread->stats.br[_IOC_NR(cmd)]++; 2684 | } 2685 | } 2686 | 2687 | static int binder_has_proc_work(struct binder_proc *proc, 2688 | struct binder_thread *thread) 2689 | { 2690 | return !list_empty(&proc->todo) || 2691 | (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN); 2692 | } 2693 | 2694 | static int binder_has_thread_work(struct binder_thread *thread) 2695 | { 2696 | return !list_empty(&thread->todo) || thread->return_error != BR_OK || 2697 | (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN); 2698 | } 2699 | 2700 | static int binder_thread_read(struct binder_proc *proc, 2701 | struct binder_thread *thread, 2702 | binder_uintptr_t binder_buffer, size_t size, 2703 | binder_size_t *consumed, int non_block) 2704 | { 2705 | void __user *buffer = (void __user *)(uintptr_t)binder_buffer; 2706 | void __user *ptr = buffer + *consumed; 2707 | void __user *end = buffer + size; 2708 | 2709 | int ret = 0; 2710 | int wait_for_proc_work; 2711 | 2712 | if (*consumed == 0) { 2713 | if (put_user(BR_NOOP, (uint32_t __user *)ptr)) 2714 | return -EFAULT; 2715 | ptr += sizeof(uint32_t); 2716 | } 2717 | 2718 | retry: 2719 | wait_for_proc_work = thread->transaction_stack == NULL && 2720 | list_empty(&thread->todo); 2721 | 2722 | if (thread->return_error != BR_OK && ptr < end) { 2723 | if (thread->return_error2 != BR_OK) { 2724 | if (put_user(thread->return_error2, (uint32_t __user *)ptr)) 2725 | return -EFAULT; 2726 | ptr += sizeof(uint32_t); 2727 | binder_stat_br(proc, thread, thread->return_error2); 2728 | if (ptr == end) 2729 | goto 
done; 2730 | thread->return_error2 = BR_OK; 2731 | } 2732 | if (put_user(thread->return_error, (uint32_t __user *)ptr)) 2733 | return -EFAULT; 2734 | ptr += sizeof(uint32_t); 2735 | binder_stat_br(proc, thread, thread->return_error); 2736 | thread->return_error = BR_OK; 2737 | goto done; 2738 | } 2739 | 2740 | 2741 | thread->looper |= BINDER_LOOPER_STATE_WAITING; 2742 | if (wait_for_proc_work) 2743 | proc->ready_threads++; 2744 | 2745 | binder_unlock(__func__); 2746 | 2747 | trace_binder_wait_for_work(wait_for_proc_work, 2748 | !!thread->transaction_stack, 2749 | !list_empty(&thread->todo)); 2750 | if (wait_for_proc_work) { 2751 | if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | 2752 | BINDER_LOOPER_STATE_ENTERED))) { 2753 | binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n", 2754 | proc->pid, thread->pid, thread->looper); 2755 | wait_event_interruptible(binder_user_error_wait, 2756 | binder_stop_on_user_error < 2); 2757 | } 2758 | binder_set_nice(proc->default_priority); 2759 | if (non_block) { 2760 | if (!binder_has_proc_work(proc, thread)) 2761 | ret = -EAGAIN; 2762 | } else 2763 | ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread)); 2764 | } else { 2765 | if (non_block) { 2766 | if (!binder_has_thread_work(thread)) 2767 | ret = -EAGAIN; 2768 | } else 2769 | ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread)); 2770 | } 2771 | 2772 | binder_lock(__func__); 2773 | 2774 | if (wait_for_proc_work) 2775 | proc->ready_threads--; 2776 | thread->looper &= ~BINDER_LOOPER_STATE_WAITING; 2777 | 2778 | if (ret) 2779 | return ret; 2780 | 2781 | while (1) { 2782 | uint32_t cmd; 2783 | struct binder_transaction_data tr; 2784 | struct binder_work *w; 2785 | struct binder_transaction *t = NULL; 2786 | 2787 | if (!list_empty(&thread->todo)) { 2788 | w = list_first_entry(&thread->todo, struct binder_work, 2789 | entry); 2790 | } else if (!list_empty(&proc->todo) && wait_for_proc_work) { 2791 | w = list_first_entry(&proc->todo, struct binder_work, 2792 | entry); 2793 | } else { 2794 | /* no data added */ 2795 | if (ptr - buffer == 4 && 2796 | !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) 2797 | goto retry; 2798 | break; 2799 | } 2800 | 2801 | if (end - ptr < sizeof(tr) + 4) 2802 | break; 2803 | 2804 | switch (w->type) { 2805 | case BINDER_WORK_TRANSACTION: { 2806 | t = container_of(w, struct binder_transaction, work); 2807 | } break; 2808 | case BINDER_WORK_TRANSACTION_COMPLETE: { 2809 | cmd = BR_TRANSACTION_COMPLETE; 2810 | if (put_user(cmd, (uint32_t __user *)ptr)) 2811 | return -EFAULT; 2812 | ptr += sizeof(uint32_t); 2813 | 2814 | binder_stat_br(proc, thread, cmd); 2815 | binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE, 2816 | "%d:%d BR_TRANSACTION_COMPLETE\n", 2817 | proc->pid, thread->pid); 2818 | 2819 | list_del(&w->entry); 2820 | kfree(w); 2821 | binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 2822 | } break; 2823 | case BINDER_WORK_NODE: { 2824 | struct binder_node *node = container_of(w, struct binder_node, work); 2825 | uint32_t cmd = BR_NOOP; 2826 | const char *cmd_name; 2827 | int strong = node->internal_strong_refs || node->local_strong_refs; 2828 | int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong; 2829 | 2830 | if (weak && !node->has_weak_ref) { 2831 | cmd = BR_INCREFS; 2832 | cmd_name = "BR_INCREFS"; 2833 | node->has_weak_ref = 1; 2834 | node->pending_weak_ref = 1; 2835 | node->local_weak_refs++; 2836 | } else if 
(strong && !node->has_strong_ref) { 2837 | cmd = BR_ACQUIRE; 2838 | cmd_name = "BR_ACQUIRE"; 2839 | node->has_strong_ref = 1; 2840 | node->pending_strong_ref = 1; 2841 | node->local_strong_refs++; 2842 | } else if (!strong && node->has_strong_ref) { 2843 | cmd = BR_RELEASE; 2844 | cmd_name = "BR_RELEASE"; 2845 | node->has_strong_ref = 0; 2846 | } else if (!weak && node->has_weak_ref) { 2847 | cmd = BR_DECREFS; 2848 | cmd_name = "BR_DECREFS"; 2849 | node->has_weak_ref = 0; 2850 | } 2851 | if (cmd != BR_NOOP) { 2852 | if (put_user(cmd, (uint32_t __user *)ptr)) 2853 | return -EFAULT; 2854 | ptr += sizeof(uint32_t); 2855 | if (put_user(node->ptr, 2856 | (binder_uintptr_t __user *)ptr)) 2857 | return -EFAULT; 2858 | ptr += sizeof(binder_uintptr_t); 2859 | if (put_user(node->cookie, 2860 | (binder_uintptr_t __user *)ptr)) 2861 | return -EFAULT; 2862 | ptr += sizeof(binder_uintptr_t); 2863 | 2864 | binder_stat_br(proc, thread, cmd); 2865 | binder_debug(BINDER_DEBUG_USER_REFS, 2866 | "%d:%d %s %d u%016llx c%016llx\n", 2867 | proc->pid, thread->pid, cmd_name, 2868 | node->debug_id, 2869 | (u64)node->ptr, (u64)node->cookie); 2870 | } else { 2871 | list_del_init(&w->entry); 2872 | if (!weak && !strong) { 2873 | binder_debug(BINDER_DEBUG_INTERNAL_REFS, 2874 | "%d:%d node %d u%016llx c%016llx deleted\n", 2875 | proc->pid, thread->pid, 2876 | node->debug_id, 2877 | (u64)node->ptr, 2878 | (u64)node->cookie); 2879 | rb_erase(&node->rb_node, &proc->nodes); 2880 | kfree(node); 2881 | binder_stats_deleted(BINDER_STAT_NODE); 2882 | } else { 2883 | binder_debug(BINDER_DEBUG_INTERNAL_REFS, 2884 | "%d:%d node %d u%016llx c%016llx state unchanged\n", 2885 | proc->pid, thread->pid, 2886 | node->debug_id, 2887 | (u64)node->ptr, 2888 | (u64)node->cookie); 2889 | } 2890 | } 2891 | } break; 2892 | case BINDER_WORK_DEAD_BINDER: 2893 | case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 2894 | case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { 2895 | struct binder_ref_death *death; 2896 | uint32_t cmd; 2897 | 2898 | death = container_of(w, struct binder_ref_death, work); 2899 | if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) 2900 | cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE; 2901 | else 2902 | cmd = BR_DEAD_BINDER; 2903 | if (put_user(cmd, (uint32_t __user *)ptr)) 2904 | return -EFAULT; 2905 | ptr += sizeof(uint32_t); 2906 | if (put_user(death->cookie, 2907 | (binder_uintptr_t __user *)ptr)) 2908 | return -EFAULT; 2909 | ptr += sizeof(binder_uintptr_t); 2910 | binder_stat_br(proc, thread, cmd); 2911 | binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, 2912 | "%d:%d %s %016llx\n", 2913 | proc->pid, thread->pid, 2914 | cmd == BR_DEAD_BINDER ? 
2915 | "BR_DEAD_BINDER" : 2916 | "BR_CLEAR_DEATH_NOTIFICATION_DONE", 2917 | (u64)death->cookie); 2918 | 2919 | if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) { 2920 | list_del(&w->entry); 2921 | kfree(death); 2922 | binder_stats_deleted(BINDER_STAT_DEATH); 2923 | } else 2924 | list_move(&w->entry, &proc->delivered_death); 2925 | if (cmd == BR_DEAD_BINDER) 2926 | goto done; /* DEAD_BINDER notifications can cause transactions */ 2927 | } break; 2928 | } 2929 | 2930 | if (!t) 2931 | continue; 2932 | 2933 | BUG_ON(t->buffer == NULL); 2934 | if (t->buffer->target_node) { 2935 | struct binder_node *target_node = t->buffer->target_node; 2936 | 2937 | tr.target.ptr = target_node->ptr; 2938 | tr.cookie = target_node->cookie; 2939 | t->saved_priority = task_nice(current); 2940 | if (t->priority < target_node->min_priority && 2941 | !(t->flags & TF_ONE_WAY)) 2942 | binder_set_nice(t->priority); 2943 | else if (!(t->flags & TF_ONE_WAY) || 2944 | t->saved_priority > target_node->min_priority) 2945 | binder_set_nice(target_node->min_priority); 2946 | cmd = BR_TRANSACTION; 2947 | } else { 2948 | tr.target.ptr = 0; 2949 | tr.cookie = 0; 2950 | cmd = BR_REPLY; 2951 | } 2952 | tr.code = t->code; 2953 | tr.flags = t->flags; 2954 | tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid); 2955 | 2956 | if (t->from) { 2957 | struct task_struct *sender = t->from->proc->tsk; 2958 | 2959 | tr.sender_pid = task_tgid_nr_ns(sender, 2960 | task_active_pid_ns(current)); 2961 | } else { 2962 | tr.sender_pid = 0; 2963 | } 2964 | 2965 | tr.data_size = t->buffer->data_size; 2966 | tr.offsets_size = t->buffer->offsets_size; 2967 | tr.data.ptr.buffer = (binder_uintptr_t)( 2968 | (uintptr_t)t->buffer->data + 2969 | proc->user_buffer_offset); 2970 | tr.data.ptr.offsets = tr.data.ptr.buffer + 2971 | ALIGN(t->buffer->data_size, 2972 | sizeof(void *)); 2973 | 2974 | if (put_user(cmd, (uint32_t __user *)ptr)) 2975 | return -EFAULT; 2976 | ptr += sizeof(uint32_t); 2977 | if (copy_to_user(ptr, &tr, sizeof(tr))) 2978 | return -EFAULT; 2979 | ptr += sizeof(tr); 2980 | 2981 | trace_binder_transaction_received(t); 2982 | binder_stat_br(proc, thread, cmd); 2983 | binder_debug(BINDER_DEBUG_TRANSACTION, 2984 | "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n", 2985 | proc->pid, thread->pid, 2986 | (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : 2987 | "BR_REPLY", 2988 | t->debug_id, t->from ? t->from->proc->pid : 0, 2989 | t->from ? 
t->from->pid : 0, cmd, 2990 | t->buffer->data_size, t->buffer->offsets_size, 2991 | (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets); 2992 | 2993 | list_del(&t->work.entry); 2994 | t->buffer->allow_user_free = 1; 2995 | if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) { 2996 | t->to_parent = thread->transaction_stack; 2997 | t->to_thread = thread; 2998 | thread->transaction_stack = t; 2999 | } else { 3000 | t->buffer->transaction = NULL; 3001 | kfree(t); 3002 | binder_stats_deleted(BINDER_STAT_TRANSACTION); 3003 | } 3004 | break; 3005 | } 3006 | 3007 | done: 3008 | 3009 | *consumed = ptr - buffer; 3010 | if (proc->requested_threads + proc->ready_threads == 0 && 3011 | proc->requested_threads_started < proc->max_threads && 3012 | (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | 3013 | BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */ 3014 | /*spawn a new thread if we leave this out */) { 3015 | proc->requested_threads++; 3016 | binder_debug(BINDER_DEBUG_THREADS, 3017 | "%d:%d BR_SPAWN_LOOPER\n", 3018 | proc->pid, thread->pid); 3019 | if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer)) 3020 | return -EFAULT; 3021 | binder_stat_br(proc, thread, BR_SPAWN_LOOPER); 3022 | } 3023 | return 0; 3024 | } 3025 | 3026 | static void binder_release_work(struct list_head *list) 3027 | { 3028 | struct binder_work *w; 3029 | 3030 | while (!list_empty(list)) { 3031 | w = list_first_entry(list, struct binder_work, entry); 3032 | list_del_init(&w->entry); 3033 | switch (w->type) { 3034 | case BINDER_WORK_TRANSACTION: { 3035 | struct binder_transaction *t; 3036 | 3037 | t = container_of(w, struct binder_transaction, work); 3038 | if (t->buffer->target_node && 3039 | !(t->flags & TF_ONE_WAY)) { 3040 | binder_send_failed_reply(t, BR_DEAD_REPLY); 3041 | } else { 3042 | binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 3043 | "undelivered transaction %d\n", 3044 | t->debug_id); 3045 | t->buffer->transaction = NULL; 3046 | kfree(t); 3047 | binder_stats_deleted(BINDER_STAT_TRANSACTION); 3048 | } 3049 | } break; 3050 | case BINDER_WORK_TRANSACTION_COMPLETE: { 3051 | binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 3052 | "undelivered TRANSACTION_COMPLETE\n"); 3053 | kfree(w); 3054 | binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 3055 | } break; 3056 | case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 3057 | case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { 3058 | struct binder_ref_death *death; 3059 | 3060 | death = container_of(w, struct binder_ref_death, work); 3061 | binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 3062 | "undelivered death notification, %016llx\n", 3063 | (u64)death->cookie); 3064 | kfree(death); 3065 | binder_stats_deleted(BINDER_STAT_DEATH); 3066 | } break; 3067 | default: 3068 | pr_err("unexpected work type, %d, not freed\n", 3069 | w->type); 3070 | break; 3071 | } 3072 | } 3073 | 3074 | } 3075 | 3076 | static struct binder_thread *binder_get_thread(struct binder_proc *proc) 3077 | { 3078 | struct binder_thread *thread = NULL; 3079 | struct rb_node *parent = NULL; 3080 | struct rb_node **p = &proc->threads.rb_node; 3081 | 3082 | while (*p) { 3083 | parent = *p; 3084 | thread = rb_entry(parent, struct binder_thread, rb_node); 3085 | 3086 | if (current->pid < thread->pid) 3087 | p = &(*p)->rb_left; 3088 | else if (current->pid > thread->pid) 3089 | p = &(*p)->rb_right; 3090 | else 3091 | break; 3092 | } 3093 | if (*p == NULL) { 3094 | thread = kzalloc(sizeof(*thread), GFP_KERNEL); 3095 | if (thread == NULL) 3096 | return NULL; 3097 | binder_stats_created(BINDER_STAT_THREAD); 3098 | 
thread->proc = proc; 3099 | thread->pid = current->pid; 3100 | init_waitqueue_head(&thread->wait); 3101 | INIT_LIST_HEAD(&thread->todo); 3102 | rb_link_node(&thread->rb_node, parent, p); 3103 | rb_insert_color(&thread->rb_node, &proc->threads); 3104 | thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; 3105 | thread->return_error = BR_OK; 3106 | thread->return_error2 = BR_OK; 3107 | } 3108 | return thread; 3109 | } 3110 | 3111 | static int binder_free_thread(struct binder_proc *proc, 3112 | struct binder_thread *thread) 3113 | { 3114 | struct binder_transaction *t; 3115 | struct binder_transaction *send_reply = NULL; 3116 | int active_transactions = 0; 3117 | 3118 | rb_erase(&thread->rb_node, &proc->threads); 3119 | t = thread->transaction_stack; 3120 | if (t && t->to_thread == thread) 3121 | send_reply = t; 3122 | while (t) { 3123 | active_transactions++; 3124 | binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 3125 | "release %d:%d transaction %d %s, still active\n", 3126 | proc->pid, thread->pid, 3127 | t->debug_id, 3128 | (t->to_thread == thread) ? "in" : "out"); 3129 | 3130 | if (t->to_thread == thread) { 3131 | t->to_proc = NULL; 3132 | t->to_thread = NULL; 3133 | if (t->buffer) { 3134 | t->buffer->transaction = NULL; 3135 | t->buffer = NULL; 3136 | } 3137 | t = t->to_parent; 3138 | } else if (t->from == thread) { 3139 | t->from = NULL; 3140 | t = t->from_parent; 3141 | } else 3142 | BUG(); 3143 | } 3144 | if (send_reply) 3145 | binder_send_failed_reply(send_reply, BR_DEAD_REPLY); 3146 | binder_release_work(&thread->todo); 3147 | kfree(thread); 3148 | binder_stats_deleted(BINDER_STAT_THREAD); 3149 | return active_transactions; 3150 | } 3151 | 3152 | static unsigned int binder_poll(struct file *filp, 3153 | struct poll_table_struct *wait) 3154 | { 3155 | struct binder_proc *proc = filp->private_data; 3156 | struct binder_thread *thread = NULL; 3157 | int wait_for_proc_work; 3158 | 3159 | binder_lock(__func__); 3160 | 3161 | thread = binder_get_thread(proc); 3162 | 3163 | wait_for_proc_work = thread->transaction_stack == NULL && 3164 | list_empty(&thread->todo) && thread->return_error == BR_OK; 3165 | 3166 | binder_unlock(__func__); 3167 | 3168 | if (wait_for_proc_work) { 3169 | if (binder_has_proc_work(proc, thread)) 3170 | return POLLIN; 3171 | poll_wait(filp, &proc->wait, wait); 3172 | if (binder_has_proc_work(proc, thread)) 3173 | return POLLIN; 3174 | } else { 3175 | if (binder_has_thread_work(thread)) 3176 | return POLLIN; 3177 | poll_wait(filp, &thread->wait, wait); 3178 | if (binder_has_thread_work(thread)) 3179 | return POLLIN; 3180 | } 3181 | return 0; 3182 | } 3183 | 3184 | static int binder_ioctl_write_read(struct file *filp, 3185 | unsigned int cmd, unsigned long arg, 3186 | struct binder_thread *thread) 3187 | { 3188 | int ret = 0; 3189 | struct binder_proc *proc = filp->private_data; 3190 | unsigned int size = _IOC_SIZE(cmd); 3191 | void __user *ubuf = (void __user *)arg; 3192 | struct binder_write_read bwr; 3193 | 3194 | if (size != sizeof(struct binder_write_read)) { 3195 | ret = -EINVAL; 3196 | goto out; 3197 | } 3198 | if (copy_from_user(&bwr, ubuf, sizeof(bwr))) { 3199 | ret = -EFAULT; 3200 | goto out; 3201 | } 3202 | binder_debug(BINDER_DEBUG_READ_WRITE, 3203 | "%d:%d write %lld at %016llx, read %lld at %016llx\n", 3204 | proc->pid, thread->pid, 3205 | (u64)bwr.write_size, (u64)bwr.write_buffer, 3206 | (u64)bwr.read_size, (u64)bwr.read_buffer); 3207 | 3208 | if (bwr.write_size > 0) { 3209 | ret = binder_thread_write(proc, thread, 3210 | bwr.write_buffer, 3211 | 
bwr.write_size, 3212 | &bwr.write_consumed); 3213 | trace_binder_write_done(ret); 3214 | if (ret < 0) { 3215 | bwr.read_consumed = 0; 3216 | if (copy_to_user(ubuf, &bwr, sizeof(bwr))) 3217 | ret = -EFAULT; 3218 | goto out; 3219 | } 3220 | } 3221 | if (bwr.read_size > 0) { 3222 | ret = binder_thread_read(proc, thread, bwr.read_buffer, 3223 | bwr.read_size, 3224 | &bwr.read_consumed, 3225 | filp->f_flags & O_NONBLOCK); 3226 | trace_binder_read_done(ret); 3227 | if (!list_empty(&proc->todo)) 3228 | wake_up_interruptible(&proc->wait); 3229 | if (ret < 0) { 3230 | if (copy_to_user(ubuf, &bwr, sizeof(bwr))) 3231 | ret = -EFAULT; 3232 | goto out; 3233 | } 3234 | } 3235 | binder_debug(BINDER_DEBUG_READ_WRITE, 3236 | "%d:%d wrote %lld of %lld, read return %lld of %lld\n", 3237 | proc->pid, thread->pid, 3238 | (u64)bwr.write_consumed, (u64)bwr.write_size, 3239 | (u64)bwr.read_consumed, (u64)bwr.read_size); 3240 | if (copy_to_user(ubuf, &bwr, sizeof(bwr))) { 3241 | ret = -EFAULT; 3242 | goto out; 3243 | } 3244 | out: 3245 | return ret; 3246 | } 3247 | 3248 | static int binder_ioctl_set_ctx_mgr(struct file *filp) 3249 | { 3250 | int ret = 0; 3251 | struct binder_proc *proc = filp->private_data; 3252 | struct binder_context *context = proc->context; 3253 | 3254 | kuid_t curr_euid = current_euid(); 3255 | 3256 | if (context->binder_context_mgr_node) { 3257 | pr_err("BINDER_SET_CONTEXT_MGR already set\n"); 3258 | ret = -EBUSY; 3259 | goto out; 3260 | } 3261 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0) 3262 | ret = security_binder_set_context_mgr(proc->tsk); 3263 | if (ret < 0) 3264 | goto out; 3265 | #endif 3266 | if (uid_valid(context->binder_context_mgr_uid)) { 3267 | if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) { 3268 | pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n", 3269 | from_kuid(&init_user_ns, curr_euid), 3270 | from_kuid(&init_user_ns, 3271 | context->binder_context_mgr_uid)); 3272 | ret = -EPERM; 3273 | goto out; 3274 | } 3275 | } else { 3276 | context->binder_context_mgr_uid = curr_euid; 3277 | } 3278 | context->binder_context_mgr_node = binder_new_node(proc, 0, 0); 3279 | if (!context->binder_context_mgr_node) { 3280 | ret = -ENOMEM; 3281 | goto out; 3282 | } 3283 | context->binder_context_mgr_node->local_weak_refs++; 3284 | context->binder_context_mgr_node->local_strong_refs++; 3285 | context->binder_context_mgr_node->has_strong_ref = 1; 3286 | context->binder_context_mgr_node->has_weak_ref = 1; 3287 | out: 3288 | return ret; 3289 | } 3290 | 3291 | static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 3292 | { 3293 | int ret; 3294 | struct binder_proc *proc = filp->private_data; 3295 | struct binder_thread *thread; 3296 | unsigned int size = _IOC_SIZE(cmd); 3297 | void __user *ubuf = (void __user *)arg; 3298 | 3299 | /*pr_info("binder_ioctl: %d:%d %x %lx\n", 3300 | proc->pid, current->pid, cmd, arg);*/ 3301 | 3302 | trace_binder_ioctl(cmd, arg); 3303 | 3304 | ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); 3305 | if (ret) 3306 | goto err_unlocked; 3307 | 3308 | binder_lock(__func__); 3309 | thread = binder_get_thread(proc); 3310 | if (thread == NULL) { 3311 | ret = -ENOMEM; 3312 | goto err; 3313 | } 3314 | 3315 | switch (cmd) { 3316 | case BINDER_WRITE_READ: 3317 | ret = binder_ioctl_write_read(filp, cmd, arg, thread); 3318 | if (ret) 3319 | goto err; 3320 | break; 3321 | case BINDER_SET_MAX_THREADS: 3322 | if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) { 3323 | ret = -EINVAL; 
3324 | goto err; 3325 | } 3326 | break; 3327 | case BINDER_SET_CONTEXT_MGR: 3328 | ret = binder_ioctl_set_ctx_mgr(filp); 3329 | if (ret) 3330 | goto err; 3331 | break; 3332 | case BINDER_THREAD_EXIT: 3333 | binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n", 3334 | proc->pid, thread->pid); 3335 | binder_free_thread(proc, thread); 3336 | thread = NULL; 3337 | break; 3338 | case BINDER_VERSION: { 3339 | struct binder_version __user *ver = ubuf; 3340 | 3341 | if (size != sizeof(struct binder_version)) { 3342 | ret = -EINVAL; 3343 | goto err; 3344 | } 3345 | if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, 3346 | &ver->protocol_version)) { 3347 | ret = -EINVAL; 3348 | goto err; 3349 | } 3350 | break; 3351 | } 3352 | default: 3353 | ret = -EINVAL; 3354 | goto err; 3355 | } 3356 | ret = 0; 3357 | err: 3358 | if (thread) 3359 | thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN; 3360 | binder_unlock(__func__); 3361 | wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); 3362 | if (ret && ret != -ERESTARTSYS) 3363 | pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); 3364 | err_unlocked: 3365 | trace_binder_ioctl_done(ret); 3366 | return ret; 3367 | } 3368 | 3369 | static void binder_vma_open(struct vm_area_struct *vma) 3370 | { 3371 | struct binder_proc *proc = vma->vm_private_data; 3372 | 3373 | binder_debug(BINDER_DEBUG_OPEN_CLOSE, 3374 | "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", 3375 | proc->pid, vma->vm_start, vma->vm_end, 3376 | (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 3377 | (unsigned long)pgprot_val(vma->vm_page_prot)); 3378 | } 3379 | 3380 | static void binder_vma_close(struct vm_area_struct *vma) 3381 | { 3382 | struct binder_proc *proc = vma->vm_private_data; 3383 | 3384 | binder_debug(BINDER_DEBUG_OPEN_CLOSE, 3385 | "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", 3386 | proc->pid, vma->vm_start, vma->vm_end, 3387 | (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 3388 | (unsigned long)pgprot_val(vma->vm_page_prot)); 3389 | proc->vma = NULL; 3390 | proc->vma_vm_mm = NULL; 3391 | binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES); 3392 | } 3393 | 3394 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0) 3395 | static vm_fault_t binder_vm_fault(struct vm_fault *vmf) 3396 | #elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) 3397 | static int binder_vm_fault(struct vm_fault *vmf) 3398 | #else 3399 | static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 3400 | #endif 3401 | { 3402 | return VM_FAULT_SIGBUS; 3403 | } 3404 | 3405 | static const struct vm_operations_struct binder_vm_ops = { 3406 | .open = binder_vma_open, 3407 | .close = binder_vma_close, 3408 | .fault = binder_vm_fault, 3409 | }; 3410 | 3411 | static int binder_mmap(struct file *filp, struct vm_area_struct *vma) 3412 | { 3413 | int ret; 3414 | struct vm_struct *area; 3415 | struct binder_proc *proc = filp->private_data; 3416 | const char *failure_string; 3417 | struct binder_buffer *buffer; 3418 | 3419 | if (proc->tsk != current) 3420 | return -EINVAL; 3421 | 3422 | if ((vma->vm_end - vma->vm_start) > SZ_4M) 3423 | vma->vm_end = vma->vm_start + SZ_4M; 3424 | 3425 | binder_debug(BINDER_DEBUG_OPEN_CLOSE, 3426 | "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", 3427 | proc->pid, vma->vm_start, vma->vm_end, 3428 | (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 3429 | (unsigned long)pgprot_val(vma->vm_page_prot)); 3430 | 3431 | if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) { 3432 | ret = -EPERM; 3433 | 
failure_string = "bad vm_flags"; 3434 | goto err_bad_arg; 3435 | } 3436 | vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE; 3437 | 3438 | mutex_lock(&binder_mmap_lock); 3439 | if (proc->buffer) { 3440 | ret = -EBUSY; 3441 | failure_string = "already mapped"; 3442 | goto err_already_mapped; 3443 | } 3444 | 3445 | area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP); 3446 | if (area == NULL) { 3447 | ret = -ENOMEM; 3448 | failure_string = "get_vm_area"; 3449 | goto err_get_vm_area_failed; 3450 | } 3451 | proc->buffer = area->addr; 3452 | proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer; 3453 | mutex_unlock(&binder_mmap_lock); 3454 | 3455 | #ifdef CONFIG_CPU_CACHE_VIPT 3456 | if (cache_is_vipt_aliasing()) { 3457 | while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) { 3458 | pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer); 3459 | vma->vm_start += PAGE_SIZE; 3460 | } 3461 | } 3462 | #endif 3463 | proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL); 3464 | if (proc->pages == NULL) { 3465 | ret = -ENOMEM; 3466 | failure_string = "alloc page array"; 3467 | goto err_alloc_pages_failed; 3468 | } 3469 | proc->buffer_size = vma->vm_end - vma->vm_start; 3470 | 3471 | vma->vm_ops = &binder_vm_ops; 3472 | vma->vm_private_data = proc; 3473 | 3474 | if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) { 3475 | ret = -ENOMEM; 3476 | failure_string = "alloc small buf"; 3477 | goto err_alloc_small_buf_failed; 3478 | } 3479 | buffer = proc->buffer; 3480 | INIT_LIST_HEAD(&proc->buffers); 3481 | list_add(&buffer->entry, &proc->buffers); 3482 | buffer->free = 1; 3483 | binder_insert_free_buffer(proc, buffer); 3484 | proc->free_async_space = proc->buffer_size / 2; 3485 | barrier(); 3486 | proc->files = get_files_struct(current); 3487 | proc->vma = vma; 3488 | proc->vma_vm_mm = vma->vm_mm; 3489 | 3490 | /*pr_info("binder_mmap: %d %lx-%lx maps %p\n", 3491 | proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/ 3492 | return 0; 3493 | 3494 | err_alloc_small_buf_failed: 3495 | kfree(proc->pages); 3496 | proc->pages = NULL; 3497 | err_alloc_pages_failed: 3498 | mutex_lock(&binder_mmap_lock); 3499 | vfree(proc->buffer); 3500 | proc->buffer = NULL; 3501 | err_get_vm_area_failed: 3502 | err_already_mapped: 3503 | mutex_unlock(&binder_mmap_lock); 3504 | err_bad_arg: 3505 | pr_err("binder_mmap: %d %lx-%lx %s failed %d\n", 3506 | proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); 3507 | return ret; 3508 | } 3509 | 3510 | static int binder_open(struct inode *nodp, struct file *filp) 3511 | { 3512 | int minor = iminor(nodp); 3513 | struct hlist_node *tmp; 3514 | struct binder_proc *proc; 3515 | struct binder_device *binder_dev; 3516 | 3517 | binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n", 3518 | current->group_leader->pid, current->pid); 3519 | 3520 | proc = kzalloc(sizeof(*proc), GFP_KERNEL); 3521 | if (proc == NULL) 3522 | return -ENOMEM; 3523 | get_task_struct(current->group_leader); 3524 | proc->tsk = current->group_leader; 3525 | INIT_LIST_HEAD(&proc->todo); 3526 | init_waitqueue_head(&proc->wait); 3527 | proc->default_priority = task_nice(current); 3528 | 3529 | mutex_lock(&binder_devices_mtx); 3530 | hlist_for_each_entry_safe(binder_dev, tmp, &binder_devices, hlist) { 3531 | if (MINOR(binder_dev->cdev.dev) == minor) 3532 | break; 3533 | binder_dev = NULL; 3534 | } 3535 | 
mutex_unlock(&binder_devices_mtx); 3536 | if (!binder_dev) 3537 | BUG(); 3538 | 3539 | filp->private_data = &binder_dev->class_dev; 3540 | proc->context = &binder_dev->context; 3541 | 3542 | binder_lock(__func__); 3543 | 3544 | binder_stats_created(BINDER_STAT_PROC); 3545 | hlist_add_head(&proc->proc_node, &binder_procs); 3546 | proc->pid = current->group_leader->pid; 3547 | INIT_LIST_HEAD(&proc->delivered_death); 3548 | filp->private_data = proc; 3549 | 3550 | binder_unlock(__func__); 3551 | 3552 | if (binder_debugfs_dir_entry_proc) { 3553 | char strbuf[11]; 3554 | 3555 | snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); 3556 | /* 3557 | * proc debug entries are shared between contexts, so 3558 | * this will fail if the process tries to open the driver 3559 | * again with a different context. The printing code will 3560 | * anyway print all contexts that a given PID has, so this 3561 | * is not a problem. 3562 | */ 3563 | proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO, 3564 | binder_debugfs_dir_entry_proc, 3565 | (void *)(unsigned long)proc->pid, 3566 | &binder_proc_fops); 3567 | } 3568 | 3569 | return 0; 3570 | } 3571 | 3572 | static int binder_flush(struct file *filp, fl_owner_t id) 3573 | { 3574 | struct binder_proc *proc = filp->private_data; 3575 | 3576 | binder_defer_work(proc, BINDER_DEFERRED_FLUSH); 3577 | 3578 | return 0; 3579 | } 3580 | 3581 | static void binder_deferred_flush(struct binder_proc *proc) 3582 | { 3583 | struct rb_node *n; 3584 | int wake_count = 0; 3585 | 3586 | for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { 3587 | struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); 3588 | 3589 | thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; 3590 | if (thread->looper & BINDER_LOOPER_STATE_WAITING) { 3591 | wake_up_interruptible(&thread->wait); 3592 | wake_count++; 3593 | } 3594 | } 3595 | wake_up_interruptible_all(&proc->wait); 3596 | 3597 | binder_debug(BINDER_DEBUG_OPEN_CLOSE, 3598 | "binder_flush: %d woke %d threads\n", proc->pid, 3599 | wake_count); 3600 | } 3601 | 3602 | static int binder_release(struct inode *nodp, struct file *filp) 3603 | { 3604 | struct binder_proc *proc = filp->private_data; 3605 | 3606 | debugfs_remove(proc->debugfs_entry); 3607 | binder_defer_work(proc, BINDER_DEFERRED_RELEASE); 3608 | 3609 | return 0; 3610 | } 3611 | 3612 | static int binder_node_release(struct binder_node *node, int refs) 3613 | { 3614 | struct binder_ref *ref; 3615 | int death = 0; 3616 | 3617 | list_del_init(&node->work.entry); 3618 | binder_release_work(&node->async_todo); 3619 | 3620 | if (hlist_empty(&node->refs)) { 3621 | kfree(node); 3622 | binder_stats_deleted(BINDER_STAT_NODE); 3623 | 3624 | return refs; 3625 | } 3626 | 3627 | node->proc = NULL; 3628 | node->local_strong_refs = 0; 3629 | node->local_weak_refs = 0; 3630 | hlist_add_head(&node->dead_node, &binder_dead_nodes); 3631 | 3632 | hlist_for_each_entry(ref, &node->refs, node_entry) { 3633 | refs++; 3634 | 3635 | if (!ref->death) 3636 | continue; 3637 | 3638 | death++; 3639 | 3640 | if (list_empty(&ref->death->work.entry)) { 3641 | ref->death->work.type = BINDER_WORK_DEAD_BINDER; 3642 | list_add_tail(&ref->death->work.entry, 3643 | &ref->proc->todo); 3644 | wake_up_interruptible(&ref->proc->wait); 3645 | } else 3646 | BUG(); 3647 | } 3648 | 3649 | binder_debug(BINDER_DEBUG_DEAD_BINDER, 3650 | "node %d now dead, refs %d, death %d\n", 3651 | node->debug_id, refs, death); 3652 | 3653 | return refs; 3654 | } 3655 | 3656 | static void binder_deferred_release(struct 
binder_proc *proc) 3657 | { 3658 | struct binder_transaction *t; 3659 | struct binder_context *context = proc->context; 3660 | struct rb_node *n; 3661 | int threads, nodes, incoming_refs, outgoing_refs, buffers, 3662 | active_transactions, page_count; 3663 | 3664 | BUG_ON(proc->vma); 3665 | BUG_ON(proc->files); 3666 | 3667 | hlist_del(&proc->proc_node); 3668 | 3669 | if (context->binder_context_mgr_node && 3670 | context->binder_context_mgr_node->proc == proc) { 3671 | binder_debug(BINDER_DEBUG_DEAD_BINDER, 3672 | "%s: %d context_mgr_node gone\n", 3673 | __func__, proc->pid); 3674 | context->binder_context_mgr_node = NULL; 3675 | } 3676 | 3677 | threads = 0; 3678 | active_transactions = 0; 3679 | while ((n = rb_first(&proc->threads))) { 3680 | struct binder_thread *thread; 3681 | 3682 | thread = rb_entry(n, struct binder_thread, rb_node); 3683 | threads++; 3684 | active_transactions += binder_free_thread(proc, thread); 3685 | } 3686 | 3687 | nodes = 0; 3688 | incoming_refs = 0; 3689 | while ((n = rb_first(&proc->nodes))) { 3690 | struct binder_node *node; 3691 | 3692 | node = rb_entry(n, struct binder_node, rb_node); 3693 | nodes++; 3694 | rb_erase(&node->rb_node, &proc->nodes); 3695 | incoming_refs = binder_node_release(node, incoming_refs); 3696 | } 3697 | 3698 | outgoing_refs = 0; 3699 | while ((n = rb_first(&proc->refs_by_desc))) { 3700 | struct binder_ref *ref; 3701 | 3702 | ref = rb_entry(n, struct binder_ref, rb_node_desc); 3703 | outgoing_refs++; 3704 | binder_delete_ref(ref); 3705 | } 3706 | 3707 | binder_release_work(&proc->todo); 3708 | binder_release_work(&proc->delivered_death); 3709 | 3710 | buffers = 0; 3711 | while ((n = rb_first(&proc->allocated_buffers))) { 3712 | struct binder_buffer *buffer; 3713 | 3714 | buffer = rb_entry(n, struct binder_buffer, rb_node); 3715 | 3716 | t = buffer->transaction; 3717 | if (t) { 3718 | t->buffer = NULL; 3719 | buffer->transaction = NULL; 3720 | pr_err("release proc %d, transaction %d, not freed\n", 3721 | proc->pid, t->debug_id); 3722 | /*BUG();*/ 3723 | } 3724 | 3725 | binder_free_buf(proc, buffer); 3726 | buffers++; 3727 | } 3728 | 3729 | binder_stats_deleted(BINDER_STAT_PROC); 3730 | 3731 | page_count = 0; 3732 | if (proc->pages) { 3733 | int i; 3734 | 3735 | for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) { 3736 | void *page_addr; 3737 | 3738 | if (!proc->pages[i]) 3739 | continue; 3740 | 3741 | page_addr = proc->buffer + i * PAGE_SIZE; 3742 | binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 3743 | "%s: %d: page %d at %p not freed\n", 3744 | __func__, proc->pid, i, page_addr); 3745 | unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); 3746 | __free_page(proc->pages[i]); 3747 | page_count++; 3748 | } 3749 | kfree(proc->pages); 3750 | vfree(proc->buffer); 3751 | } 3752 | 3753 | put_task_struct(proc->tsk); 3754 | 3755 | binder_debug(BINDER_DEBUG_OPEN_CLOSE, 3756 | "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n", 3757 | __func__, proc->pid, threads, nodes, incoming_refs, 3758 | outgoing_refs, active_transactions, buffers, page_count); 3759 | 3760 | kfree(proc); 3761 | } 3762 | 3763 | static void binder_deferred_func(struct work_struct *work) 3764 | { 3765 | struct binder_proc *proc; 3766 | struct files_struct *files; 3767 | 3768 | int defer; 3769 | 3770 | do { 3771 | binder_lock(__func__); 3772 | mutex_lock(&binder_deferred_lock); 3773 | if (!hlist_empty(&binder_deferred_list)) { 3774 | proc = hlist_entry(binder_deferred_list.first, 3775 | struct binder_proc, deferred_work_node); 3776 | 
hlist_del_init(&proc->deferred_work_node); 3777 | defer = proc->deferred_work; 3778 | proc->deferred_work = 0; 3779 | } else { 3780 | proc = NULL; 3781 | defer = 0; 3782 | } 3783 | mutex_unlock(&binder_deferred_lock); 3784 | 3785 | files = NULL; 3786 | if (defer & BINDER_DEFERRED_PUT_FILES) { 3787 | files = proc->files; 3788 | if (files) 3789 | proc->files = NULL; 3790 | } 3791 | 3792 | if (defer & BINDER_DEFERRED_FLUSH) 3793 | binder_deferred_flush(proc); 3794 | 3795 | if (defer & BINDER_DEFERRED_RELEASE) 3796 | binder_deferred_release(proc); /* frees proc */ 3797 | 3798 | binder_unlock(__func__); 3799 | if (files) 3800 | put_files_struct(files); 3801 | } while (proc); 3802 | } 3803 | static DECLARE_WORK(binder_deferred_work, binder_deferred_func); 3804 | 3805 | static void 3806 | binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer) 3807 | { 3808 | mutex_lock(&binder_deferred_lock); 3809 | proc->deferred_work |= defer; 3810 | if (hlist_unhashed(&proc->deferred_work_node)) { 3811 | hlist_add_head(&proc->deferred_work_node, 3812 | &binder_deferred_list); 3813 | schedule_work(&binder_deferred_work); 3814 | } 3815 | mutex_unlock(&binder_deferred_lock); 3816 | } 3817 | 3818 | static void print_binder_transaction(struct seq_file *m, const char *prefix, 3819 | struct binder_transaction *t) 3820 | { 3821 | seq_printf(m, 3822 | "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d", 3823 | prefix, t->debug_id, t, 3824 | t->from ? t->from->proc->pid : 0, 3825 | t->from ? t->from->pid : 0, 3826 | t->to_proc ? t->to_proc->pid : 0, 3827 | t->to_thread ? t->to_thread->pid : 0, 3828 | t->code, t->flags, t->priority, t->need_reply); 3829 | if (t->buffer == NULL) { 3830 | seq_puts(m, " buffer free\n"); 3831 | return; 3832 | } 3833 | if (t->buffer->target_node) 3834 | seq_printf(m, " node %d", 3835 | t->buffer->target_node->debug_id); 3836 | seq_printf(m, " size %zd:%zd data %p\n", 3837 | t->buffer->data_size, t->buffer->offsets_size, 3838 | t->buffer->data); 3839 | } 3840 | 3841 | static void print_binder_buffer(struct seq_file *m, const char *prefix, 3842 | struct binder_buffer *buffer) 3843 | { 3844 | seq_printf(m, "%s %d: %p size %zd:%zd %s\n", 3845 | prefix, buffer->debug_id, buffer->data, 3846 | buffer->data_size, buffer->offsets_size, 3847 | buffer->transaction ? 
"active" : "delivered"); 3848 | } 3849 | 3850 | static void print_binder_work(struct seq_file *m, const char *prefix, 3851 | const char *transaction_prefix, 3852 | struct binder_work *w) 3853 | { 3854 | struct binder_node *node; 3855 | struct binder_transaction *t; 3856 | 3857 | switch (w->type) { 3858 | case BINDER_WORK_TRANSACTION: 3859 | t = container_of(w, struct binder_transaction, work); 3860 | print_binder_transaction(m, transaction_prefix, t); 3861 | break; 3862 | case BINDER_WORK_TRANSACTION_COMPLETE: 3863 | seq_printf(m, "%stransaction complete\n", prefix); 3864 | break; 3865 | case BINDER_WORK_NODE: 3866 | node = container_of(w, struct binder_node, work); 3867 | seq_printf(m, "%snode work %d: u%016llx c%016llx\n", 3868 | prefix, node->debug_id, 3869 | (u64)node->ptr, (u64)node->cookie); 3870 | break; 3871 | case BINDER_WORK_DEAD_BINDER: 3872 | seq_printf(m, "%shas dead binder\n", prefix); 3873 | break; 3874 | case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 3875 | seq_printf(m, "%shas cleared dead binder\n", prefix); 3876 | break; 3877 | case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: 3878 | seq_printf(m, "%shas cleared death notification\n", prefix); 3879 | break; 3880 | default: 3881 | seq_printf(m, "%sunknown work: type %d\n", prefix, w->type); 3882 | break; 3883 | } 3884 | } 3885 | 3886 | static void print_binder_thread(struct seq_file *m, 3887 | struct binder_thread *thread, 3888 | int print_always) 3889 | { 3890 | struct binder_transaction *t; 3891 | struct binder_work *w; 3892 | size_t start_pos = m->count; 3893 | size_t header_pos; 3894 | 3895 | seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper); 3896 | header_pos = m->count; 3897 | t = thread->transaction_stack; 3898 | while (t) { 3899 | if (t->from == thread) { 3900 | print_binder_transaction(m, 3901 | " outgoing transaction", t); 3902 | t = t->from_parent; 3903 | } else if (t->to_thread == thread) { 3904 | print_binder_transaction(m, 3905 | " incoming transaction", t); 3906 | t = t->to_parent; 3907 | } else { 3908 | print_binder_transaction(m, " bad transaction", t); 3909 | t = NULL; 3910 | } 3911 | } 3912 | list_for_each_entry(w, &thread->todo, entry) { 3913 | print_binder_work(m, " ", " pending transaction", w); 3914 | } 3915 | if (!print_always && m->count == header_pos) 3916 | m->count = start_pos; 3917 | } 3918 | 3919 | static void print_binder_node(struct seq_file *m, struct binder_node *node) 3920 | { 3921 | struct binder_ref *ref; 3922 | struct binder_work *w; 3923 | int count; 3924 | 3925 | count = 0; 3926 | hlist_for_each_entry(ref, &node->refs, node_entry) 3927 | count++; 3928 | 3929 | seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d", 3930 | node->debug_id, (u64)node->ptr, (u64)node->cookie, 3931 | node->has_strong_ref, node->has_weak_ref, 3932 | node->local_strong_refs, node->local_weak_refs, 3933 | node->internal_strong_refs, count); 3934 | if (count) { 3935 | seq_puts(m, " proc"); 3936 | hlist_for_each_entry(ref, &node->refs, node_entry) 3937 | seq_printf(m, " %d", ref->proc->pid); 3938 | } 3939 | seq_puts(m, "\n"); 3940 | list_for_each_entry(w, &node->async_todo, entry) 3941 | print_binder_work(m, " ", 3942 | " pending async transaction", w); 3943 | } 3944 | 3945 | static void print_binder_ref(struct seq_file *m, struct binder_ref *ref) 3946 | { 3947 | seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n", 3948 | ref->debug_id, ref->desc, ref->node->proc ? 
"" : "dead ", 3949 | ref->node->debug_id, ref->strong, ref->weak, ref->death); 3950 | } 3951 | 3952 | static void print_binder_proc(struct seq_file *m, 3953 | struct binder_proc *proc, int print_all) 3954 | { 3955 | struct binder_work *w; 3956 | struct rb_node *n; 3957 | size_t start_pos = m->count; 3958 | size_t header_pos; 3959 | 3960 | seq_printf(m, "proc %d\n", proc->pid); 3961 | seq_printf(m, "context %s\n", proc->context->name); 3962 | header_pos = m->count; 3963 | 3964 | for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) 3965 | print_binder_thread(m, rb_entry(n, struct binder_thread, 3966 | rb_node), print_all); 3967 | for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { 3968 | struct binder_node *node = rb_entry(n, struct binder_node, 3969 | rb_node); 3970 | if (print_all || node->has_async_transaction) 3971 | print_binder_node(m, node); 3972 | } 3973 | if (print_all) { 3974 | for (n = rb_first(&proc->refs_by_desc); 3975 | n != NULL; 3976 | n = rb_next(n)) 3977 | print_binder_ref(m, rb_entry(n, struct binder_ref, 3978 | rb_node_desc)); 3979 | } 3980 | for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) 3981 | print_binder_buffer(m, " buffer", 3982 | rb_entry(n, struct binder_buffer, rb_node)); 3983 | list_for_each_entry(w, &proc->todo, entry) 3984 | print_binder_work(m, " ", " pending transaction", w); 3985 | list_for_each_entry(w, &proc->delivered_death, entry) { 3986 | seq_puts(m, " has delivered dead binder\n"); 3987 | break; 3988 | } 3989 | if (!print_all && m->count == header_pos) 3990 | m->count = start_pos; 3991 | } 3992 | 3993 | static const char * const binder_return_strings[] = { 3994 | "BR_ERROR", 3995 | "BR_OK", 3996 | "BR_TRANSACTION", 3997 | "BR_REPLY", 3998 | "BR_ACQUIRE_RESULT", 3999 | "BR_DEAD_REPLY", 4000 | "BR_TRANSACTION_COMPLETE", 4001 | "BR_INCREFS", 4002 | "BR_ACQUIRE", 4003 | "BR_RELEASE", 4004 | "BR_DECREFS", 4005 | "BR_ATTEMPT_ACQUIRE", 4006 | "BR_NOOP", 4007 | "BR_SPAWN_LOOPER", 4008 | "BR_FINISHED", 4009 | "BR_DEAD_BINDER", 4010 | "BR_CLEAR_DEATH_NOTIFICATION_DONE", 4011 | "BR_FAILED_REPLY" 4012 | }; 4013 | 4014 | static const char * const binder_command_strings[] = { 4015 | "BC_TRANSACTION", 4016 | "BC_REPLY", 4017 | "BC_ACQUIRE_RESULT", 4018 | "BC_FREE_BUFFER", 4019 | "BC_INCREFS", 4020 | "BC_ACQUIRE", 4021 | "BC_RELEASE", 4022 | "BC_DECREFS", 4023 | "BC_INCREFS_DONE", 4024 | "BC_ACQUIRE_DONE", 4025 | "BC_ATTEMPT_ACQUIRE", 4026 | "BC_REGISTER_LOOPER", 4027 | "BC_ENTER_LOOPER", 4028 | "BC_EXIT_LOOPER", 4029 | "BC_REQUEST_DEATH_NOTIFICATION", 4030 | "BC_CLEAR_DEATH_NOTIFICATION", 4031 | "BC_DEAD_BINDER_DONE", 4032 | "BC_TRANSACTION_SG", 4033 | "BC_REPLY_SG", 4034 | }; 4035 | 4036 | static const char * const binder_objstat_strings[] = { 4037 | "proc", 4038 | "thread", 4039 | "node", 4040 | "ref", 4041 | "death", 4042 | "transaction", 4043 | "transaction_complete" 4044 | }; 4045 | 4046 | static void print_binder_stats(struct seq_file *m, const char *prefix, 4047 | struct binder_stats *stats) 4048 | { 4049 | int i; 4050 | 4051 | BUILD_BUG_ON(ARRAY_SIZE(stats->bc) != 4052 | ARRAY_SIZE(binder_command_strings)); 4053 | for (i = 0; i < ARRAY_SIZE(stats->bc); i++) { 4054 | if (stats->bc[i]) 4055 | seq_printf(m, "%s%s: %d\n", prefix, 4056 | binder_command_strings[i], stats->bc[i]); 4057 | } 4058 | 4059 | BUILD_BUG_ON(ARRAY_SIZE(stats->br) != 4060 | ARRAY_SIZE(binder_return_strings)); 4061 | for (i = 0; i < ARRAY_SIZE(stats->br); i++) { 4062 | if (stats->br[i]) 4063 | seq_printf(m, "%s%s: %d\n", prefix, 4064 | 
binder_return_strings[i], stats->br[i]); 4065 | } 4066 | 4067 | BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != 4068 | ARRAY_SIZE(binder_objstat_strings)); 4069 | BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != 4070 | ARRAY_SIZE(stats->obj_deleted)); 4071 | for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) { 4072 | if (stats->obj_created[i] || stats->obj_deleted[i]) 4073 | seq_printf(m, "%s%s: active %d total %d\n", prefix, 4074 | binder_objstat_strings[i], 4075 | stats->obj_created[i] - stats->obj_deleted[i], 4076 | stats->obj_created[i]); 4077 | } 4078 | } 4079 | 4080 | static void print_binder_proc_stats(struct seq_file *m, 4081 | struct binder_proc *proc) 4082 | { 4083 | struct binder_work *w; 4084 | struct rb_node *n; 4085 | int count, strong, weak; 4086 | 4087 | seq_printf(m, "proc %d\n", proc->pid); 4088 | seq_printf(m, "context %s\n", proc->context->name); 4089 | count = 0; 4090 | for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) 4091 | count++; 4092 | seq_printf(m, " threads: %d\n", count); 4093 | seq_printf(m, " requested threads: %d+%d/%d\n" 4094 | " ready threads %d\n" 4095 | " free async space %zd\n", proc->requested_threads, 4096 | proc->requested_threads_started, proc->max_threads, 4097 | proc->ready_threads, proc->free_async_space); 4098 | count = 0; 4099 | for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) 4100 | count++; 4101 | seq_printf(m, " nodes: %d\n", count); 4102 | count = 0; 4103 | strong = 0; 4104 | weak = 0; 4105 | for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { 4106 | struct binder_ref *ref = rb_entry(n, struct binder_ref, 4107 | rb_node_desc); 4108 | count++; 4109 | strong += ref->strong; 4110 | weak += ref->weak; 4111 | } 4112 | seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak); 4113 | 4114 | count = 0; 4115 | for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) 4116 | count++; 4117 | seq_printf(m, " buffers: %d\n", count); 4118 | 4119 | count = 0; 4120 | list_for_each_entry(w, &proc->todo, entry) { 4121 | switch (w->type) { 4122 | case BINDER_WORK_TRANSACTION: 4123 | count++; 4124 | break; 4125 | default: 4126 | break; 4127 | } 4128 | } 4129 | seq_printf(m, " pending transactions: %d\n", count); 4130 | 4131 | print_binder_stats(m, " ", &proc->stats); 4132 | } 4133 | 4134 | 4135 | static int binder_state_show(struct seq_file *m, void *unused) 4136 | { 4137 | struct binder_proc *proc; 4138 | struct binder_node *node; 4139 | int do_lock = !binder_debug_no_lock; 4140 | 4141 | if (do_lock) 4142 | binder_lock(__func__); 4143 | 4144 | seq_puts(m, "binder state:\n"); 4145 | 4146 | if (!hlist_empty(&binder_dead_nodes)) 4147 | seq_puts(m, "dead nodes:\n"); 4148 | hlist_for_each_entry(node, &binder_dead_nodes, dead_node) 4149 | print_binder_node(m, node); 4150 | 4151 | hlist_for_each_entry(proc, &binder_procs, proc_node) 4152 | print_binder_proc(m, proc, 1); 4153 | if (do_lock) 4154 | binder_unlock(__func__); 4155 | return 0; 4156 | } 4157 | 4158 | static int binder_stats_show(struct seq_file *m, void *unused) 4159 | { 4160 | struct binder_proc *proc; 4161 | int do_lock = !binder_debug_no_lock; 4162 | 4163 | if (do_lock) 4164 | binder_lock(__func__); 4165 | 4166 | seq_puts(m, "binder stats:\n"); 4167 | 4168 | print_binder_stats(m, "", &binder_stats); 4169 | 4170 | hlist_for_each_entry(proc, &binder_procs, proc_node) 4171 | print_binder_proc_stats(m, proc); 4172 | if (do_lock) 4173 | binder_unlock(__func__); 4174 | return 0; 4175 | } 4176 | 4177 | static int binder_transactions_show(struct seq_file 
*m, void *unused) 4178 | { 4179 | struct binder_proc *proc; 4180 | int do_lock = !binder_debug_no_lock; 4181 | 4182 | if (do_lock) 4183 | binder_lock(__func__); 4184 | 4185 | seq_puts(m, "binder transactions:\n"); 4186 | hlist_for_each_entry(proc, &binder_procs, proc_node) 4187 | print_binder_proc(m, proc, 0); 4188 | if (do_lock) 4189 | binder_unlock(__func__); 4190 | return 0; 4191 | } 4192 | 4193 | static int binder_proc_show(struct seq_file *m, void *unused) 4194 | { 4195 | struct binder_proc *itr; 4196 | int pid = (unsigned long)m->private; 4197 | int do_lock = !binder_debug_no_lock; 4198 | 4199 | if (do_lock) 4200 | binder_lock(__func__); 4201 | 4202 | hlist_for_each_entry(itr, &binder_procs, proc_node) { 4203 | if (itr->pid == pid) { 4204 | seq_puts(m, "binder proc state:\n"); 4205 | print_binder_proc(m, itr, 1); 4206 | } 4207 | } 4208 | if (do_lock) 4209 | binder_unlock(__func__); 4210 | return 0; 4211 | } 4212 | 4213 | static void print_binder_transaction_log_entry(struct seq_file *m, 4214 | struct binder_transaction_log_entry *e) 4215 | { 4216 | seq_printf(m, 4217 | "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d\n", 4218 | e->debug_id, (e->call_type == 2) ? "reply" : 4219 | ((e->call_type == 1) ? "async" : "call "), e->from_proc, 4220 | e->from_thread, e->to_proc, e->to_thread, e->context_name, 4221 | e->to_node, e->target_handle, e->data_size, e->offsets_size); 4222 | } 4223 | 4224 | static int binder_transaction_log_show(struct seq_file *m, void *unused) 4225 | { 4226 | struct binder_transaction_log *log = m->private; 4227 | int i; 4228 | 4229 | if (log->full) { 4230 | for (i = log->next; i < ARRAY_SIZE(log->entry); i++) 4231 | print_binder_transaction_log_entry(m, &log->entry[i]); 4232 | } 4233 | for (i = 0; i < log->next; i++) 4234 | print_binder_transaction_log_entry(m, &log->entry[i]); 4235 | return 0; 4236 | } 4237 | 4238 | static const struct file_operations binder_fops = { 4239 | .owner = THIS_MODULE, 4240 | .poll = binder_poll, 4241 | .unlocked_ioctl = binder_ioctl, 4242 | .compat_ioctl = binder_ioctl, 4243 | .mmap = binder_mmap, 4244 | .open = binder_open, 4245 | .flush = binder_flush, 4246 | .release = binder_release, 4247 | }; 4248 | 4249 | BINDER_DEBUG_ENTRY(state); 4250 | BINDER_DEBUG_ENTRY(stats); 4251 | BINDER_DEBUG_ENTRY(transactions); 4252 | BINDER_DEBUG_ENTRY(transaction_log); 4253 | 4254 | static struct class *binder_class; 4255 | 4256 | static void binder_device_release(struct device *dev) 4257 | { 4258 | } 4259 | 4260 | static int __init init_binder_device(int idx) 4261 | { 4262 | int ret; 4263 | char *name; 4264 | dev_t devnr; 4265 | struct binder_device *binder_device; 4266 | /* strlen("binder") 4267 | * + 4268 | * maximum length of 64 bit int as string 4269 | */ 4270 | char numstr[6 + 21] = "binder"; 4271 | 4272 | binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL); 4273 | if (!binder_device) 4274 | return -ENOMEM; 4275 | 4276 | cdev_init(&binder_device->cdev, &binder_fops); 4277 | binder_device->cdev.owner = THIS_MODULE; 4278 | 4279 | devnr = MKDEV(BINDER_DKMS_MAJOR, idx); 4280 | ret = cdev_add(&binder_device->cdev, devnr, 1); 4281 | if (ret) { 4282 | kfree(binder_device); 4283 | return ret; 4284 | } 4285 | 4286 | if (binder_devices_param > 1) 4287 | ret = snprintf(numstr, sizeof(numstr), "binder%d", idx); 4288 | if (ret < 0 || (size_t)ret >= sizeof(numstr)) { 4289 | cdev_del(&binder_device->cdev); 4290 | kfree(binder_device); 4291 | return -EIO; 4292 | } 4293 | 4294 | name = kzalloc(strlen(numstr) + 1, GFP_KERNEL); 4295 | if 
(!name) { 4296 | cdev_del(&binder_device->cdev); 4297 | kfree(binder_device); 4298 | return -ENOMEM; 4299 | } 4300 | strcpy(name, numstr); 4301 | binder_device->context.name = name; 4302 | binder_device->context.binder_context_mgr_uid = INVALID_UID; 4303 | 4304 | binder_device->class_dev.devt = binder_device->cdev.dev; 4305 | binder_device->class_dev.class = binder_class; 4306 | binder_device->class_dev.release = binder_device_release; 4307 | dev_set_name(&binder_device->class_dev, "%s", name); 4308 | ret = device_register(&binder_device->class_dev); 4309 | if (ret) { 4310 | cdev_del(&binder_device->cdev); 4311 | kfree(binder_device); 4312 | kfree(name); 4313 | return ret; 4314 | } 4315 | 4316 | mutex_lock(&binder_devices_mtx); 4317 | hlist_add_head(&binder_device->hlist, &binder_devices); 4318 | mutex_unlock(&binder_devices_mtx); 4319 | 4320 | return 0; 4321 | } 4322 | 4323 | static int __init binder_init(void) 4324 | { 4325 | int i, ret; 4326 | struct binder_device *device; 4327 | struct hlist_node *tmp; 4328 | 4329 | if (binder_devices_param <= 0 || 4330 | binder_devices_param > BINDER_DKMS_MAX_MINOR) 4331 | return -EINVAL; 4332 | 4333 | binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL); 4334 | if (binder_debugfs_dir_entry_root) 4335 | binder_debugfs_dir_entry_proc = debugfs_create_dir("proc", 4336 | binder_debugfs_dir_entry_root); 4337 | 4338 | if (binder_debugfs_dir_entry_root) { 4339 | debugfs_create_file("state", 4340 | S_IRUGO, 4341 | binder_debugfs_dir_entry_root, 4342 | NULL, 4343 | &binder_state_fops); 4344 | debugfs_create_file("stats", 4345 | S_IRUGO, 4346 | binder_debugfs_dir_entry_root, 4347 | NULL, 4348 | &binder_stats_fops); 4349 | debugfs_create_file("transactions", 4350 | S_IRUGO, 4351 | binder_debugfs_dir_entry_root, 4352 | NULL, 4353 | &binder_transactions_fops); 4354 | debugfs_create_file("transaction_log", 4355 | S_IRUGO, 4356 | binder_debugfs_dir_entry_root, 4357 | &binder_transaction_log, 4358 | &binder_transaction_log_fops); 4359 | debugfs_create_file("failed_transaction_log", 4360 | S_IRUGO, 4361 | binder_debugfs_dir_entry_root, 4362 | &binder_transaction_log_failed, 4363 | &binder_transaction_log_fops); 4364 | } 4365 | 4366 | ret = register_chrdev_region(MKDEV(BINDER_DKMS_MAJOR, 0), 4367 | BINDER_DKMS_MAX_MINOR, "binder"); 4368 | if (ret) 4369 | goto on_error_remove_debugfs; 4370 | 4371 | binder_class = class_create(THIS_MODULE, "binder"); 4372 | if (IS_ERR(binder_class)) 4373 | goto on_error_unregister_chrdev_region; 4374 | 4375 | for (i = 0; i < binder_devices_param; i++) { 4376 | ret = init_binder_device(i); 4377 | if (ret) 4378 | goto err_init_binder_device_failed; 4379 | } 4380 | 4381 | return ret; 4382 | 4383 | err_init_binder_device_failed: 4384 | mutex_lock(&binder_devices_mtx); 4385 | hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) { 4386 | cdev_del(&device->cdev); 4387 | device_unregister(&device->class_dev); 4388 | kfree(device->context.name); 4389 | hlist_del(&device->hlist); 4390 | kfree(device); 4391 | } 4392 | mutex_unlock(&binder_devices_mtx); 4393 | class_destroy(binder_class); 4394 | 4395 | on_error_unregister_chrdev_region: 4396 | unregister_chrdev_region(MKDEV(BINDER_DKMS_MAJOR, 0), 4397 | BINDER_DKMS_MAX_MINOR); 4398 | 4399 | on_error_remove_debugfs: 4400 | debugfs_remove_recursive(binder_debugfs_dir_entry_root); 4401 | 4402 | return -1; 4403 | } 4404 | 4405 | static void __exit binder_exit(void) 4406 | { 4407 | struct binder_device *device; 4408 | struct hlist_node *tmp; 4409 | 4410 | 
mutex_lock(&binder_devices_mtx); 4411 | hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) { 4412 | cdev_del(&device->cdev); 4413 | device_unregister(&device->class_dev); 4414 | kfree(device->context.name); 4415 | hlist_del(&device->hlist); 4416 | kfree(device); 4417 | } 4418 | mutex_unlock(&binder_devices_mtx); 4419 | 4420 | class_destroy(binder_class); 4421 | 4422 | unregister_chrdev_region(MKDEV(BINDER_DKMS_MAJOR, 0), 4423 | BINDER_DKMS_MAX_MINOR); 4424 | 4425 | debugfs_remove_recursive(binder_debugfs_dir_entry_root); 4426 | } 4427 | 4428 | module_init(binder_init); 4429 | module_exit(binder_exit); 4430 | 4431 | #define CREATE_TRACE_POINTS 4432 | #include "binder_trace.h" 4433 | 4434 | MODULE_LICENSE("GPL v2"); 4435 | -------------------------------------------------------------------------------- /binder/binder.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2008 Google, Inc. 3 | * 4 | * Based on, but no longer compatible with, the original 5 | * OpenBinder.org binder driver interface, which is: 6 | * 7 | * Copyright (c) 2005 Palmsource, Inc. 8 | * 9 | * This software is licensed under the terms of the GNU General Public 10 | * License version 2, as published by the Free Software Foundation, and 11 | * may be copied, distributed, and modified under those terms. 12 | * 13 | * This program is distributed in the hope that it will be useful, 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 | * GNU General Public License for more details. 17 | * 18 | */ 19 | 20 | #ifndef _UAPI_LINUX_BINDER_H 21 | #define _UAPI_LINUX_BINDER_H 22 | 23 | #include 24 | #include 25 | 26 | #define B_PACK_CHARS(c1, c2, c3, c4) \ 27 | ((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4)) 28 | #define B_TYPE_LARGE 0x85 29 | 30 | enum { 31 | BINDER_TYPE_BINDER = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE), 32 | BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE), 33 | BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE), 34 | BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE), 35 | BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE), 36 | BINDER_TYPE_FDA = B_PACK_CHARS('f', 'd', 'a', B_TYPE_LARGE), 37 | BINDER_TYPE_PTR = B_PACK_CHARS('p', 't', '*', B_TYPE_LARGE), 38 | }; 39 | 40 | enum { 41 | FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff, 42 | FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100, 43 | }; 44 | 45 | #ifdef BINDER_IPC_32BIT 46 | typedef __u32 binder_size_t; 47 | typedef __u32 binder_uintptr_t; 48 | #else 49 | typedef __u64 binder_size_t; 50 | typedef __u64 binder_uintptr_t; 51 | #endif 52 | 53 | /** 54 | * struct binder_object_header - header shared by all binder metadata objects. 55 | * @type: type of the object 56 | */ 57 | struct binder_object_header { 58 | __u32 type; 59 | }; 60 | 61 | /* 62 | * This is the flattened representation of a Binder object for transfer 63 | * between processes. The 'offsets' supplied as part of a binder transaction 64 | * contains offsets into the data where these structures occur. The Binder 65 | * driver takes care of re-writing the structure type and data as it moves 66 | * between processes. 67 | */ 68 | struct flat_binder_object { 69 | struct binder_object_header hdr; 70 | __u32 flags; 71 | 72 | /* 8 bytes of data. 
*/ 73 | union { 74 | binder_uintptr_t binder; /* local object */ 75 | __u32 handle; /* remote object */ 76 | }; 77 | 78 | /* extra data associated with local object */ 79 | binder_uintptr_t cookie; 80 | }; 81 | 82 | /** 83 | * struct binder_fd_object - describes a filedescriptor to be fixed up. 84 | * @hdr: common header structure 85 | * @pad_flags: padding to remain compatible with old userspace code 86 | * @pad_binder: padding to remain compatible with old userspace code 87 | * @fd: file descriptor 88 | * @cookie: opaque data, used by user-space 89 | */ 90 | struct binder_fd_object { 91 | struct binder_object_header hdr; 92 | __u32 pad_flags; 93 | union { 94 | binder_uintptr_t pad_binder; 95 | __u32 fd; 96 | }; 97 | 98 | binder_uintptr_t cookie; 99 | }; 100 | 101 | /* struct binder_buffer_object - object describing a userspace buffer 102 | * @hdr: common header structure 103 | * @flags: one or more BINDER_BUFFER_* flags 104 | * @buffer: address of the buffer 105 | * @length: length of the buffer 106 | * @parent: index in offset array pointing to parent buffer 107 | * @parent_offset: offset in @parent pointing to this buffer 108 | * 109 | * A binder_buffer object represents an object that the 110 | * binder kernel driver can copy verbatim to the target 111 | * address space. A buffer itself may be pointed to from 112 | * within another buffer, meaning that the pointer inside 113 | * that other buffer needs to be fixed up as well. This 114 | * can be done by setting the BINDER_BUFFER_FLAG_HAS_PARENT 115 | * flag in @flags, by setting @parent buffer to the index 116 | * in the offset array pointing to the parent binder_buffer_object, 117 | * and by setting @parent_offset to the offset in the parent buffer 118 | * at which the pointer to this buffer is located. 119 | */ 120 | struct binder_buffer_object { 121 | struct binder_object_header hdr; 122 | __u32 flags; 123 | binder_uintptr_t buffer; 124 | binder_size_t length; 125 | binder_size_t parent; 126 | binder_size_t parent_offset; 127 | }; 128 | 129 | enum { 130 | BINDER_BUFFER_FLAG_HAS_PARENT = 0x01, 131 | }; 132 | 133 | /* struct binder_fd_array_object - object describing an array of fds in a buffer 134 | * @hdr: common header structure 135 | * @num_fds: number of file descriptors in the buffer 136 | * @parent: index in offset array to buffer holding the fd array 137 | * @parent_offset: start offset of fd array in the buffer 138 | * 139 | * A binder_fd_array object represents an array of file 140 | * descriptors embedded in a binder_buffer_object. It is 141 | * different from a regular binder_buffer_object because it 142 | * describes a list of file descriptors to fix up, not an opaque 143 | * blob of memory, and hence the kernel needs to treat it differently. 144 | * 145 | * An example of how this would be used is with Android's 146 | * native_handle_t object, which is a struct with a list of integers 147 | * and a list of file descriptors. The native_handle_t struct itself 148 | * will be represented by a struct binder_buffer_objct, whereas the 149 | * embedded list of file descriptors is represented by a 150 | * struct binder_fd_array_object with that binder_buffer_object as 151 | * a parent. 
152 | */ 153 | struct binder_fd_array_object { 154 | struct binder_object_header hdr; 155 | binder_size_t num_fds; 156 | binder_size_t parent; 157 | binder_size_t parent_offset; 158 | }; 159 | 160 | /* 161 | * On 64-bit platforms where user code may run in 32-bits the driver must 162 | * translate the buffer (and local binder) addresses appropriately. 163 | */ 164 | 165 | struct binder_write_read { 166 | binder_size_t write_size; /* bytes to write */ 167 | binder_size_t write_consumed; /* bytes consumed by driver */ 168 | binder_uintptr_t write_buffer; 169 | binder_size_t read_size; /* bytes to read */ 170 | binder_size_t read_consumed; /* bytes consumed by driver */ 171 | binder_uintptr_t read_buffer; 172 | }; 173 | 174 | /* Use with BINDER_VERSION, driver fills in fields. */ 175 | struct binder_version { 176 | /* driver protocol version -- increment with incompatible change */ 177 | __s32 protocol_version; 178 | }; 179 | 180 | /* This is the current protocol version. */ 181 | #ifdef BINDER_IPC_32BIT 182 | #define BINDER_CURRENT_PROTOCOL_VERSION 7 183 | #else 184 | #define BINDER_CURRENT_PROTOCOL_VERSION 8 185 | #endif 186 | 187 | #define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read) 188 | #define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64) 189 | #define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32) 190 | #define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, __s32) 191 | #define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32) 192 | #define BINDER_THREAD_EXIT _IOW('b', 8, __s32) 193 | #define BINDER_VERSION _IOWR('b', 9, struct binder_version) 194 | 195 | /* 196 | * NOTE: Two special error codes you should check for when calling 197 | * in to the driver are: 198 | * 199 | * EINTR -- The operation has been interupted. This should be 200 | * handled by retrying the ioctl() until a different error code 201 | * is returned. 202 | * 203 | * ECONNREFUSED -- The driver is no longer accepting operations 204 | * from your process. That is, the process is being destroyed. 205 | * You should handle this by exiting from your process. Note 206 | * that once this error code is returned, all further calls to 207 | * the driver from any thread will return this same code. 208 | */ 209 | 210 | enum transaction_flags { 211 | TF_ONE_WAY = 0x01, /* this is a one-way call: async, no return */ 212 | TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */ 213 | TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */ 214 | TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */ 215 | }; 216 | 217 | struct binder_transaction_data { 218 | /* The first two are only used for bcTRANSACTION and brTRANSACTION, 219 | * identifying the target and contents of the transaction. 220 | */ 221 | union { 222 | /* target descriptor of command transaction */ 223 | __u32 handle; 224 | /* target descriptor of return transaction */ 225 | binder_uintptr_t ptr; 226 | } target; 227 | binder_uintptr_t cookie; /* target object cookie */ 228 | __u32 code; /* transaction command */ 229 | 230 | /* General information about the transaction. */ 231 | __u32 flags; 232 | pid_t sender_pid; 233 | uid_t sender_euid; 234 | binder_size_t data_size; /* number of bytes of data */ 235 | binder_size_t offsets_size; /* number of bytes of offsets */ 236 | 237 | /* If this transaction is inline, the data immediately 238 | * follows here; otherwise, it ends with a pointer to 239 | * the data buffer. 
240 | */ 241 | union { 242 | struct { 243 | /* transaction data */ 244 | binder_uintptr_t buffer; 245 | /* offsets from buffer to flat_binder_object structs */ 246 | binder_uintptr_t offsets; 247 | } ptr; 248 | __u8 buf[8]; 249 | } data; 250 | }; 251 | 252 | struct binder_transaction_data_sg { 253 | struct binder_transaction_data transaction_data; 254 | binder_size_t buffers_size; 255 | }; 256 | 257 | struct binder_ptr_cookie { 258 | binder_uintptr_t ptr; 259 | binder_uintptr_t cookie; 260 | }; 261 | 262 | struct binder_handle_cookie { 263 | __u32 handle; 264 | binder_uintptr_t cookie; 265 | } __packed; 266 | 267 | struct binder_pri_desc { 268 | __s32 priority; 269 | __u32 desc; 270 | }; 271 | 272 | struct binder_pri_ptr_cookie { 273 | __s32 priority; 274 | binder_uintptr_t ptr; 275 | binder_uintptr_t cookie; 276 | }; 277 | 278 | enum binder_driver_return_protocol { 279 | BR_ERROR = _IOR('r', 0, __s32), 280 | /* 281 | * int: error code 282 | */ 283 | 284 | BR_OK = _IO('r', 1), 285 | /* No parameters! */ 286 | 287 | BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data), 288 | BR_REPLY = _IOR('r', 3, struct binder_transaction_data), 289 | /* 290 | * binder_transaction_data: the received command. 291 | */ 292 | 293 | BR_ACQUIRE_RESULT = _IOR('r', 4, __s32), 294 | /* 295 | * not currently supported 296 | * int: 0 if the last bcATTEMPT_ACQUIRE was not successful. 297 | * Else the remote object has acquired a primary reference. 298 | */ 299 | 300 | BR_DEAD_REPLY = _IO('r', 5), 301 | /* 302 | * The target of the last transaction (either a bcTRANSACTION or 303 | * a bcATTEMPT_ACQUIRE) is no longer with us. No parameters. 304 | */ 305 | 306 | BR_TRANSACTION_COMPLETE = _IO('r', 6), 307 | /* 308 | * No parameters... always refers to the last transaction requested 309 | * (including replies). Note that this will be sent even for 310 | * asynchronous transactions. 311 | */ 312 | 313 | BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie), 314 | BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie), 315 | BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie), 316 | BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie), 317 | /* 318 | * void *: ptr to binder 319 | * void *: cookie for binder 320 | */ 321 | 322 | BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie), 323 | /* 324 | * not currently supported 325 | * int: priority 326 | * void *: ptr to binder 327 | * void *: cookie for binder 328 | */ 329 | 330 | BR_NOOP = _IO('r', 12), 331 | /* 332 | * No parameters. Do nothing and examine the next command. It exists 333 | * primarily so that we can replace it with a BR_SPAWN_LOOPER command. 334 | */ 335 | 336 | BR_SPAWN_LOOPER = _IO('r', 13), 337 | /* 338 | * No parameters. The driver has determined that a process has no 339 | * threads waiting to service incoming transactions. When a process 340 | * receives this command, it must spawn a new service thread and 341 | * register it via bcENTER_LOOPER. 342 | */ 343 | 344 | BR_FINISHED = _IO('r', 14), 345 | /* 346 | * not currently supported 347 | * stop threadpool thread 348 | */ 349 | 350 | BR_DEAD_BINDER = _IOR('r', 15, binder_uintptr_t), 351 | /* 352 | * void *: cookie 353 | */ 354 | BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, binder_uintptr_t), 355 | /* 356 | * void *: cookie 357 | */ 358 | 359 | BR_FAILED_REPLY = _IO('r', 17), 360 | /* 361 | * The the last transaction (either a bcTRANSACTION or 362 | * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters. 
363 | */ 364 | }; 365 | 366 | enum binder_driver_command_protocol { 367 | BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data), 368 | BC_REPLY = _IOW('c', 1, struct binder_transaction_data), 369 | /* 370 | * binder_transaction_data: the sent command. 371 | */ 372 | 373 | BC_ACQUIRE_RESULT = _IOW('c', 2, __s32), 374 | /* 375 | * not currently supported 376 | * int: 0 if the last BR_ATTEMPT_ACQUIRE was not successful. 377 | * Else you have acquired a primary reference on the object. 378 | */ 379 | 380 | BC_FREE_BUFFER = _IOW('c', 3, binder_uintptr_t), 381 | /* 382 | * void *: ptr to transaction data received on a read 383 | */ 384 | 385 | BC_INCREFS = _IOW('c', 4, __u32), 386 | BC_ACQUIRE = _IOW('c', 5, __u32), 387 | BC_RELEASE = _IOW('c', 6, __u32), 388 | BC_DECREFS = _IOW('c', 7, __u32), 389 | /* 390 | * int: descriptor 391 | */ 392 | 393 | BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie), 394 | BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie), 395 | /* 396 | * void *: ptr to binder 397 | * void *: cookie for binder 398 | */ 399 | 400 | BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc), 401 | /* 402 | * not currently supported 403 | * int: priority 404 | * int: descriptor 405 | */ 406 | 407 | BC_REGISTER_LOOPER = _IO('c', 11), 408 | /* 409 | * No parameters. 410 | * Register a spawned looper thread with the device. 411 | */ 412 | 413 | BC_ENTER_LOOPER = _IO('c', 12), 414 | BC_EXIT_LOOPER = _IO('c', 13), 415 | /* 416 | * No parameters. 417 | * These two commands are sent as an application-level thread 418 | * enters and exits the binder loop, respectively. They are 419 | * used so the binder can have an accurate count of the number 420 | * of looping threads it has available. 421 | */ 422 | 423 | BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, 424 | struct binder_handle_cookie), 425 | /* 426 | * int: handle 427 | * void *: cookie 428 | */ 429 | 430 | BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, 431 | struct binder_handle_cookie), 432 | /* 433 | * int: handle 434 | * void *: cookie 435 | */ 436 | 437 | BC_DEAD_BINDER_DONE = _IOW('c', 16, binder_uintptr_t), 438 | /* 439 | * void *: cookie 440 | */ 441 | 442 | BC_TRANSACTION_SG = _IOW('c', 17, struct binder_transaction_data_sg), 443 | BC_REPLY_SG = _IOW('c', 18, struct binder_transaction_data_sg), 444 | /* 445 | * binder_transaction_data_sg: the sent command. 446 | */ 447 | }; 448 | 449 | #endif /* _UAPI_LINUX_BINDER_H */ 450 | 451 | -------------------------------------------------------------------------------- /binder/binder_trace.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2012 Google, Inc. 3 | * 4 | * This software is licensed under the terms of the GNU General Public 5 | * License version 2, as published by the Free Software Foundation, and 6 | * may be copied, distributed, and modified under those terms. 7 | * 8 | * This program is distributed in the hope that it will be useful, 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 | * GNU General Public License for more details. 
12 | * 13 | */ 14 | 15 | #undef TRACE_SYSTEM 16 | #define TRACE_SYSTEM binder 17 | 18 | #if !defined(_BINDER_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) 19 | #define _BINDER_TRACE_H 20 | 21 | #include 22 | 23 | struct binder_buffer; 24 | struct binder_node; 25 | struct binder_proc; 26 | struct binder_ref; 27 | struct binder_thread; 28 | struct binder_transaction; 29 | 30 | TRACE_EVENT(binder_ioctl, 31 | TP_PROTO(unsigned int cmd, unsigned long arg), 32 | TP_ARGS(cmd, arg), 33 | 34 | TP_STRUCT__entry( 35 | __field(unsigned int, cmd) 36 | __field(unsigned long, arg) 37 | ), 38 | TP_fast_assign( 39 | __entry->cmd = cmd; 40 | __entry->arg = arg; 41 | ), 42 | TP_printk("cmd=0x%x arg=0x%lx", __entry->cmd, __entry->arg) 43 | ); 44 | 45 | DECLARE_EVENT_CLASS(binder_lock_class, 46 | TP_PROTO(const char *tag), 47 | TP_ARGS(tag), 48 | TP_STRUCT__entry( 49 | __field(const char *, tag) 50 | ), 51 | TP_fast_assign( 52 | __entry->tag = tag; 53 | ), 54 | TP_printk("tag=%s", __entry->tag) 55 | ); 56 | 57 | #define DEFINE_BINDER_LOCK_EVENT(name) \ 58 | DEFINE_EVENT(binder_lock_class, name, \ 59 | TP_PROTO(const char *func), \ 60 | TP_ARGS(func)) 61 | 62 | DEFINE_BINDER_LOCK_EVENT(binder_lock); 63 | DEFINE_BINDER_LOCK_EVENT(binder_locked); 64 | DEFINE_BINDER_LOCK_EVENT(binder_unlock); 65 | 66 | DECLARE_EVENT_CLASS(binder_function_return_class, 67 | TP_PROTO(int ret), 68 | TP_ARGS(ret), 69 | TP_STRUCT__entry( 70 | __field(int, ret) 71 | ), 72 | TP_fast_assign( 73 | __entry->ret = ret; 74 | ), 75 | TP_printk("ret=%d", __entry->ret) 76 | ); 77 | 78 | #define DEFINE_BINDER_FUNCTION_RETURN_EVENT(name) \ 79 | DEFINE_EVENT(binder_function_return_class, name, \ 80 | TP_PROTO(int ret), \ 81 | TP_ARGS(ret)) 82 | 83 | DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_ioctl_done); 84 | DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_write_done); 85 | DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_read_done); 86 | 87 | TRACE_EVENT(binder_wait_for_work, 88 | TP_PROTO(bool proc_work, bool transaction_stack, bool thread_todo), 89 | TP_ARGS(proc_work, transaction_stack, thread_todo), 90 | 91 | TP_STRUCT__entry( 92 | __field(bool, proc_work) 93 | __field(bool, transaction_stack) 94 | __field(bool, thread_todo) 95 | ), 96 | TP_fast_assign( 97 | __entry->proc_work = proc_work; 98 | __entry->transaction_stack = transaction_stack; 99 | __entry->thread_todo = thread_todo; 100 | ), 101 | TP_printk("proc_work=%d transaction_stack=%d thread_todo=%d", 102 | __entry->proc_work, __entry->transaction_stack, 103 | __entry->thread_todo) 104 | ); 105 | 106 | TRACE_EVENT(binder_transaction, 107 | TP_PROTO(bool reply, struct binder_transaction *t, 108 | struct binder_node *target_node), 109 | TP_ARGS(reply, t, target_node), 110 | TP_STRUCT__entry( 111 | __field(int, debug_id) 112 | __field(int, target_node) 113 | __field(int, to_proc) 114 | __field(int, to_thread) 115 | __field(int, reply) 116 | __field(unsigned int, code) 117 | __field(unsigned int, flags) 118 | ), 119 | TP_fast_assign( 120 | __entry->debug_id = t->debug_id; 121 | __entry->target_node = target_node ? target_node->debug_id : 0; 122 | __entry->to_proc = t->to_proc->pid; 123 | __entry->to_thread = t->to_thread ? 
t->to_thread->pid : 0; 124 | __entry->reply = reply; 125 | __entry->code = t->code; 126 | __entry->flags = t->flags; 127 | ), 128 | TP_printk("transaction=%d dest_node=%d dest_proc=%d dest_thread=%d reply=%d flags=0x%x code=0x%x", 129 | __entry->debug_id, __entry->target_node, 130 | __entry->to_proc, __entry->to_thread, 131 | __entry->reply, __entry->flags, __entry->code) 132 | ); 133 | 134 | TRACE_EVENT(binder_transaction_received, 135 | TP_PROTO(struct binder_transaction *t), 136 | TP_ARGS(t), 137 | 138 | TP_STRUCT__entry( 139 | __field(int, debug_id) 140 | ), 141 | TP_fast_assign( 142 | __entry->debug_id = t->debug_id; 143 | ), 144 | TP_printk("transaction=%d", __entry->debug_id) 145 | ); 146 | 147 | TRACE_EVENT(binder_transaction_node_to_ref, 148 | TP_PROTO(struct binder_transaction *t, struct binder_node *node, 149 | struct binder_ref *ref), 150 | TP_ARGS(t, node, ref), 151 | 152 | TP_STRUCT__entry( 153 | __field(int, debug_id) 154 | __field(int, node_debug_id) 155 | __field(binder_uintptr_t, node_ptr) 156 | __field(int, ref_debug_id) 157 | __field(uint32_t, ref_desc) 158 | ), 159 | TP_fast_assign( 160 | __entry->debug_id = t->debug_id; 161 | __entry->node_debug_id = node->debug_id; 162 | __entry->node_ptr = node->ptr; 163 | __entry->ref_debug_id = ref->debug_id; 164 | __entry->ref_desc = ref->desc; 165 | ), 166 | TP_printk("transaction=%d node=%d src_ptr=0x%016llx ==> dest_ref=%d dest_desc=%d", 167 | __entry->debug_id, __entry->node_debug_id, 168 | (u64)__entry->node_ptr, 169 | __entry->ref_debug_id, __entry->ref_desc) 170 | ); 171 | 172 | TRACE_EVENT(binder_transaction_ref_to_node, 173 | TP_PROTO(struct binder_transaction *t, struct binder_ref *ref), 174 | TP_ARGS(t, ref), 175 | 176 | TP_STRUCT__entry( 177 | __field(int, debug_id) 178 | __field(int, ref_debug_id) 179 | __field(uint32_t, ref_desc) 180 | __field(int, node_debug_id) 181 | __field(binder_uintptr_t, node_ptr) 182 | ), 183 | TP_fast_assign( 184 | __entry->debug_id = t->debug_id; 185 | __entry->ref_debug_id = ref->debug_id; 186 | __entry->ref_desc = ref->desc; 187 | __entry->node_debug_id = ref->node->debug_id; 188 | __entry->node_ptr = ref->node->ptr; 189 | ), 190 | TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%016llx", 191 | __entry->debug_id, __entry->node_debug_id, 192 | __entry->ref_debug_id, __entry->ref_desc, 193 | (u64)__entry->node_ptr) 194 | ); 195 | 196 | TRACE_EVENT(binder_transaction_ref_to_ref, 197 | TP_PROTO(struct binder_transaction *t, struct binder_ref *src_ref, 198 | struct binder_ref *dest_ref), 199 | TP_ARGS(t, src_ref, dest_ref), 200 | 201 | TP_STRUCT__entry( 202 | __field(int, debug_id) 203 | __field(int, node_debug_id) 204 | __field(int, src_ref_debug_id) 205 | __field(uint32_t, src_ref_desc) 206 | __field(int, dest_ref_debug_id) 207 | __field(uint32_t, dest_ref_desc) 208 | ), 209 | TP_fast_assign( 210 | __entry->debug_id = t->debug_id; 211 | __entry->node_debug_id = src_ref->node->debug_id; 212 | __entry->src_ref_debug_id = src_ref->debug_id; 213 | __entry->src_ref_desc = src_ref->desc; 214 | __entry->dest_ref_debug_id = dest_ref->debug_id; 215 | __entry->dest_ref_desc = dest_ref->desc; 216 | ), 217 | TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ref=%d dest_desc=%d", 218 | __entry->debug_id, __entry->node_debug_id, 219 | __entry->src_ref_debug_id, __entry->src_ref_desc, 220 | __entry->dest_ref_debug_id, __entry->dest_ref_desc) 221 | ); 222 | 223 | TRACE_EVENT(binder_transaction_fd, 224 | TP_PROTO(struct binder_transaction *t, int src_fd, int dest_fd), 
225 | TP_ARGS(t, src_fd, dest_fd), 226 | 227 | TP_STRUCT__entry( 228 | __field(int, debug_id) 229 | __field(int, src_fd) 230 | __field(int, dest_fd) 231 | ), 232 | TP_fast_assign( 233 | __entry->debug_id = t->debug_id; 234 | __entry->src_fd = src_fd; 235 | __entry->dest_fd = dest_fd; 236 | ), 237 | TP_printk("transaction=%d src_fd=%d ==> dest_fd=%d", 238 | __entry->debug_id, __entry->src_fd, __entry->dest_fd) 239 | ); 240 | 241 | DECLARE_EVENT_CLASS(binder_buffer_class, 242 | TP_PROTO(struct binder_buffer *buf), 243 | TP_ARGS(buf), 244 | TP_STRUCT__entry( 245 | __field(int, debug_id) 246 | __field(size_t, data_size) 247 | __field(size_t, offsets_size) 248 | ), 249 | TP_fast_assign( 250 | __entry->debug_id = buf->debug_id; 251 | __entry->data_size = buf->data_size; 252 | __entry->offsets_size = buf->offsets_size; 253 | ), 254 | TP_printk("transaction=%d data_size=%zd offsets_size=%zd", 255 | __entry->debug_id, __entry->data_size, __entry->offsets_size) 256 | ); 257 | 258 | DEFINE_EVENT(binder_buffer_class, binder_transaction_alloc_buf, 259 | TP_PROTO(struct binder_buffer *buffer), 260 | TP_ARGS(buffer)); 261 | 262 | DEFINE_EVENT(binder_buffer_class, binder_transaction_buffer_release, 263 | TP_PROTO(struct binder_buffer *buffer), 264 | TP_ARGS(buffer)); 265 | 266 | DEFINE_EVENT(binder_buffer_class, binder_transaction_failed_buffer_release, 267 | TP_PROTO(struct binder_buffer *buffer), 268 | TP_ARGS(buffer)); 269 | 270 | TRACE_EVENT(binder_update_page_range, 271 | TP_PROTO(struct binder_proc *proc, bool allocate, 272 | void *start, void *end), 273 | TP_ARGS(proc, allocate, start, end), 274 | TP_STRUCT__entry( 275 | __field(int, proc) 276 | __field(bool, allocate) 277 | __field(size_t, offset) 278 | __field(size_t, size) 279 | ), 280 | TP_fast_assign( 281 | __entry->proc = proc->pid; 282 | __entry->allocate = allocate; 283 | __entry->offset = start - proc->buffer; 284 | __entry->size = end - start; 285 | ), 286 | TP_printk("proc=%d allocate=%d offset=%zu size=%zu", 287 | __entry->proc, __entry->allocate, 288 | __entry->offset, __entry->size) 289 | ); 290 | 291 | TRACE_EVENT(binder_command, 292 | TP_PROTO(uint32_t cmd), 293 | TP_ARGS(cmd), 294 | TP_STRUCT__entry( 295 | __field(uint32_t, cmd) 296 | ), 297 | TP_fast_assign( 298 | __entry->cmd = cmd; 299 | ), 300 | TP_printk("cmd=0x%x %s", 301 | __entry->cmd, 302 | _IOC_NR(__entry->cmd) < ARRAY_SIZE(binder_command_strings) ? 303 | binder_command_strings[_IOC_NR(__entry->cmd)] : 304 | "unknown") 305 | ); 306 | 307 | TRACE_EVENT(binder_return, 308 | TP_PROTO(uint32_t cmd), 309 | TP_ARGS(cmd), 310 | TP_STRUCT__entry( 311 | __field(uint32_t, cmd) 312 | ), 313 | TP_fast_assign( 314 | __entry->cmd = cmd; 315 | ), 316 | TP_printk("cmd=0x%x %s", 317 | __entry->cmd, 318 | _IOC_NR(__entry->cmd) < ARRAY_SIZE(binder_return_strings) ? 319 | binder_return_strings[_IOC_NR(__entry->cmd)] : 320 | "unknown") 321 | ); 322 | 323 | #endif /* _BINDER_TRACE_H */ 324 | 325 | #undef TRACE_INCLUDE_PATH 326 | #undef TRACE_INCLUDE_FILE 327 | #define TRACE_INCLUDE_PATH . 
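/*
 * Usage sketch (hedged): once binder_linux is loaded on a kernel built with
 * tracepoint support, the events declared in this header are exported under
 * the "binder" group in tracefs. Assuming tracefs is mounted in the usual
 * debugfs location, they can be enabled and observed with:
 *
 *   echo 1 > /sys/kernel/debug/tracing/events/binder/enable
 *   cat /sys/kernel/debug/tracing/trace_pipe
 */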
328 | #define TRACE_INCLUDE_FILE binder_trace 329 | #include 330 | -------------------------------------------------------------------------------- /binder/deps.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | static struct vm_struct *(*get_vm_area_ptr)(unsigned long, unsigned long) = NULL; 12 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) 13 | static void (*zap_page_range_ptr)(struct vm_area_struct *, unsigned long, unsigned long) = NULL; 14 | #else 15 | static void (*zap_page_range_ptr)(struct vm_area_struct *, unsigned long, unsigned long, struct zap_details *) = NULL; 16 | #endif 17 | static int (*map_kernel_range_noflush_ptr)(unsigned long start, unsigned long size, pgprot_t prot, struct page **pages) = NULL; 18 | static void (*unmap_kernel_range_ptr)(unsigned long, unsigned long) = NULL; 19 | static struct files_struct *(*get_files_struct_ptr)(struct task_struct *) = NULL; 20 | static void (*put_files_struct_ptr)(struct files_struct *) = NULL; 21 | static struct sighand_struct *(*__lock_task_sighand_ptr)(struct task_struct *, unsigned long *) = NULL; 22 | static int (*__alloc_fd_ptr)(struct files_struct *files, unsigned start, unsigned end, unsigned flags) = NULL; 23 | static void (*__fd_install_ptr)(struct files_struct *files, unsigned int fd, struct file *file) = NULL; 24 | static int (*__close_fd_ptr)(struct files_struct *files, unsigned int fd) = NULL; 25 | static int (*can_nice_ptr)(const struct task_struct *, const int) = NULL; 26 | static int (*security_binder_set_context_mgr_ptr)(struct task_struct *mgr) = NULL; 27 | static int (*security_binder_transaction_ptr)(struct task_struct *from, struct task_struct *to) = NULL; 28 | static int (*security_binder_transfer_binder_ptr)(struct task_struct *from, struct task_struct *to) = NULL; 29 | static int (*security_binder_transfer_file_ptr)(struct task_struct *from, struct task_struct *to, struct file *file) = NULL; 30 | 31 | struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) 32 | { 33 | if (!get_vm_area_ptr) 34 | get_vm_area_ptr = kallsyms_lookup_name("get_vm_area"); 35 | return get_vm_area_ptr(size, flags); 36 | } 37 | 38 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) 39 | void zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size) 40 | #else 41 | void zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size, struct zap_details *details) 42 | #endif 43 | { 44 | if (!zap_page_range_ptr) 45 | zap_page_range_ptr = kallsyms_lookup_name("zap_page_range"); 46 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) 47 | zap_page_range_ptr(vma, address, size); 48 | #else 49 | zap_page_range_ptr(vma, address, size, details); 50 | #endif 51 | } 52 | 53 | int map_kernel_range_noflush(unsigned long start, unsigned long size, pgprot_t prot, struct page **pages) 54 | { 55 | if (!map_kernel_range_noflush_ptr) 56 | map_kernel_range_noflush_ptr = kallsyms_lookup_name("map_kernel_range_noflush"); 57 | return map_kernel_range_noflush_ptr(start, size, prot, pages); 58 | } 59 | 60 | void unmap_kernel_range(unsigned long addr, unsigned long size) 61 | { 62 | if (!unmap_kernel_range_ptr) 63 | unmap_kernel_range_ptr = kallsyms_lookup_name("unmap_kernel_range"); 64 | unmap_kernel_range_ptr(addr, size); 65 | } 66 | 67 | struct files_struct *get_files_struct(struct task_struct *task) 68 | { 69 | if 
(!get_files_struct_ptr) 70 | get_files_struct_ptr = kallsyms_lookup_name("get_files_struct"); 71 | return get_files_struct_ptr(task); 72 | } 73 | 74 | void put_files_struct(struct files_struct *files) 75 | { 76 | if (!put_files_struct_ptr) 77 | put_files_struct_ptr = kallsyms_lookup_name("put_files_struct"); 78 | put_files_struct_ptr(files); 79 | } 80 | 81 | struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, unsigned long *flags) 82 | { 83 | if (!__lock_task_sighand_ptr) 84 | __lock_task_sighand_ptr = kallsyms_lookup_name("__lock_task_sighand"); 85 | return __lock_task_sighand_ptr(tsk, flags); 86 | } 87 | 88 | int __alloc_fd(struct files_struct *files, unsigned start, unsigned end, unsigned flags) 89 | { 90 | if (!__alloc_fd_ptr) 91 | __alloc_fd_ptr = kallsyms_lookup_name("__alloc_fd"); 92 | return __alloc_fd_ptr(files, start, end, flags); 93 | } 94 | 95 | void __fd_install(struct files_struct *files, unsigned int fd, struct file *file) 96 | { 97 | if (!__fd_install_ptr) 98 | __fd_install_ptr = kallsyms_lookup_name("__fd_install"); 99 | __fd_install_ptr(files, fd, file); 100 | } 101 | 102 | int __close_fd(struct files_struct *files, unsigned int fd) 103 | { 104 | if (!__close_fd_ptr) 105 | __close_fd_ptr = kallsyms_lookup_name("__close_fd_ptr"); 106 | return __close_fd_ptr(files, fd); 107 | } 108 | 109 | int can_nice(const struct task_struct *p, const int nice) 110 | { 111 | if (!can_nice_ptr) 112 | can_nice_ptr = kallsyms_lookup_name("can_nice"); 113 | return can_nice_ptr(p, nice); 114 | } 115 | 116 | int security_binder_set_context_mgr(struct task_struct *mgr) 117 | { 118 | if (!security_binder_set_context_mgr_ptr) 119 | security_binder_set_context_mgr_ptr = kallsyms_lookup_name("security_binder_set_context_mgr"); 120 | return security_binder_set_context_mgr_ptr(mgr); 121 | } 122 | 123 | int security_binder_transaction(struct task_struct *from, struct task_struct *to) 124 | { 125 | if (!security_binder_transaction_ptr) 126 | security_binder_transaction_ptr = kallsyms_lookup_name("security_binder_transaction"); 127 | return security_binder_transaction_ptr(from, to); 128 | } 129 | 130 | int security_binder_transfer_binder(struct task_struct *from, struct task_struct *to) 131 | { 132 | if (!security_binder_transfer_binder_ptr) 133 | security_binder_transfer_binder_ptr = kallsyms_lookup_name("security_binder_transfer_binder"); 134 | return security_binder_transfer_binder_ptr(from, to); 135 | } 136 | 137 | int security_binder_transfer_file(struct task_struct *from, struct task_struct *to, struct file *file) 138 | { 139 | if (!security_binder_transfer_file_ptr) 140 | security_binder_transfer_file_ptr = kallsyms_lookup_name("security_binder_transfer_file"); 141 | return security_binder_transfer_file_ptr(from, to, file); 142 | } 143 | -------------------------------------------------------------------------------- /binder/dkms.conf: -------------------------------------------------------------------------------- 1 | PACKAGE_NAME="anbox-binder" 2 | PACKAGE_VERSION="1" 3 | CLEAN="make clean" 4 | MAKE[0]="make all KERNEL_SRC=/lib/modules/$kernelver/build" 5 | BUILT_MODULE_NAME[0]="binder_linux" 6 | DEST_MODULE_LOCATION[0]="/updates" 7 | AUTOINSTALL="yes" 8 | -------------------------------------------------------------------------------- /debian/README.Debian: -------------------------------------------------------------------------------- 1 | MODULE_NAME DKMS module for Debian 2 | 3 | This package was automatically generated by the DKMS system, 4 | for distribution on Debian based 
operating systems. 5 | 6 | -------------------------------------------------------------------------------- /debian/changelog: -------------------------------------------------------------------------------- 1 | anbox-modules (13) bionic; urgency=medium 2 | 3 | * staging: ashmem: Fix SIGBUS crash when traversing mmaped ashmem pages 4 | * ashmem: account for older kernel which don't have vma_set_anonymous 5 | 6 | -- Simon Fels Sun, 02 Sep 2018 13:44:17 +0200 7 | 8 | anbox-modules (12) bionic; urgency=medium 9 | 10 | * Bump version for new PPA release 11 | 12 | -- Simon Fels Thu, 05 Jul 2018 17:40:54 +0200 13 | 14 | anbox-modules (11) bionic; urgency=medium 15 | 16 | * Split kernel modules out of anbox main repository. 17 | 18 | -- Shengjing Zhu Fri, 08 Jun 2018 23:53:35 +0800 19 | 20 | anbox (10) bionic; urgency=medium 21 | 22 | * Bump version 23 | 24 | -- Simon Fels Wed, 23 May 2018 11:05:05 +0200 25 | 26 | anbox (8) artful; urgency=medium 27 | 28 | * Drop upstart/systemctl session jobs 29 | 30 | -- Simon Fels Fri, 14 Jul 2017 20:28:49 +0200 31 | 32 | anbox (7) artful; urgency=medium 33 | 34 | * Rebuild dkms modules with the kernel asked for and not the current 35 | running kernel. 36 | 37 | -- Simon Fels Thu, 15 Jun 2017 18:19:07 +0200 38 | 39 | anbox (6) artful; urgency=medium 40 | 41 | * Add anbox-common package which ships any additional files needed for 42 | anbox but can't be installed with a snap as of today. 43 | 44 | -- Simon Fels Sat, 29 Apr 2017 12:06:56 +0200 45 | 46 | anbox (5) zesty; urgency=medium 47 | 48 | * Rework packaging to also ship things we installed through the snap 49 | based installer before. 50 | 51 | -- Simon Fels Thu, 20 Apr 2017 19:58:22 +0200 52 | 53 | anbox (4) zesty; urgency=medium 54 | 55 | * Fetch dkms version from debian package in post-install step. 56 | 57 | -- Simon Fels Thu, 23 Feb 2017 19:12:15 +0100 58 | 59 | anbox (3) zesty; urgency=medium 60 | 61 | * Use correct package version in our Makefile to avoid failing module 62 | builds on the target device. 63 | 64 | -- Simon Fels Tue, 21 Feb 2017 07:36:45 +0100 65 | 66 | anbox (1) xenial; urgency=low 67 | 68 | * Initial release. 69 | 70 | -- Simon Fels Mon, 06 Feb 2017 21:43:58 +0100 71 | -------------------------------------------------------------------------------- /debian/compat: -------------------------------------------------------------------------------- 1 | 9 2 | -------------------------------------------------------------------------------- /debian/control: -------------------------------------------------------------------------------- 1 | Source: anbox-modules 2 | Section: kernel 3 | Priority: optional 4 | Maintainer: Simon Fels 5 | Uploaders: 6 | Shengjing Zhu , 7 | Build-Depends: 8 | debhelper (>= 9), 9 | dkms, 10 | Standards-Version: 4.1.4 11 | Homepage: https://anbox.io 12 | Vcs-Browser: https://github.com/anbox/anbox-modules 13 | Vcs-Git: https://github.com/anbox/anbox-modules.git 14 | 15 | Package: anbox-modules-dkms 16 | Architecture: all 17 | Depends: 18 | ${misc:Depends}, 19 | Description: Android kernel driver (binder, ashmem) in DKMS format. 20 | . 21 | This package contains a out-of-tree version of the core Android 22 | kernel functionalities binder and ashmem. 
23 | -------------------------------------------------------------------------------- /debian/copyright: -------------------------------------------------------------------------------- 1 | Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ 2 | Upstream-Name: anbox-modules 3 | Source: http://github.com/anbox/anbox-modules 4 | 5 | Files: * 6 | Copyright: 2008-2012 Google Inc. 7 | License: GPL-2 8 | 9 | Files: debian/* 10 | Copyright: 2016-2018, Simon Fels 11 | 2018, Shengjing Zhu 12 | License: GPL-3 13 | 14 | License: GPL-2 15 | This package is free software; you can redistribute it and/or modify 16 | it under the terms of the GNU General Public License as published by 17 | the Free Software Foundation; version 2 of the License. 18 | . 19 | This package is distributed in the hope that it will be useful, 20 | but WITHOUT ANY WARRANTY; without even the implied warranty of 21 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 22 | GNU General Public License for more details. 23 | . 24 | You should have received a copy of the GNU General Public License 25 | along with this program. If not, see 26 | . 27 | On Debian systems, the complete text of the GNU General 28 | Public License version 2 can be found in "/usr/share/common-licenses/GPL-2". 29 | 30 | License: GPL-3 31 | This program is free software: you can redistribute it and/or modify 32 | it under the terms of the GNU General Public License as published by 33 | the Free Software Foundation, version 3 of the License. 34 | . 35 | This package is distributed in the hope that it will be useful, 36 | but WITHOUT ANY WARRANTY; without even the implied warranty of 37 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 38 | GNU General Public License for more details. 39 | . 40 | You should have received a copy of the GNU General Public License 41 | along with this program. If not, see . 42 | . 43 | On Debian systems, the complete text of the GNU General 44 | Public License version 3 can be found in "/usr/share/common-licenses/GPL-3". 
45 | -------------------------------------------------------------------------------- /debian/dirs: -------------------------------------------------------------------------------- 1 | usr/src/anbox-1 2 | etc/modules-load.d/ 3 | -------------------------------------------------------------------------------- /debian/dkms: -------------------------------------------------------------------------------- 1 | PACKAGE_NAME="anbox" 2 | PACKAGE_VERSION="1" 3 | CLEAN="make -C ashmem clean && make -C binder clean" 4 | MAKE[0]="'make' -j$parallel_jobs -C ashmem KERNEL_SRC=$kernel_source_dir && make -j$parallel_jobs -C binder KERNEL_SRC=$kernel_source_dir" 5 | BUILT_MODULE_NAME[0]="ashmem_linux" 6 | BUILT_MODULE_LOCATION[0]="ashmem" 7 | DEST_MODULE_LOCATION[0]="/updates" 8 | BUILT_MODULE_NAME[1]="binder_linux" 9 | BUILT_MODULE_LOCATION[1]="binder" 10 | DEST_MODULE_LOCATION[1]="/updates" 11 | AUTOINSTALL="yes" 12 | -------------------------------------------------------------------------------- /debian/install: -------------------------------------------------------------------------------- 1 | ashmem usr/src/anbox-1 2 | binder usr/src/anbox-1 3 | anbox.conf /etc/modules-load.d/ 4 | -------------------------------------------------------------------------------- /debian/rules: -------------------------------------------------------------------------------- 1 | #!/usr/bin/make -f 2 | # -*- makefile -*- 3 | 4 | %: 5 | dh $@ --with dkms 6 | -------------------------------------------------------------------------------- /debian/source/format: -------------------------------------------------------------------------------- 1 | 3.0 (native) 2 | -------------------------------------------------------------------------------- /debian/source/options: -------------------------------------------------------------------------------- 1 | tar-ignore = ".git" 2 | tar-ignore = "*.swp" 3 | -------------------------------------------------------------------------------- /debian/udev: -------------------------------------------------------------------------------- 1 | ../99-anbox.rules -------------------------------------------------------------------------------- /scripts/build-against-kernel.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -ex 4 | 5 | KVER=${1:-master} 6 | CC=${2:-gcc} 7 | 8 | src_dir="../linux-${KVER}" 9 | 10 | if [ "${KVER}" = "master" ]; then 11 | archive=master.tar.gz 12 | else 13 | archive="v${KVER}.tar.gz" 14 | fi 15 | 16 | if [ ! 
-d "${src_dir}" ]; then 17 | wget -O - "https://github.com/torvalds/linux/archive/${archive}" | tar -C ../ -xz 18 | fi 19 | 20 | ( 21 | cd "$src_dir" || exit 1 22 | make allmodconfig CC=${CC} HOSTCC=${CC} 23 | make prepare CC=${CC} HOSTCC=${CC} 24 | make scripts CC=${CC} HOSTCC=${CC} 25 | ) 26 | 27 | ( 28 | cd ashmem || exit 1 29 | make KERNEL_SRC="../${src_dir}" CC=${CC} HOSTCC=${CC} 30 | ) 31 | 32 | ( 33 | cd binder || exit 1 34 | make KERNEL_SRC="../${src_dir}" CC=${CC} HOSTCC=${CC} 35 | ) 36 | -------------------------------------------------------------------------------- /scripts/build-with-docker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | docker pull ubuntu:16.04 3 | docker run -i -t -v $PWD:/anbox ubuntu:16.04 /anbox/scripts/clean-build.sh 4 | -------------------------------------------------------------------------------- /scripts/clean-build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -ex 4 | 5 | apt-get update -qq 6 | apt-get install -qq -y \ 7 | build-essential \ 8 | debhelper \ 9 | git 10 | 11 | apt-get clean 12 | 13 | cd /anbox 14 | 15 | cleanup() { 16 | # In cases where anbox comes directly from a checked out Android 17 | # build environment we miss some symlinks which are present on 18 | # the host and don't have a valid git repository in that case. 19 | if [ -d .git ] ; then 20 | git clean -fdx . 21 | git reset --hard 22 | fi 23 | } 24 | 25 | cleanup 26 | 27 | apt-get install -y build-essential curl devscripts gdebi-core dkms dh-systemd 28 | apt-get install -y $(gdebi --quiet --apt-line ./debian/control) 29 | debuild -us -uc 30 | --------------------------------------------------------------------------------