├── .gitignore
├── Kconfig
├── Makefile
├── README.md
├── ion
│   ├── Kconfig
│   ├── Makefile
│   ├── compat_ion.c
│   ├── compat_ion.h
│   ├── ion-ioctl.c
│   ├── ion.c
│   ├── ion.h
│   ├── ion_carveout_heap.c
│   ├── ion_chunk_heap.c
│   ├── ion_cma_heap.c
│   ├── ion_heap.c
│   ├── ion_of.c
│   ├── ion_of.h
│   ├── ion_page_pool.c
│   ├── ion_priv.h
│   ├── ion_system_heap.c
│   ├── sunxi
│   │   ├── Makefile
│   │   ├── cache-v7.S
│   │   ├── cache.S
│   │   ├── cache.h
│   │   ├── sunxi_ion.c
│   │   ├── sunxi_ion.h
│   │   └── sunxi_ion_priv.h
│   └── uapi
│       └── ion.h
└── ve
    ├── Kconfig
    ├── Makefile
    ├── cedar_ve.c
    ├── cedar_ve.h
    ├── cedar_ve_priv.h
    └── ve_mem_list.h

/.gitignore:
--------------------------------------------------------------------------------
1 | .vscode
--------------------------------------------------------------------------------
/Kconfig:
--------------------------------------------------------------------------------
1 | # SPDX-License-Identifier: GPL-2.0
2 | source "drivers/staging/media/sunxi/cedar/ve/Kconfig"
3 | source "drivers/staging/media/sunxi/cedar/ion/Kconfig"
4 | 
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | # SPDX-License-Identifier: GPL-2.0
2 | obj-$(CONFIG_VIDEO_SUNXI_CEDAR_VE) += ve/
3 | obj-$(CONFIG_VIDEO_SUNXI_CEDAR_ION) += ion/
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Allwinner CedarX Driver for Mainline Linux 5.4
2 | ### VideoEngine driver based on the Allwinner H6 Homlet BSP
3 | ### Ion driver based on Google Android Ion
4 | 
5 | ## Install
6 | 
7 | ### Put all files in "drivers/staging/media/sunxi/cedar"
8 | 
9 | ### Add source to "drivers/staging/media/sunxi/Kconfig"
10 | ```
11 | source "drivers/staging/media/sunxi/cedar/Kconfig"
12 | ```
13 | Demo
14 | ```
15 | # SPDX-License-Identifier: GPL-2.0
16 | config VIDEO_SUNXI
17 |     bool "Allwinner sunXi family Video Devices"
18 |     depends on ARCH_SUNXI || COMPILE_TEST
19 |     help
20 |       If you have an Allwinner SoC based on the sunXi family, say Y.
21 | 
22 |       Note that this option doesn't include new drivers in the
23 |       kernel: saying N will just cause Kconfig to skip all the
24 |       questions about Allwinner media devices.
25 | 
26 | if VIDEO_SUNXI
27 | 
28 | source "drivers/staging/media/sunxi/cedrus/Kconfig"
29 | source "drivers/staging/media/sunxi/cedar/Kconfig"
30 | 
31 | endif
32 | ```
33 | 
34 | ### Add obj to "drivers/staging/media/sunxi/Makefile"
35 | ```
36 | obj-y += cedar/
37 | ```
38 | Demo
39 | ```
40 | # SPDX-License-Identifier: GPL-2.0
41 | obj-$(CONFIG_VIDEO_SUNXI_CEDRUS) += cedrus/
42 | obj-y += cedar/
43 | ```
44 | 
45 | ## DeviceTree
46 | ### Demo for Allwinner V3 / V3s / S3L / S3
47 | ```
48 | syscon: syscon@1c00000 {
49 |     compatible = "allwinner,sun8i-v3s-system-controller", "allwinner,sun8i-h3-system-control", "syscon";
50 |     reg = <0x01c00000 0xd0>;
51 |     #address-cells = <1>;
52 |     #size-cells = <1>;
53 |     ranges;
54 | 
55 |     sram_c: sram@1d00000 {
56 |         compatible = "mmio-sram";
57 |         reg = <0x01d00000 0x80000>;
58 |         #address-cells = <1>;
59 |         #size-cells = <1>;
60 |         ranges = <0 0x01d00000 0x80000>;
61 | 
62 |         ve_sram: sram-section@0 {
63 |             compatible = "allwinner,sun8i-v3s-sram-c", "allwinner,sun4i-a10-sram-c1";
64 |             reg = <0x000000 0x80000>;
65 |         };
66 |     };
67 | };
68 | 
69 | cedarx: video-codec@1c0e000 {
70 |     compatible = "allwinner,sun8i-v3-cedar";
71 |     reg = <0x01c0e000 0x1000>;
72 |     clocks = <&ccu CLK_BUS_VE>, <&ccu CLK_VE>, <&ccu CLK_DRAM_VE>;
73 |     clock-names = "ahb", "mod", "ram";
74 |     resets = <&ccu RST_BUS_VE>;
75 |     interrupts = ;
76 |     allwinner,sram = <&ve_sram 1>;
77 |     status = "disabled";
78 | };
79 | 
80 | ion: ion {
81 |     compatible = "allwinner,sunxi-ion";
82 |     status = "disabled";
83 |     heap_cma@0{
84 |         compatible = "allwinner,cma";
85 |         heap-name = "cma";
86 |         heap-id = <0x4>;
87 |         heap-base = <0x0>;
88 |         heap-size = <0x0>;
89 |         heap-type = "ion_cma";
90 |     };
91 | };
92 | ```
93 | ### Demo for Allwinner F1C100s / F1C200s
94 | 
95 | In drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c
96 | 
97 | Change
98 | 
99 |     static SUNXI_CCU_GATE(ve_clk, "ve", "pll-audio", 0x13c, BIT(31), 0);
100 | 
101 | To
102 | 
103 |     static SUNXI_CCU_GATE(ve_clk, "ve", "pll-ve", 0x13c, BIT(31), 0);
104 | 
105 | ```
106 | sram-controller@1c00000 {
107 |     compatible = "allwinner,suniv-f1c100s-system-control",
108 |                  "allwinner,sun4i-a10-system-control";
109 |     reg = <0x01c00000 0x30>;
110 |     #address-cells = <1>;
111 |     #size-cells = <1>;
112 |     ranges;
113 | 
114 |     sram_c: sram@1d00000 {
115 |         compatible = "mmio-sram";
116 |         reg = <0x01d00000 0x80000>;
117 |         #address-cells = <1>;
118 |         #size-cells = <1>;
119 |         ranges = <0 0x01d00000 0x80000>;
120 | 
121 |         ve_sram: sram-section@0 {
122 |             compatible = "allwinner,suniv-f1c100s-sram-c", "allwinner,sun4i-a10-sram-c1";
123 |             reg = <0x000000 0x80000>;
124 |         };
125 |     };
126 | };
127 | 
128 | cedarx: video-codec@1c0e000 {
129 |     compatible = "allwinner,suniv-f1c100s-cedar";
130 |     reg = <0x01c0e000 0x1000>;
131 |     clocks = <&ccu CLK_BUS_VE>, <&ccu CLK_VE>, <&ccu CLK_DRAM_VE>;
132 |     clock-names = "ahb", "mod", "ram";
133 |     resets = <&ccu RST_BUS_VE>;
134 |     interrupts = <34>;
135 |     allwinner,sram = <&ve_sram 1>;
136 |     status = "disabled";
137 | };
138 | 
139 | ion: ion {
140 |     compatible = "allwinner,sunxi-ion";
141 |     status = "disabled";
142 |     heap_cma@0{
143 |         compatible = "allwinner,cma";
144 |         heap-name = "cma";
145 |         heap-id = <0x4>;
146 |         heap-base = <0x0>;
147 |         heap-size = <0x0>;
148 |         heap-type = "ion_cma";
149 |     };
150 | };
151 | ```
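Both demo `cedarx` and `ion` nodes above are declared with `status = "disabled"`, so they still have to be switched on from the board-level device tree. A minimal sketch of that override, assuming the `cedarx` and `ion` labels from the demos above; the board file name is only an example:
```
/* In the board .dts, e.g. sun8i-v3s-licheepi-zero.dts (example board file) */
&cedarx {
    status = "okay";
};

&ion {
    status = "okay";
};
```
Rebuild and deploy the device tree blob together with the kernel so both drivers can probe.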
152 | ## Compile
153 | ### Enable the drivers in menuconfig
154 | ```
155 | > Device Drivers > Staging drivers > Media staging drivers
156 | [*] Allwinner sunXi family Video Devices
157 | <*> Allwinner CedarX Video Engine Driver
158 | <*> Allwinner CedarX Ion Driver
159 | ```
160 | ### Configure the "DMA Contiguous Memory Allocator"
161 | ```
162 | > Library routines
163 | -*- DMA Contiguous Memory Allocator
164 | *** Default contiguous memory area size: ***
165 | (32) Size in Mega Bytes
166 | Selected region size (Use mega bytes value only) --->
167 | ```
168 | ... and the kernel is ready to build.
169 | 
170 | ## Debug
171 | ### ION_IOC_ALLOC error / memory alloc fail
172 | Increase the following kernel config options:
173 | ```
174 | CMA_AREAS
175 | CMA_SIZE_MBYTES
176 | ```
177 | ### Other problems
178 | Please report them in an issue.
179 | 
180 | ## Userspace library
181 | https://github.com/aodzip/libcedarc
182 | 
--------------------------------------------------------------------------------
/ion/Kconfig:
--------------------------------------------------------------------------------
1 | config VIDEO_SUNXI_CEDAR_ION
2 |     tristate "Allwinner CedarX Ion Driver"
3 |     depends on ARCH_SUNXI && OF_ADDRESS && HAS_DMA && MMU
4 |     select GENERIC_ALLOCATOR
5 |     select DMA_SHARED_BUFFER
6 |     select CMA
7 |     select DMA_CMA
8 |     help
9 |       Allwinner libcdc compatible Ion driver.
--------------------------------------------------------------------------------
/ion/Makefile:
--------------------------------------------------------------------------------
1 | obj-$(CONFIG_VIDEO_SUNXI_CEDAR_ION) += ion.o ion-ioctl.o ion_heap.o \
2 |     ion_page_pool.o ion_system_heap.o \
3 |     ion_carveout_heap.o ion_chunk_heap.o ion_cma_heap.o \
4 |     ion_of.o
5 | ifdef CONFIG_COMPAT
6 | obj-$(CONFIG_VIDEO_SUNXI_CEDAR_ION) += compat_ion.o
7 | endif
8 | obj-$(CONFIG_VIDEO_SUNXI_CEDAR_ION) += sunxi/
--------------------------------------------------------------------------------
/ion/compat_ion.c:
--------------------------------------------------------------------------------
1 | /*
2 |  * drivers/staging/android/ion/compat_ion.c
3 |  *
4 |  * Copyright (C) 2013 Google, Inc.
5 |  *
6 |  * This software is licensed under the terms of the GNU General Public
7 |  * License version 2, as published by the Free Software Foundation, and
8 |  * may be copied, distributed, and modified under those terms.
9 |  *
10 |  * This program is distributed in the hope that it will be useful,
11 |  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 |  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13 |  * GNU General Public License for more details.
14 | * 15 | */ 16 | 17 | #include 18 | #include 19 | #include 20 | 21 | #include "ion.h" 22 | #include "compat_ion.h" 23 | 24 | /* See drivers/staging/android/uapi/ion.h for the definition of these structs */ 25 | struct compat_ion_allocation_data { 26 | compat_size_t len; 27 | compat_size_t align; 28 | compat_uint_t heap_id_mask; 29 | compat_uint_t flags; 30 | compat_int_t handle; 31 | }; 32 | 33 | struct compat_ion_custom_data { 34 | compat_uint_t cmd; 35 | compat_ulong_t arg; 36 | }; 37 | 38 | struct compat_ion_handle_data { 39 | compat_int_t handle; 40 | }; 41 | 42 | #define COMPAT_ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \ 43 | struct compat_ion_allocation_data) 44 | #define COMPAT_ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, \ 45 | struct compat_ion_handle_data) 46 | #define COMPAT_ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, \ 47 | struct compat_ion_custom_data) 48 | 49 | static int compat_get_ion_allocation_data( 50 | struct compat_ion_allocation_data __user *data32, 51 | struct ion_allocation_data __user *data) 52 | { 53 | compat_size_t s; 54 | compat_uint_t u; 55 | compat_int_t i; 56 | int err; 57 | 58 | err = get_user(s, &data32->len); 59 | err |= put_user(s, &data->len); 60 | err |= get_user(s, &data32->align); 61 | err |= put_user(s, &data->align); 62 | err |= get_user(u, &data32->heap_id_mask); 63 | err |= put_user(u, &data->heap_id_mask); 64 | err |= get_user(u, &data32->flags); 65 | err |= put_user(u, &data->flags); 66 | err |= get_user(i, &data32->handle); 67 | err |= put_user(i, &data->handle); 68 | 69 | return err; 70 | } 71 | 72 | static int compat_get_ion_handle_data( 73 | struct compat_ion_handle_data __user *data32, 74 | struct ion_handle_data __user *data) 75 | { 76 | compat_int_t i; 77 | int err; 78 | 79 | err = get_user(i, &data32->handle); 80 | err |= put_user(i, &data->handle); 81 | 82 | return err; 83 | } 84 | 85 | static int compat_put_ion_allocation_data( 86 | struct compat_ion_allocation_data __user *data32, 87 | struct ion_allocation_data __user *data) 88 | { 89 | compat_size_t s; 90 | compat_uint_t u; 91 | compat_int_t i; 92 | int err; 93 | 94 | err = get_user(s, &data->len); 95 | err |= put_user(s, &data32->len); 96 | err |= get_user(s, &data->align); 97 | err |= put_user(s, &data32->align); 98 | err |= get_user(u, &data->heap_id_mask); 99 | err |= put_user(u, &data32->heap_id_mask); 100 | err |= get_user(u, &data->flags); 101 | err |= put_user(u, &data32->flags); 102 | err |= get_user(i, &data->handle); 103 | err |= put_user(i, &data32->handle); 104 | 105 | return err; 106 | } 107 | 108 | static int compat_get_ion_custom_data( 109 | struct compat_ion_custom_data __user *data32, 110 | struct ion_custom_data __user *data) 111 | { 112 | compat_uint_t cmd; 113 | compat_ulong_t arg; 114 | int err; 115 | 116 | err = get_user(cmd, &data32->cmd); 117 | err |= put_user(cmd, &data->cmd); 118 | err |= get_user(arg, &data32->arg); 119 | err |= put_user(arg, &data->arg); 120 | 121 | return err; 122 | }; 123 | 124 | long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 125 | { 126 | long ret; 127 | 128 | if (!filp->f_op->unlocked_ioctl) 129 | return -ENOTTY; 130 | 131 | switch (cmd) { 132 | case COMPAT_ION_IOC_ALLOC: 133 | { 134 | struct compat_ion_allocation_data __user *data32; 135 | struct ion_allocation_data __user *data; 136 | int err; 137 | 138 | data32 = compat_ptr(arg); 139 | data = compat_alloc_user_space(sizeof(*data)); 140 | if (!data) 141 | return -EFAULT; 142 | 143 | err = compat_get_ion_allocation_data(data32, data); 144 | if (err) 145 | return 
err; 146 | ret = filp->f_op->unlocked_ioctl(filp, ION_IOC_ALLOC, 147 | (unsigned long)data); 148 | err = compat_put_ion_allocation_data(data32, data); 149 | return ret ? ret : err; 150 | } 151 | case COMPAT_ION_IOC_FREE: 152 | { 153 | struct compat_ion_handle_data __user *data32; 154 | struct ion_handle_data __user *data; 155 | int err; 156 | 157 | data32 = compat_ptr(arg); 158 | data = compat_alloc_user_space(sizeof(*data)); 159 | if (!data) 160 | return -EFAULT; 161 | 162 | err = compat_get_ion_handle_data(data32, data); 163 | if (err) 164 | return err; 165 | 166 | return filp->f_op->unlocked_ioctl(filp, ION_IOC_FREE, 167 | (unsigned long)data); 168 | } 169 | case COMPAT_ION_IOC_CUSTOM: { 170 | struct compat_ion_custom_data __user *data32; 171 | struct ion_custom_data __user *data; 172 | int err; 173 | 174 | data32 = compat_ptr(arg); 175 | data = compat_alloc_user_space(sizeof(*data)); 176 | if (!data) 177 | return -EFAULT; 178 | 179 | err = compat_get_ion_custom_data(data32, data); 180 | if (err) 181 | return err; 182 | 183 | return filp->f_op->unlocked_ioctl(filp, ION_IOC_CUSTOM, 184 | (unsigned long)data); 185 | } 186 | case ION_IOC_SHARE: 187 | case ION_IOC_MAP: 188 | case ION_IOC_IMPORT: 189 | case ION_IOC_SYNC: 190 | return filp->f_op->unlocked_ioctl(filp, cmd, 191 | (unsigned long)compat_ptr(arg)); 192 | default: 193 | return -ENOIOCTLCMD; 194 | } 195 | } 196 | -------------------------------------------------------------------------------- /ion/compat_ion.h: -------------------------------------------------------------------------------- 1 | /* 2 | * drivers/staging/android/ion/compat_ion.h 3 | * 4 | * Copyright (C) 2013 Google, Inc. 5 | * 6 | * This software is licensed under the terms of the GNU General Public 7 | * License version 2, as published by the Free Software Foundation, and 8 | * may be copied, distributed, and modified under those terms. 9 | * 10 | * This program is distributed in the hope that it will be useful, 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 | * GNU General Public License for more details. 14 | * 15 | */ 16 | 17 | #ifndef _LINUX_COMPAT_ION_H 18 | #define _LINUX_COMPAT_ION_H 19 | 20 | #if IS_ENABLED(CONFIG_COMPAT) 21 | 22 | long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); 23 | 24 | #else 25 | 26 | #define compat_ion_ioctl NULL 27 | 28 | #endif /* CONFIG_COMPAT */ 29 | #endif /* _LINUX_COMPAT_ION_H */ 30 | -------------------------------------------------------------------------------- /ion/ion-ioctl.c: -------------------------------------------------------------------------------- 1 | /* 2 | * 3 | * Copyright (C) 2011 Google, Inc. 4 | * 5 | * This software is licensed under the terms of the GNU General Public 6 | * License version 2, as published by the Free Software Foundation, and 7 | * may be copied, distributed, and modified under those terms. 8 | * 9 | * This program is distributed in the hope that it will be useful, 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | * GNU General Public License for more details. 
13 | * 14 | */ 15 | 16 | #include 17 | #include 18 | #include 19 | #include 20 | 21 | #include "ion.h" 22 | #include "ion_priv.h" 23 | #include "compat_ion.h" 24 | 25 | union ion_ioctl_arg { 26 | struct ion_fd_data fd; 27 | struct ion_allocation_data allocation; 28 | struct ion_handle_data handle; 29 | struct ion_custom_data custom; 30 | struct ion_heap_query query; 31 | }; 32 | 33 | static int validate_ioctl_arg(unsigned int cmd, union ion_ioctl_arg *arg) 34 | { 35 | int ret = 0; 36 | 37 | switch (cmd) { 38 | case ION_IOC_HEAP_QUERY: 39 | ret = arg->query.reserved0 != 0; 40 | ret |= arg->query.reserved1 != 0; 41 | ret |= arg->query.reserved2 != 0; 42 | break; 43 | default: 44 | break; 45 | } 46 | 47 | return ret ? -EINVAL : 0; 48 | } 49 | 50 | /* fix up the cases where the ioctl direction bits are incorrect */ 51 | static unsigned int ion_ioctl_dir(unsigned int cmd) 52 | { 53 | switch (cmd) { 54 | case ION_IOC_SYNC: 55 | case ION_IOC_FREE: 56 | case ION_IOC_CUSTOM: 57 | return _IOC_WRITE; 58 | default: 59 | return _IOC_DIR(cmd); 60 | } 61 | } 62 | 63 | long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 64 | { 65 | struct ion_client *client = filp->private_data; 66 | struct ion_device *dev = client->dev; 67 | struct ion_handle *cleanup_handle = NULL; 68 | int ret = 0; 69 | unsigned int dir; 70 | union ion_ioctl_arg data; 71 | 72 | dir = ion_ioctl_dir(cmd); 73 | 74 | if (_IOC_SIZE(cmd) > sizeof(data)) 75 | return -EINVAL; 76 | 77 | /* 78 | * The copy_from_user is unconditional here for both read and write 79 | * to do the validate. If there is no write for the ioctl, the 80 | * buffer is cleared 81 | */ 82 | if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd))) 83 | return -EFAULT; 84 | 85 | ret = validate_ioctl_arg(cmd, &data); 86 | if (ret) { 87 | pr_warn_once("%s: ioctl validate failed\n", __func__); 88 | return ret; 89 | } 90 | 91 | if (!(dir & _IOC_WRITE)) 92 | memset(&data, 0, sizeof(data)); 93 | 94 | switch (cmd) { 95 | case ION_IOC_ALLOC: 96 | { 97 | struct ion_handle *handle; 98 | 99 | handle = ion_alloc(client, data.allocation.len, 100 | data.allocation.align, 101 | data.allocation.heap_id_mask, 102 | data.allocation.flags); 103 | if (IS_ERR(handle)) 104 | return PTR_ERR(handle); 105 | 106 | data.allocation.handle = handle->id; 107 | 108 | cleanup_handle = handle; 109 | break; 110 | } 111 | case ION_IOC_FREE: 112 | { 113 | struct ion_handle *handle; 114 | 115 | mutex_lock(&client->lock); 116 | handle = ion_handle_get_by_id_nolock(client, data.handle.handle); 117 | if (IS_ERR(handle)) { 118 | mutex_unlock(&client->lock); 119 | return PTR_ERR(handle); 120 | } 121 | ion_free_nolock(client, handle); 122 | ion_handle_put_nolock(handle); 123 | mutex_unlock(&client->lock); 124 | break; 125 | } 126 | case ION_IOC_SHARE: 127 | case ION_IOC_MAP: 128 | { 129 | struct ion_handle *handle; 130 | 131 | mutex_lock(&client->lock); 132 | handle = ion_handle_get_by_id_nolock(client, data.handle.handle); 133 | if (IS_ERR(handle)) { 134 | mutex_unlock(&client->lock); 135 | return PTR_ERR(handle); 136 | } 137 | data.fd.fd = ion_share_dma_buf_fd_nolock(client, handle); 138 | ion_handle_put_nolock(handle); 139 | mutex_unlock(&client->lock); 140 | if (data.fd.fd < 0) 141 | ret = data.fd.fd; 142 | break; 143 | } 144 | case ION_IOC_IMPORT: 145 | { 146 | struct ion_handle *handle; 147 | 148 | handle = ion_import_dma_buf_fd(client, data.fd.fd); 149 | if (IS_ERR(handle)) 150 | ret = PTR_ERR(handle); 151 | else 152 | data.handle.handle = handle->id; 153 | break; 154 | } 155 | case 
ION_IOC_SYNC: 156 | { 157 | ret = ion_sync_for_device(client, data.fd.fd); 158 | break; 159 | } 160 | case ION_IOC_CUSTOM: 161 | { 162 | if (!dev->custom_ioctl) 163 | return -ENOTTY; 164 | ret = dev->custom_ioctl(client, data.custom.cmd, 165 | data.custom.arg); 166 | break; 167 | } 168 | case ION_IOC_HEAP_QUERY: 169 | ret = ion_query_heaps(client, &data.query); 170 | break; 171 | 172 | case 5: // Stupid Allwinner libcdc 173 | if (!dev->custom_ioctl) 174 | return -ENOTTY; 175 | ret = dev->custom_ioctl(client, cmd, arg); 176 | break; 177 | 178 | default: 179 | return -ENOTTY; 180 | } 181 | 182 | if (dir & _IOC_READ) { 183 | if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) { 184 | if (cleanup_handle) 185 | ion_free(client, cleanup_handle); 186 | return -EFAULT; 187 | } 188 | } 189 | return ret; 190 | } 191 | -------------------------------------------------------------------------------- /ion/ion.c: -------------------------------------------------------------------------------- 1 | /* 2 | * 3 | * drivers/staging/android/ion/ion.c 4 | * 5 | * Copyright (C) 2011 Google, Inc. 6 | * 7 | * This software is licensed under the terms of the GNU General Public 8 | * License version 2, as published by the Free Software Foundation, and 9 | * may be copied, distributed, and modified under those terms. 10 | * 11 | * This program is distributed in the hope that it will be useful, 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | * GNU General Public License for more details. 15 | * 16 | */ 17 | 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include 28 | #include 29 | #include 30 | #include 31 | #include 32 | #include 33 | #include 34 | #include 35 | #include 36 | #include 37 | #include 38 | #include 39 | #include 40 | #include 41 | 42 | #include "ion.h" 43 | #include "ion_priv.h" 44 | #include "compat_ion.h" 45 | 46 | bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer) 47 | { 48 | return (buffer->flags & ION_FLAG_CACHED) && 49 | !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC); 50 | } 51 | 52 | bool ion_buffer_cached(struct ion_buffer *buffer) 53 | { 54 | return !!(buffer->flags & ION_FLAG_CACHED); 55 | } 56 | 57 | static inline struct page *ion_buffer_page(struct page *page) 58 | { 59 | return (struct page *)((unsigned long)page & ~(1UL)); 60 | } 61 | 62 | static inline bool ion_buffer_page_is_dirty(struct page *page) 63 | { 64 | return !!((unsigned long)page & 1UL); 65 | } 66 | 67 | static inline void ion_buffer_page_dirty(struct page **page) 68 | { 69 | *page = (struct page *)((unsigned long)(*page) | 1UL); 70 | } 71 | 72 | static inline void ion_buffer_page_clean(struct page **page) 73 | { 74 | *page = (struct page *)((unsigned long)(*page) & ~(1UL)); 75 | } 76 | 77 | /* this function should only be called while dev->lock is held */ 78 | static void ion_buffer_add(struct ion_device *dev, 79 | struct ion_buffer *buffer) 80 | { 81 | struct rb_node **p = &dev->buffers.rb_node; 82 | struct rb_node *parent = NULL; 83 | struct ion_buffer *entry; 84 | 85 | while (*p) { 86 | parent = *p; 87 | entry = rb_entry(parent, struct ion_buffer, node); 88 | 89 | if (buffer < entry) { 90 | p = &(*p)->rb_left; 91 | } else if (buffer > entry) { 92 | p = &(*p)->rb_right; 93 | } else { 94 | pr_err("%s: buffer already found.", __func__); 95 | BUG(); 96 | } 97 | } 98 | 99 | rb_link_node(&buffer->node, parent, p); 100 | 
rb_insert_color(&buffer->node, &dev->buffers); 101 | } 102 | 103 | /* this function should only be called while dev->lock is held */ 104 | static struct ion_buffer *ion_buffer_create(struct ion_heap *heap, 105 | struct ion_device *dev, 106 | unsigned long len, 107 | unsigned long align, 108 | unsigned long flags) 109 | { 110 | struct ion_buffer *buffer; 111 | struct sg_table *table; 112 | struct scatterlist *sg; 113 | int i, ret; 114 | 115 | buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); 116 | if (!buffer) 117 | return ERR_PTR(-ENOMEM); 118 | 119 | buffer->heap = heap; 120 | buffer->flags = flags; 121 | kref_init(&buffer->ref); 122 | 123 | ret = heap->ops->allocate(heap, buffer, len, align, flags); 124 | 125 | if (ret) { 126 | if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE)) 127 | goto err2; 128 | 129 | ion_heap_freelist_drain(heap, 0); 130 | ret = heap->ops->allocate(heap, buffer, len, align, 131 | flags); 132 | if (ret) 133 | goto err2; 134 | } 135 | 136 | if (buffer->sg_table == NULL) { 137 | WARN_ONCE(1, "This heap needs to set the sgtable"); 138 | ret = -EINVAL; 139 | goto err1; 140 | } 141 | 142 | table = buffer->sg_table; 143 | buffer->dev = dev; 144 | buffer->size = len; 145 | 146 | if (ion_buffer_fault_user_mappings(buffer)) { 147 | int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; 148 | struct scatterlist *sg; 149 | int i, j, k = 0; 150 | 151 | buffer->pages = vmalloc(sizeof(struct page *) * num_pages); 152 | if (!buffer->pages) { 153 | ret = -ENOMEM; 154 | goto err1; 155 | } 156 | 157 | for_each_sg(table->sgl, sg, table->nents, i) { 158 | struct page *page = sg_page(sg); 159 | 160 | for (j = 0; j < sg->length / PAGE_SIZE; j++) 161 | buffer->pages[k++] = page++; 162 | } 163 | } 164 | 165 | buffer->dev = dev; 166 | buffer->size = len; 167 | INIT_LIST_HEAD(&buffer->vmas); 168 | mutex_init(&buffer->lock); 169 | /* 170 | * this will set up dma addresses for the sglist -- it is not 171 | * technically correct as per the dma api -- a specific 172 | * device isn't really taking ownership here. However, in practice on 173 | * our systems the only dma_address space is physical addresses. 174 | * Additionally, we can't afford the overhead of invalidating every 175 | * allocation via dma_map_sg. 
The implicit contract here is that 176 | * memory coming from the heaps is ready for dma, ie if it has a 177 | * cached mapping that mapping has been invalidated 178 | */ 179 | for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) { 180 | sg_dma_address(sg) = sg_phys(sg); 181 | sg_dma_len(sg) = sg->length; 182 | } 183 | mutex_lock(&dev->buffer_lock); 184 | ion_buffer_add(dev, buffer); 185 | mutex_unlock(&dev->buffer_lock); 186 | return buffer; 187 | 188 | err1: 189 | heap->ops->free(buffer); 190 | err2: 191 | kfree(buffer); 192 | return ERR_PTR(ret); 193 | } 194 | 195 | void ion_buffer_destroy(struct ion_buffer *buffer) 196 | { 197 | if (buffer->kmap_cnt > 0) { 198 | pr_warn_once("%s: buffer still mapped in the kernel\n", 199 | __func__); 200 | buffer->heap->ops->unmap_kernel(buffer->heap, buffer); 201 | } 202 | buffer->heap->ops->free(buffer); 203 | vfree(buffer->pages); 204 | kfree(buffer); 205 | } 206 | 207 | static void _ion_buffer_destroy(struct kref *kref) 208 | { 209 | struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref); 210 | struct ion_heap *heap = buffer->heap; 211 | struct ion_device *dev = buffer->dev; 212 | 213 | mutex_lock(&dev->buffer_lock); 214 | rb_erase(&buffer->node, &dev->buffers); 215 | mutex_unlock(&dev->buffer_lock); 216 | 217 | if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) 218 | ion_heap_freelist_add(heap, buffer); 219 | else 220 | ion_buffer_destroy(buffer); 221 | } 222 | 223 | static void ion_buffer_get(struct ion_buffer *buffer) 224 | { 225 | kref_get(&buffer->ref); 226 | } 227 | 228 | static int ion_buffer_put(struct ion_buffer *buffer) 229 | { 230 | return kref_put(&buffer->ref, _ion_buffer_destroy); 231 | } 232 | 233 | static void ion_buffer_add_to_handle(struct ion_buffer *buffer) 234 | { 235 | mutex_lock(&buffer->lock); 236 | buffer->handle_count++; 237 | mutex_unlock(&buffer->lock); 238 | } 239 | 240 | static void ion_buffer_remove_from_handle(struct ion_buffer *buffer) 241 | { 242 | /* 243 | * when a buffer is removed from a handle, if it is not in 244 | * any other handles, copy the taskcomm and the pid of the 245 | * process it's being removed from into the buffer. At this 246 | * point there will be no way to track what processes this buffer is 247 | * being used by, it only exists as a dma_buf file descriptor. 
248 | * The taskcomm and pid can provide a debug hint as to where this fd 249 | * is in the system 250 | */ 251 | mutex_lock(&buffer->lock); 252 | buffer->handle_count--; 253 | BUG_ON(buffer->handle_count < 0); 254 | if (!buffer->handle_count) { 255 | struct task_struct *task; 256 | 257 | task = current->group_leader; 258 | get_task_comm(buffer->task_comm, task); 259 | buffer->pid = task_pid_nr(task); 260 | } 261 | mutex_unlock(&buffer->lock); 262 | } 263 | 264 | static struct ion_handle *ion_handle_create(struct ion_client *client, 265 | struct ion_buffer *buffer) 266 | { 267 | struct ion_handle *handle; 268 | 269 | handle = kzalloc(sizeof(*handle), GFP_KERNEL); 270 | if (!handle) 271 | return ERR_PTR(-ENOMEM); 272 | kref_init(&handle->ref); 273 | RB_CLEAR_NODE(&handle->node); 274 | handle->client = client; 275 | ion_buffer_get(buffer); 276 | ion_buffer_add_to_handle(buffer); 277 | handle->buffer = buffer; 278 | 279 | return handle; 280 | } 281 | 282 | static void ion_handle_kmap_put(struct ion_handle *); 283 | 284 | static void ion_handle_destroy(struct kref *kref) 285 | { 286 | struct ion_handle *handle = container_of(kref, struct ion_handle, ref); 287 | struct ion_client *client = handle->client; 288 | struct ion_buffer *buffer = handle->buffer; 289 | 290 | mutex_lock(&buffer->lock); 291 | while (handle->kmap_cnt) 292 | ion_handle_kmap_put(handle); 293 | mutex_unlock(&buffer->lock); 294 | 295 | idr_remove(&client->idr, handle->id); 296 | if (!RB_EMPTY_NODE(&handle->node)) 297 | rb_erase(&handle->node, &client->handles); 298 | 299 | ion_buffer_remove_from_handle(buffer); 300 | ion_buffer_put(buffer); 301 | 302 | kfree(handle); 303 | } 304 | 305 | static void ion_handle_get(struct ion_handle *handle) 306 | { 307 | kref_get(&handle->ref); 308 | } 309 | 310 | /* Must hold the client lock */ 311 | static struct ion_handle *ion_handle_get_check_overflow( 312 | struct ion_handle *handle) 313 | { 314 | if (atomic_read(&handle->ref.refcount.refs) + 1 == 0) 315 | return ERR_PTR(-EOVERFLOW); 316 | ion_handle_get(handle); 317 | return handle; 318 | } 319 | 320 | int ion_handle_put_nolock(struct ion_handle *handle) 321 | { 322 | return kref_put(&handle->ref, ion_handle_destroy); 323 | } 324 | 325 | int ion_handle_put(struct ion_handle *handle) 326 | { 327 | struct ion_client *client = handle->client; 328 | int ret; 329 | 330 | mutex_lock(&client->lock); 331 | ret = ion_handle_put_nolock(handle); 332 | mutex_unlock(&client->lock); 333 | 334 | return ret; 335 | } 336 | 337 | static struct ion_handle *ion_handle_lookup(struct ion_client *client, 338 | struct ion_buffer *buffer) 339 | { 340 | struct rb_node *n = client->handles.rb_node; 341 | 342 | while (n) { 343 | struct ion_handle *entry = rb_entry(n, struct ion_handle, node); 344 | 345 | if (buffer < entry->buffer) 346 | n = n->rb_left; 347 | else if (buffer > entry->buffer) 348 | n = n->rb_right; 349 | else 350 | return entry; 351 | } 352 | return ERR_PTR(-EINVAL); 353 | } 354 | 355 | struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client, 356 | int id) 357 | { 358 | struct ion_handle *handle; 359 | 360 | handle = idr_find(&client->idr, id); 361 | if (handle) 362 | return ion_handle_get_check_overflow(handle); 363 | 364 | return ERR_PTR(-EINVAL); 365 | } 366 | 367 | static bool ion_handle_validate(struct ion_client *client, 368 | struct ion_handle *handle) 369 | { 370 | WARN_ON(!mutex_is_locked(&client->lock)); 371 | return idr_find(&client->idr, handle->id) == handle; 372 | } 373 | 374 | static int ion_handle_add(struct 
ion_client *client, struct ion_handle *handle) 375 | { 376 | int id; 377 | struct rb_node **p = &client->handles.rb_node; 378 | struct rb_node *parent = NULL; 379 | struct ion_handle *entry; 380 | 381 | id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL); 382 | if (id < 0) 383 | return id; 384 | 385 | handle->id = id; 386 | 387 | while (*p) { 388 | parent = *p; 389 | entry = rb_entry(parent, struct ion_handle, node); 390 | 391 | if (handle->buffer < entry->buffer) 392 | p = &(*p)->rb_left; 393 | else if (handle->buffer > entry->buffer) 394 | p = &(*p)->rb_right; 395 | else 396 | WARN(1, "%s: buffer already found.", __func__); 397 | } 398 | 399 | rb_link_node(&handle->node, parent, p); 400 | rb_insert_color(&handle->node, &client->handles); 401 | 402 | return 0; 403 | } 404 | 405 | struct ion_handle *ion_alloc(struct ion_client *client, size_t len, 406 | size_t align, unsigned int heap_id_mask, 407 | unsigned int flags) 408 | { 409 | struct ion_handle *handle; 410 | struct ion_device *dev = client->dev; 411 | struct ion_buffer *buffer = NULL; 412 | struct ion_heap *heap; 413 | int ret; 414 | 415 | pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__, 416 | len, align, heap_id_mask, flags); 417 | /* 418 | * traverse the list of heaps available in this system in priority 419 | * order. If the heap type is supported by the client, and matches the 420 | * request of the caller allocate from it. Repeat until allocate has 421 | * succeeded or all heaps have been tried 422 | */ 423 | len = PAGE_ALIGN(len); 424 | 425 | if (!len) 426 | return ERR_PTR(-EINVAL); 427 | 428 | down_read(&dev->lock); 429 | plist_for_each_entry(heap, &dev->heaps, node) { 430 | /* if the caller didn't specify this heap id */ 431 | if (!((1 << heap->id) & heap_id_mask)) 432 | continue; 433 | buffer = ion_buffer_create(heap, dev, len, align, flags); 434 | if (!IS_ERR(buffer)) 435 | break; 436 | } 437 | up_read(&dev->lock); 438 | 439 | if (buffer == NULL) 440 | return ERR_PTR(-ENODEV); 441 | 442 | if (IS_ERR(buffer)) 443 | return ERR_CAST(buffer); 444 | 445 | handle = ion_handle_create(client, buffer); 446 | 447 | /* 448 | * ion_buffer_create will create a buffer with a ref_cnt of 1, 449 | * and ion_handle_create will take a second reference, drop one here 450 | */ 451 | ion_buffer_put(buffer); 452 | 453 | if (IS_ERR(handle)) 454 | return handle; 455 | 456 | mutex_lock(&client->lock); 457 | ret = ion_handle_add(client, handle); 458 | mutex_unlock(&client->lock); 459 | if (ret) { 460 | ion_handle_put(handle); 461 | handle = ERR_PTR(ret); 462 | } 463 | 464 | return handle; 465 | } 466 | EXPORT_SYMBOL(ion_alloc); 467 | 468 | void ion_free_nolock(struct ion_client *client, 469 | struct ion_handle *handle) 470 | { 471 | if (!ion_handle_validate(client, handle)) { 472 | WARN(1, "%s: invalid handle passed to free.\n", __func__); 473 | return; 474 | } 475 | ion_handle_put_nolock(handle); 476 | } 477 | 478 | void ion_free(struct ion_client *client, struct ion_handle *handle) 479 | { 480 | BUG_ON(client != handle->client); 481 | 482 | mutex_lock(&client->lock); 483 | ion_free_nolock(client, handle); 484 | mutex_unlock(&client->lock); 485 | } 486 | EXPORT_SYMBOL(ion_free); 487 | 488 | static void *ion_buffer_kmap_get(struct ion_buffer *buffer) 489 | { 490 | void *vaddr; 491 | 492 | if (buffer->kmap_cnt) { 493 | buffer->kmap_cnt++; 494 | return buffer->vaddr; 495 | } 496 | vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer); 497 | if (WARN_ONCE(vaddr == NULL, 498 | "heap->ops->map_kernel should return ERR_PTR 
on error")) 499 | return ERR_PTR(-EINVAL); 500 | if (IS_ERR(vaddr)) 501 | return vaddr; 502 | buffer->vaddr = vaddr; 503 | buffer->kmap_cnt++; 504 | return vaddr; 505 | } 506 | 507 | static void *ion_handle_kmap_get(struct ion_handle *handle) 508 | { 509 | struct ion_buffer *buffer = handle->buffer; 510 | void *vaddr; 511 | 512 | if (handle->kmap_cnt) { 513 | handle->kmap_cnt++; 514 | return buffer->vaddr; 515 | } 516 | vaddr = ion_buffer_kmap_get(buffer); 517 | if (IS_ERR(vaddr)) 518 | return vaddr; 519 | handle->kmap_cnt++; 520 | return vaddr; 521 | } 522 | 523 | static void ion_buffer_kmap_put(struct ion_buffer *buffer) 524 | { 525 | buffer->kmap_cnt--; 526 | if (!buffer->kmap_cnt) { 527 | buffer->heap->ops->unmap_kernel(buffer->heap, buffer); 528 | buffer->vaddr = NULL; 529 | } 530 | } 531 | 532 | static void ion_handle_kmap_put(struct ion_handle *handle) 533 | { 534 | struct ion_buffer *buffer = handle->buffer; 535 | 536 | if (!handle->kmap_cnt) { 537 | WARN(1, "%s: Double unmap detected! bailing...\n", __func__); 538 | return; 539 | } 540 | handle->kmap_cnt--; 541 | if (!handle->kmap_cnt) 542 | ion_buffer_kmap_put(buffer); 543 | } 544 | 545 | void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle) 546 | { 547 | struct ion_buffer *buffer; 548 | void *vaddr; 549 | 550 | mutex_lock(&client->lock); 551 | if (!ion_handle_validate(client, handle)) { 552 | pr_err("%s: invalid handle passed to map_kernel.\n", 553 | __func__); 554 | mutex_unlock(&client->lock); 555 | return ERR_PTR(-EINVAL); 556 | } 557 | 558 | buffer = handle->buffer; 559 | 560 | if (!handle->buffer->heap->ops->map_kernel) { 561 | pr_err("%s: map_kernel is not implemented by this heap.\n", 562 | __func__); 563 | mutex_unlock(&client->lock); 564 | return ERR_PTR(-ENODEV); 565 | } 566 | 567 | mutex_lock(&buffer->lock); 568 | vaddr = ion_handle_kmap_get(handle); 569 | mutex_unlock(&buffer->lock); 570 | mutex_unlock(&client->lock); 571 | return vaddr; 572 | } 573 | EXPORT_SYMBOL(ion_map_kernel); 574 | 575 | void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle) 576 | { 577 | struct ion_buffer *buffer; 578 | 579 | mutex_lock(&client->lock); 580 | buffer = handle->buffer; 581 | mutex_lock(&buffer->lock); 582 | ion_handle_kmap_put(handle); 583 | mutex_unlock(&buffer->lock); 584 | mutex_unlock(&client->lock); 585 | } 586 | EXPORT_SYMBOL(ion_unmap_kernel); 587 | 588 | static struct mutex debugfs_mutex; 589 | static struct rb_root *ion_root_client; 590 | static int is_client_alive(struct ion_client *client) 591 | { 592 | struct rb_node *node; 593 | struct ion_client *tmp; 594 | struct ion_device *dev; 595 | 596 | node = ion_root_client->rb_node; 597 | dev = container_of(ion_root_client, struct ion_device, clients); 598 | 599 | down_read(&dev->lock); 600 | while (node) { 601 | tmp = rb_entry(node, struct ion_client, node); 602 | if (client < tmp) { 603 | node = node->rb_left; 604 | } else if (client > tmp) { 605 | node = node->rb_right; 606 | } else { 607 | up_read(&dev->lock); 608 | return 1; 609 | } 610 | } 611 | 612 | up_read(&dev->lock); 613 | return 0; 614 | } 615 | 616 | static int ion_debug_client_show(struct seq_file *s, void *unused) 617 | { 618 | struct ion_client *client = s->private; 619 | struct rb_node *n; 620 | size_t sizes[ION_NUM_HEAP_IDS] = {0}; 621 | const char *names[ION_NUM_HEAP_IDS] = {NULL}; 622 | int i; 623 | 624 | mutex_lock(&debugfs_mutex); 625 | if (!is_client_alive(client)) { 626 | seq_printf(s, "ion_client 0x%p dead, can't dump its buffers\n", 627 | client); 628 | 
mutex_unlock(&debugfs_mutex); 629 | return 0; 630 | } 631 | 632 | mutex_lock(&client->lock); 633 | for (n = rb_first(&client->handles); n; n = rb_next(n)) { 634 | struct ion_handle *handle = rb_entry(n, struct ion_handle, 635 | node); 636 | unsigned int id = handle->buffer->heap->id; 637 | 638 | if (!names[id]) 639 | names[id] = handle->buffer->heap->name; 640 | sizes[id] += handle->buffer->size; 641 | } 642 | mutex_unlock(&client->lock); 643 | mutex_unlock(&debugfs_mutex); 644 | 645 | seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes"); 646 | for (i = 0; i < ION_NUM_HEAP_IDS; i++) { 647 | if (!names[i]) 648 | continue; 649 | seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]); 650 | } 651 | return 0; 652 | } 653 | 654 | static int ion_debug_client_open(struct inode *inode, struct file *file) 655 | { 656 | return single_open(file, ion_debug_client_show, inode->i_private); 657 | } 658 | 659 | static const struct file_operations debug_client_fops = { 660 | .open = ion_debug_client_open, 661 | .read = seq_read, 662 | .llseek = seq_lseek, 663 | .release = single_release, 664 | }; 665 | 666 | static int ion_get_client_serial(const struct rb_root *root, 667 | const unsigned char *name) 668 | { 669 | int serial = -1; 670 | struct rb_node *node; 671 | 672 | for (node = rb_first(root); node; node = rb_next(node)) { 673 | struct ion_client *client = rb_entry(node, struct ion_client, 674 | node); 675 | 676 | if (strcmp(client->name, name)) 677 | continue; 678 | serial = max(serial, client->display_serial); 679 | } 680 | return serial + 1; 681 | } 682 | 683 | struct ion_client *ion_client_create(struct ion_device *dev, 684 | const char *name) 685 | { 686 | struct ion_client *client; 687 | struct task_struct *task; 688 | struct rb_node **p; 689 | struct rb_node *parent = NULL; 690 | struct ion_client *entry; 691 | pid_t pid; 692 | 693 | if (!name) { 694 | pr_err("%s: Name cannot be null\n", __func__); 695 | return ERR_PTR(-EINVAL); 696 | } 697 | 698 | get_task_struct(current->group_leader); 699 | task_lock(current->group_leader); 700 | pid = task_pid_nr(current->group_leader); 701 | /* 702 | * don't bother to store task struct for kernel threads, 703 | * they can't be killed anyway 704 | */ 705 | if (current->group_leader->flags & PF_KTHREAD) { 706 | put_task_struct(current->group_leader); 707 | task = NULL; 708 | } else { 709 | task = current->group_leader; 710 | } 711 | task_unlock(current->group_leader); 712 | 713 | client = kzalloc(sizeof(*client), GFP_KERNEL); 714 | if (!client) 715 | goto err_put_task_struct; 716 | 717 | client->dev = dev; 718 | client->handles = RB_ROOT; 719 | idr_init(&client->idr); 720 | mutex_init(&client->lock); 721 | client->task = task; 722 | client->pid = pid; 723 | client->name = kstrdup(name, GFP_KERNEL); 724 | if (!client->name) 725 | goto err_free_client; 726 | 727 | down_write(&dev->lock); 728 | client->display_serial = ion_get_client_serial(&dev->clients, name); 729 | client->display_name = kasprintf( 730 | GFP_KERNEL, "%s-%d", name, client->display_serial); 731 | if (!client->display_name) { 732 | up_write(&dev->lock); 733 | goto err_free_client_name; 734 | } 735 | p = &dev->clients.rb_node; 736 | while (*p) { 737 | parent = *p; 738 | entry = rb_entry(parent, struct ion_client, node); 739 | 740 | if (client < entry) 741 | p = &(*p)->rb_left; 742 | else if (client > entry) 743 | p = &(*p)->rb_right; 744 | } 745 | rb_link_node(&client->node, parent, p); 746 | rb_insert_color(&client->node, &dev->clients); 747 | 748 | client->debug_root = 
debugfs_create_file(client->display_name, 0664, 749 | dev->clients_debug_root, 750 | client, &debug_client_fops); 751 | if (!client->debug_root) { 752 | char buf[256], *path; 753 | 754 | path = dentry_path(dev->clients_debug_root, buf, 256); 755 | pr_err("Failed to create client debugfs at %s/%s\n", 756 | path, client->display_name); 757 | } 758 | 759 | up_write(&dev->lock); 760 | 761 | return client; 762 | 763 | err_free_client_name: 764 | kfree(client->name); 765 | err_free_client: 766 | kfree(client); 767 | err_put_task_struct: 768 | if (task) 769 | put_task_struct(current->group_leader); 770 | return ERR_PTR(-ENOMEM); 771 | } 772 | EXPORT_SYMBOL(ion_client_create); 773 | 774 | void ion_client_destroy(struct ion_client *client) 775 | { 776 | struct ion_device *dev = client->dev; 777 | struct rb_node *n; 778 | 779 | pr_debug("%s: %d\n", __func__, __LINE__); 780 | mutex_lock(&debugfs_mutex); 781 | while ((n = rb_first(&client->handles))) { 782 | struct ion_handle *handle = rb_entry(n, struct ion_handle, 783 | node); 784 | ion_handle_destroy(&handle->ref); 785 | } 786 | 787 | idr_destroy(&client->idr); 788 | 789 | down_write(&dev->lock); 790 | if (client->task) 791 | put_task_struct(client->task); 792 | rb_erase(&client->node, &dev->clients); 793 | debugfs_remove_recursive(client->debug_root); 794 | up_write(&dev->lock); 795 | 796 | kfree(client->display_name); 797 | kfree(client->name); 798 | kfree(client); 799 | mutex_unlock(&debugfs_mutex); 800 | } 801 | EXPORT_SYMBOL(ion_client_destroy); 802 | 803 | static void ion_buffer_sync_for_device(struct ion_buffer *buffer, 804 | struct device *dev, 805 | enum dma_data_direction direction); 806 | 807 | static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment, 808 | enum dma_data_direction direction) 809 | { 810 | struct dma_buf *dmabuf = attachment->dmabuf; 811 | struct ion_buffer *buffer = dmabuf->priv; 812 | 813 | ion_buffer_sync_for_device(buffer, attachment->dev, direction); 814 | return buffer->sg_table; 815 | } 816 | 817 | static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment, 818 | struct sg_table *table, 819 | enum dma_data_direction direction) 820 | { 821 | } 822 | 823 | void ion_pages_sync_for_device(struct device *dev, struct page *page, 824 | size_t size, enum dma_data_direction dir) 825 | { 826 | struct scatterlist sg; 827 | 828 | sg_init_table(&sg, 1); 829 | sg_set_page(&sg, page, size, 0); 830 | /* 831 | * This is not correct - sg_dma_address needs a dma_addr_t that is valid 832 | * for the targeted device, but this works on the currently targeted 833 | * hardware. 834 | */ 835 | sg_dma_address(&sg) = page_to_phys(page); 836 | dma_sync_sg_for_device(dev, &sg, 1, dir); 837 | } 838 | 839 | struct ion_vma_list { 840 | struct list_head list; 841 | struct vm_area_struct *vma; 842 | }; 843 | 844 | static void ion_buffer_sync_for_device(struct ion_buffer *buffer, 845 | struct device *dev, 846 | enum dma_data_direction dir) 847 | { 848 | struct ion_vma_list *vma_list; 849 | int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; 850 | int i; 851 | 852 | pr_debug("%s: syncing for device %s\n", __func__, 853 | dev ? 
dev_name(dev) : "null"); 854 | 855 | if (!ion_buffer_fault_user_mappings(buffer)) 856 | return; 857 | 858 | mutex_lock(&buffer->lock); 859 | for (i = 0; i < pages; i++) { 860 | struct page *page = buffer->pages[i]; 861 | 862 | if (ion_buffer_page_is_dirty(page)) 863 | ion_pages_sync_for_device(dev, ion_buffer_page(page), 864 | PAGE_SIZE, dir); 865 | 866 | ion_buffer_page_clean(buffer->pages + i); 867 | } 868 | list_for_each_entry(vma_list, &buffer->vmas, list) { 869 | struct vm_area_struct *vma = vma_list->vma; 870 | 871 | zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start); 872 | } 873 | mutex_unlock(&buffer->lock); 874 | } 875 | 876 | static vm_fault_t ion_vm_fault(struct vm_fault *vmf) 877 | { 878 | struct vm_area_struct *vma = vmf->vma; 879 | struct ion_buffer *buffer = vma->vm_private_data; 880 | unsigned long pfn; 881 | int ret; 882 | 883 | mutex_lock(&buffer->lock); 884 | ion_buffer_page_dirty(buffer->pages + vmf->pgoff); 885 | BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]); 886 | 887 | pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff])); 888 | ret = vmf_insert_pfn(vma, (unsigned long)vmf->address, pfn); 889 | mutex_unlock(&buffer->lock); 890 | if (ret) 891 | return VM_FAULT_ERROR; 892 | 893 | return VM_FAULT_NOPAGE; 894 | } 895 | 896 | static void ion_vm_open(struct vm_area_struct *vma) 897 | { 898 | struct ion_buffer *buffer = vma->vm_private_data; 899 | struct ion_vma_list *vma_list; 900 | 901 | vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL); 902 | if (!vma_list) 903 | return; 904 | vma_list->vma = vma; 905 | mutex_lock(&buffer->lock); 906 | list_add(&vma_list->list, &buffer->vmas); 907 | mutex_unlock(&buffer->lock); 908 | pr_debug("%s: adding %p\n", __func__, vma); 909 | } 910 | 911 | static void ion_vm_close(struct vm_area_struct *vma) 912 | { 913 | struct ion_buffer *buffer = vma->vm_private_data; 914 | struct ion_vma_list *vma_list, *tmp; 915 | 916 | pr_debug("%s\n", __func__); 917 | mutex_lock(&buffer->lock); 918 | list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) { 919 | if (vma_list->vma != vma) 920 | continue; 921 | list_del(&vma_list->list); 922 | kfree(vma_list); 923 | pr_debug("%s: deleting %p\n", __func__, vma); 924 | break; 925 | } 926 | mutex_unlock(&buffer->lock); 927 | } 928 | 929 | static const struct vm_operations_struct ion_vma_ops = { 930 | .open = ion_vm_open, 931 | .close = ion_vm_close, 932 | .fault = ion_vm_fault, 933 | }; 934 | 935 | static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) 936 | { 937 | struct ion_buffer *buffer = dmabuf->priv; 938 | int ret = 0; 939 | 940 | if (!buffer->heap->ops->map_user) { 941 | pr_err("%s: this heap does not define a method for mapping to userspace\n", 942 | __func__); 943 | return -EINVAL; 944 | } 945 | 946 | if (ion_buffer_fault_user_mappings(buffer)) { 947 | vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | 948 | VM_DONTDUMP; 949 | vma->vm_private_data = buffer; 950 | vma->vm_ops = &ion_vma_ops; 951 | ion_vm_open(vma); 952 | return 0; 953 | } 954 | 955 | if (!(buffer->flags & ION_FLAG_CACHED)) 956 | vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); 957 | 958 | mutex_lock(&buffer->lock); 959 | /* now map it to userspace */ 960 | ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma); 961 | mutex_unlock(&buffer->lock); 962 | 963 | if (ret) 964 | pr_err("%s: failure mapping buffer to userspace\n", 965 | __func__); 966 | 967 | return ret; 968 | } 969 | 970 | static void ion_dma_buf_release(struct dma_buf *dmabuf) 971 | { 972 | struct 
ion_buffer *buffer = dmabuf->priv; 973 | 974 | ion_buffer_put(buffer); 975 | } 976 | 977 | static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset) 978 | { 979 | struct ion_buffer *buffer = dmabuf->priv; 980 | 981 | return buffer->vaddr + offset * PAGE_SIZE; 982 | } 983 | 984 | static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset, 985 | void *ptr) 986 | { 987 | } 988 | 989 | static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, 990 | enum dma_data_direction direction) 991 | { 992 | struct ion_buffer *buffer = dmabuf->priv; 993 | void *vaddr; 994 | 995 | if (!buffer->heap->ops->map_kernel) { 996 | pr_err("%s: map kernel is not implemented by this heap.\n", 997 | __func__); 998 | return -ENODEV; 999 | } 1000 | 1001 | mutex_lock(&buffer->lock); 1002 | vaddr = ion_buffer_kmap_get(buffer); 1003 | mutex_unlock(&buffer->lock); 1004 | return PTR_ERR_OR_ZERO(vaddr); 1005 | } 1006 | 1007 | static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, 1008 | enum dma_data_direction direction) 1009 | { 1010 | struct ion_buffer *buffer = dmabuf->priv; 1011 | 1012 | mutex_lock(&buffer->lock); 1013 | ion_buffer_kmap_put(buffer); 1014 | mutex_unlock(&buffer->lock); 1015 | 1016 | return 0; 1017 | } 1018 | 1019 | static struct dma_buf_ops dma_buf_ops = { 1020 | .map_dma_buf = ion_map_dma_buf, 1021 | .unmap_dma_buf = ion_unmap_dma_buf, 1022 | .mmap = ion_mmap, 1023 | .release = ion_dma_buf_release, 1024 | .begin_cpu_access = ion_dma_buf_begin_cpu_access, 1025 | .end_cpu_access = ion_dma_buf_end_cpu_access, 1026 | .map = ion_dma_buf_kmap, 1027 | .unmap = ion_dma_buf_kunmap, 1028 | }; 1029 | 1030 | static struct dma_buf *__ion_share_dma_buf(struct ion_client *client, 1031 | struct ion_handle *handle, 1032 | bool lock_client) 1033 | { 1034 | DEFINE_DMA_BUF_EXPORT_INFO(exp_info); 1035 | struct ion_buffer *buffer; 1036 | struct dma_buf *dmabuf; 1037 | bool valid_handle; 1038 | 1039 | if (lock_client) 1040 | mutex_lock(&client->lock); 1041 | valid_handle = ion_handle_validate(client, handle); 1042 | if (!valid_handle) { 1043 | WARN(1, "%s: invalid handle passed to share.\n", __func__); 1044 | if (lock_client) 1045 | mutex_unlock(&client->lock); 1046 | return ERR_PTR(-EINVAL); 1047 | } 1048 | buffer = handle->buffer; 1049 | ion_buffer_get(buffer); 1050 | if (lock_client) 1051 | mutex_unlock(&client->lock); 1052 | 1053 | exp_info.ops = &dma_buf_ops; 1054 | exp_info.size = buffer->size; 1055 | exp_info.flags = O_RDWR; 1056 | exp_info.priv = buffer; 1057 | 1058 | dmabuf = dma_buf_export(&exp_info); 1059 | if (IS_ERR(dmabuf)) { 1060 | ion_buffer_put(buffer); 1061 | return dmabuf; 1062 | } 1063 | 1064 | return dmabuf; 1065 | } 1066 | 1067 | struct dma_buf *ion_share_dma_buf(struct ion_client *client, 1068 | struct ion_handle *handle) 1069 | { 1070 | return __ion_share_dma_buf(client, handle, true); 1071 | } 1072 | EXPORT_SYMBOL(ion_share_dma_buf); 1073 | 1074 | static int __ion_share_dma_buf_fd(struct ion_client *client, 1075 | struct ion_handle *handle, bool lock_client) 1076 | { 1077 | struct dma_buf *dmabuf; 1078 | int fd; 1079 | 1080 | dmabuf = __ion_share_dma_buf(client, handle, lock_client); 1081 | if (IS_ERR(dmabuf)) 1082 | return PTR_ERR(dmabuf); 1083 | 1084 | fd = dma_buf_fd(dmabuf, O_CLOEXEC); 1085 | if (fd < 0) 1086 | dma_buf_put(dmabuf); 1087 | 1088 | return fd; 1089 | } 1090 | 1091 | int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle) 1092 | { 1093 | return __ion_share_dma_buf_fd(client, handle, true); 1094 | } 1095 | 
EXPORT_SYMBOL(ion_share_dma_buf_fd); 1096 | 1097 | int ion_share_dma_buf_fd_nolock(struct ion_client *client, 1098 | struct ion_handle *handle) 1099 | { 1100 | return __ion_share_dma_buf_fd(client, handle, false); 1101 | } 1102 | 1103 | struct ion_handle *ion_import_dma_buf(struct ion_client *client, 1104 | struct dma_buf *dmabuf) 1105 | { 1106 | struct ion_buffer *buffer; 1107 | struct ion_handle *handle; 1108 | int ret; 1109 | 1110 | /* if this memory came from ion */ 1111 | 1112 | if (dmabuf->ops != &dma_buf_ops) { 1113 | pr_err("%s: can not import dmabuf from another exporter\n", 1114 | __func__); 1115 | return ERR_PTR(-EINVAL); 1116 | } 1117 | buffer = dmabuf->priv; 1118 | 1119 | mutex_lock(&client->lock); 1120 | /* if a handle exists for this buffer just take a reference to it */ 1121 | handle = ion_handle_lookup(client, buffer); 1122 | if (!IS_ERR(handle)) { 1123 | handle = ion_handle_get_check_overflow(handle); 1124 | mutex_unlock(&client->lock); 1125 | goto end; 1126 | } 1127 | 1128 | handle = ion_handle_create(client, buffer); 1129 | if (IS_ERR(handle)) { 1130 | mutex_unlock(&client->lock); 1131 | goto end; 1132 | } 1133 | 1134 | ret = ion_handle_add(client, handle); 1135 | mutex_unlock(&client->lock); 1136 | if (ret) { 1137 | ion_handle_put(handle); 1138 | handle = ERR_PTR(ret); 1139 | } 1140 | 1141 | end: 1142 | return handle; 1143 | } 1144 | EXPORT_SYMBOL(ion_import_dma_buf); 1145 | 1146 | struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd) 1147 | { 1148 | struct dma_buf *dmabuf; 1149 | struct ion_handle *handle; 1150 | 1151 | dmabuf = dma_buf_get(fd); 1152 | if (IS_ERR(dmabuf)) 1153 | return ERR_CAST(dmabuf); 1154 | 1155 | handle = ion_import_dma_buf(client, dmabuf); 1156 | dma_buf_put(dmabuf); 1157 | return handle; 1158 | } 1159 | EXPORT_SYMBOL(ion_import_dma_buf_fd); 1160 | 1161 | int ion_sync_for_device(struct ion_client *client, int fd) 1162 | { 1163 | struct dma_buf *dmabuf; 1164 | struct ion_buffer *buffer; 1165 | 1166 | dmabuf = dma_buf_get(fd); 1167 | if (IS_ERR(dmabuf)) 1168 | return PTR_ERR(dmabuf); 1169 | 1170 | /* if this memory came from ion */ 1171 | if (dmabuf->ops != &dma_buf_ops) { 1172 | pr_err("%s: can not sync dmabuf from another exporter\n", 1173 | __func__); 1174 | dma_buf_put(dmabuf); 1175 | return -EINVAL; 1176 | } 1177 | buffer = dmabuf->priv; 1178 | 1179 | dma_sync_sg_for_device(NULL, buffer->sg_table->sgl, 1180 | buffer->sg_table->nents, DMA_BIDIRECTIONAL); 1181 | dma_buf_put(dmabuf); 1182 | return 0; 1183 | } 1184 | 1185 | int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query) 1186 | { 1187 | struct ion_device *dev = client->dev; 1188 | struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps); 1189 | int ret = -EINVAL, cnt = 0, max_cnt; 1190 | struct ion_heap *heap; 1191 | struct ion_heap_data hdata; 1192 | 1193 | memset(&hdata, 0, sizeof(hdata)); 1194 | 1195 | down_read(&dev->lock); 1196 | if (!buffer) { 1197 | query->cnt = dev->heap_cnt; 1198 | ret = 0; 1199 | goto out; 1200 | } 1201 | 1202 | if (query->cnt <= 0) 1203 | goto out; 1204 | 1205 | max_cnt = query->cnt; 1206 | 1207 | plist_for_each_entry(heap, &dev->heaps, node) { 1208 | strncpy(hdata.name, heap->name, MAX_HEAP_NAME); 1209 | hdata.name[sizeof(hdata.name) - 1] = '\0'; 1210 | hdata.type = heap->type; 1211 | hdata.heap_id = heap->id; 1212 | 1213 | if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) { 1214 | ret = -EFAULT; 1215 | goto out; 1216 | } 1217 | 1218 | cnt++; 1219 | if (cnt >= max_cnt) 1220 | break; 1221 | } 1222 | 1223 
| query->cnt = cnt; 1224 | out: 1225 | up_read(&dev->lock); 1226 | return ret; 1227 | } 1228 | 1229 | static int ion_release(struct inode *inode, struct file *file) 1230 | { 1231 | struct ion_client *client = file->private_data; 1232 | 1233 | pr_debug("%s: %d\n", __func__, __LINE__); 1234 | ion_client_destroy(client); 1235 | return 0; 1236 | } 1237 | 1238 | static int ion_open(struct inode *inode, struct file *file) 1239 | { 1240 | struct miscdevice *miscdev = file->private_data; 1241 | struct ion_device *dev = container_of(miscdev, struct ion_device, dev); 1242 | struct ion_client *client; 1243 | char debug_name[64]; 1244 | 1245 | pr_debug("%s: %d\n", __func__, __LINE__); 1246 | snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader)); 1247 | client = ion_client_create(dev, debug_name); 1248 | if (IS_ERR(client)) 1249 | return PTR_ERR(client); 1250 | file->private_data = client; 1251 | 1252 | return 0; 1253 | } 1254 | 1255 | static const struct file_operations ion_fops = { 1256 | .owner = THIS_MODULE, 1257 | .open = ion_open, 1258 | .release = ion_release, 1259 | .unlocked_ioctl = ion_ioctl, 1260 | .compat_ioctl = compat_ion_ioctl, 1261 | }; 1262 | 1263 | static size_t ion_debug_heap_total(struct ion_client *client, 1264 | unsigned int id) 1265 | { 1266 | size_t size = 0; 1267 | struct rb_node *n; 1268 | 1269 | mutex_lock(&client->lock); 1270 | for (n = rb_first(&client->handles); n; n = rb_next(n)) { 1271 | struct ion_handle *handle = rb_entry(n, 1272 | struct ion_handle, 1273 | node); 1274 | if (handle->buffer->heap->id == id) 1275 | size += handle->buffer->size; 1276 | } 1277 | mutex_unlock(&client->lock); 1278 | return size; 1279 | } 1280 | 1281 | static int ion_debug_heap_show(struct seq_file *s, void *unused) 1282 | { 1283 | struct ion_heap *heap = s->private; 1284 | struct ion_device *dev = heap->dev; 1285 | struct rb_node *n; 1286 | size_t total_size = 0; 1287 | size_t total_orphaned_size = 0; 1288 | 1289 | seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size"); 1290 | seq_puts(s, "----------------------------------------------------\n"); 1291 | 1292 | mutex_lock(&debugfs_mutex); 1293 | for (n = rb_first(&dev->clients); n; n = rb_next(n)) { 1294 | struct ion_client *client = rb_entry(n, struct ion_client, 1295 | node); 1296 | size_t size = ion_debug_heap_total(client, heap->id); 1297 | 1298 | if (!size) 1299 | continue; 1300 | if (client->task) { 1301 | char task_comm[TASK_COMM_LEN]; 1302 | 1303 | get_task_comm(task_comm, client->task); 1304 | seq_printf(s, "%16s %16u %16zu\n", task_comm, 1305 | client->pid, size); 1306 | } else { 1307 | seq_printf(s, "%16s %16u %16zu\n", client->name, 1308 | client->pid, size); 1309 | } 1310 | } 1311 | mutex_unlock(&debugfs_mutex); 1312 | 1313 | seq_puts(s, "----------------------------------------------------\n"); 1314 | seq_puts(s, "orphaned allocations (info is from last known client):\n"); 1315 | mutex_lock(&dev->buffer_lock); 1316 | for (n = rb_first(&dev->buffers); n; n = rb_next(n)) { 1317 | struct ion_buffer *buffer = rb_entry(n, struct ion_buffer, 1318 | node); 1319 | if (buffer->heap->id != heap->id) 1320 | continue; 1321 | total_size += buffer->size; 1322 | if (!buffer->handle_count) { 1323 | seq_printf(s, "%16s %16u %16zu %d %d\n", 1324 | buffer->task_comm, buffer->pid, 1325 | buffer->size, buffer->kmap_cnt, 1326 | atomic_read(&buffer->ref.refcount.refs)); 1327 | total_orphaned_size += buffer->size; 1328 | } 1329 | } 1330 | mutex_unlock(&dev->buffer_lock); 1331 | seq_puts(s, 
"----------------------------------------------------\n"); 1332 | seq_printf(s, "%16s %16zu\n", "total orphaned", 1333 | total_orphaned_size); 1334 | seq_printf(s, "%16s %16zu\n", "total ", total_size); 1335 | if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) 1336 | seq_printf(s, "%16s %16zu\n", "deferred free", 1337 | heap->free_list_size); 1338 | seq_puts(s, "----------------------------------------------------\n"); 1339 | 1340 | if (heap->debug_show) 1341 | heap->debug_show(heap, s, unused); 1342 | 1343 | return 0; 1344 | } 1345 | 1346 | static int ion_debug_heap_open(struct inode *inode, struct file *file) 1347 | { 1348 | return single_open(file, ion_debug_heap_show, inode->i_private); 1349 | } 1350 | 1351 | static const struct file_operations debug_heap_fops = { 1352 | .open = ion_debug_heap_open, 1353 | .read = seq_read, 1354 | .llseek = seq_lseek, 1355 | .release = single_release, 1356 | }; 1357 | 1358 | static int debug_shrink_set(void *data, u64 val) 1359 | { 1360 | struct ion_heap *heap = data; 1361 | struct shrink_control sc; 1362 | int objs; 1363 | 1364 | sc.gfp_mask = GFP_HIGHUSER; 1365 | sc.nr_to_scan = val; 1366 | 1367 | if (!val) { 1368 | objs = heap->shrinker.count_objects(&heap->shrinker, &sc); 1369 | sc.nr_to_scan = objs; 1370 | } 1371 | 1372 | heap->shrinker.scan_objects(&heap->shrinker, &sc); 1373 | return 0; 1374 | } 1375 | 1376 | static int debug_shrink_get(void *data, u64 *val) 1377 | { 1378 | struct ion_heap *heap = data; 1379 | struct shrink_control sc; 1380 | int objs; 1381 | 1382 | sc.gfp_mask = GFP_HIGHUSER; 1383 | sc.nr_to_scan = 0; 1384 | 1385 | objs = heap->shrinker.count_objects(&heap->shrinker, &sc); 1386 | *val = objs; 1387 | return 0; 1388 | } 1389 | 1390 | DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get, 1391 | debug_shrink_set, "%llu\n"); 1392 | 1393 | void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap) 1394 | { 1395 | struct dentry *debug_file; 1396 | 1397 | if (!heap->ops->allocate || !heap->ops->free) 1398 | pr_err("%s: can not add heap with invalid ops struct.\n", 1399 | __func__); 1400 | 1401 | spin_lock_init(&heap->free_lock); 1402 | heap->free_list_size = 0; 1403 | 1404 | if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) 1405 | ion_heap_init_deferred_free(heap); 1406 | 1407 | if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink) 1408 | ion_heap_init_shrinker(heap); 1409 | 1410 | heap->dev = dev; 1411 | down_write(&dev->lock); 1412 | /* 1413 | * use negative heap->id to reverse the priority -- when traversing 1414 | * the list later attempt higher id numbers first 1415 | */ 1416 | plist_node_init(&heap->node, -heap->id); 1417 | plist_add(&heap->node, &dev->heaps); 1418 | debug_file = debugfs_create_file(heap->name, 0664, 1419 | dev->heaps_debug_root, heap, 1420 | &debug_heap_fops); 1421 | 1422 | if (!debug_file) { 1423 | char buf[256], *path; 1424 | 1425 | path = dentry_path(dev->heaps_debug_root, buf, 256); 1426 | pr_err("Failed to create heap debugfs at %s/%s\n", 1427 | path, heap->name); 1428 | } 1429 | 1430 | if (heap->shrinker.count_objects && heap->shrinker.scan_objects) { 1431 | char debug_name[64]; 1432 | 1433 | snprintf(debug_name, 64, "%s_shrink", heap->name); 1434 | debug_file = debugfs_create_file( 1435 | debug_name, 0644, dev->heaps_debug_root, heap, 1436 | &debug_shrink_fops); 1437 | if (!debug_file) { 1438 | char buf[256], *path; 1439 | 1440 | path = dentry_path(dev->heaps_debug_root, buf, 256); 1441 | pr_err("Failed to create heap shrinker debugfs at %s/%s\n", 1442 | path, debug_name); 1443 | } 
1444 | } 1445 | 1446 | dev->heap_cnt++; 1447 | up_write(&dev->lock); 1448 | } 1449 | EXPORT_SYMBOL(ion_device_add_heap); 1450 | 1451 | struct ion_device *ion_device_create(long (*custom_ioctl) 1452 | (struct ion_client *client, 1453 | unsigned int cmd, 1454 | unsigned long arg)) 1455 | { 1456 | struct ion_device *idev; 1457 | int ret; 1458 | 1459 | idev = kzalloc(sizeof(*idev), GFP_KERNEL); 1460 | if (!idev) 1461 | return ERR_PTR(-ENOMEM); 1462 | 1463 | idev->dev.minor = MISC_DYNAMIC_MINOR; 1464 | idev->dev.name = "ion"; 1465 | idev->dev.fops = &ion_fops; 1466 | idev->dev.parent = NULL; 1467 | ret = misc_register(&idev->dev); 1468 | if (ret) { 1469 | pr_err("ion: failed to register misc device.\n"); 1470 | kfree(idev); 1471 | return ERR_PTR(ret); 1472 | } 1473 | 1474 | idev->debug_root = debugfs_create_dir("ion", NULL); 1475 | if (!idev->debug_root) { 1476 | pr_err("ion: failed to create debugfs root directory.\n"); 1477 | goto debugfs_done; 1478 | } 1479 | idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root); 1480 | if (!idev->heaps_debug_root) { 1481 | pr_err("ion: failed to create debugfs heaps directory.\n"); 1482 | goto debugfs_done; 1483 | } 1484 | idev->clients_debug_root = debugfs_create_dir("clients", 1485 | idev->debug_root); 1486 | if (!idev->clients_debug_root) 1487 | pr_err("ion: failed to create debugfs clients directory.\n"); 1488 | 1489 | debugfs_done: 1490 | 1491 | idev->custom_ioctl = custom_ioctl; 1492 | idev->buffers = RB_ROOT; 1493 | mutex_init(&idev->buffer_lock); 1494 | init_rwsem(&idev->lock); 1495 | plist_head_init(&idev->heaps); 1496 | idev->clients = RB_ROOT; 1497 | ion_root_client = &idev->clients; 1498 | mutex_init(&debugfs_mutex); 1499 | return idev; 1500 | } 1501 | EXPORT_SYMBOL(ion_device_create); 1502 | 1503 | void ion_device_destroy(struct ion_device *dev) 1504 | { 1505 | misc_deregister(&dev->dev); 1506 | debugfs_remove_recursive(dev->debug_root); 1507 | /* XXX need to free the heaps and clients ? */ 1508 | kfree(dev); 1509 | } 1510 | EXPORT_SYMBOL(ion_device_destroy); 1511 | -------------------------------------------------------------------------------- /ion/ion.h: -------------------------------------------------------------------------------- 1 | /* 2 | * drivers/staging/android/ion/ion.h 3 | * 4 | * Copyright (C) 2011 Google, Inc. 5 | * 6 | * This software is licensed under the terms of the GNU General Public 7 | * License version 2, as published by the Free Software Foundation, and 8 | * may be copied, distributed, and modified under those terms. 9 | * 10 | * This program is distributed in the hope that it will be useful, 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 | * GNU General Public License for more details. 14 | * 15 | */ 16 | 17 | #ifndef _LINUX_ION_H 18 | #define _LINUX_ION_H 19 | 20 | #include 21 | 22 | #include "uapi/ion.h" 23 | 24 | struct ion_handle; 25 | struct ion_device; 26 | struct ion_heap; 27 | struct ion_mapper; 28 | struct ion_client; 29 | struct ion_buffer; 30 | 31 | /* 32 | * This should be removed some day when phys_addr_t's are fully 33 | * plumbed in the kernel, and all instances of ion_phys_addr_t should 34 | * be converted to phys_addr_t. 
For the time being many kernel interfaces 35 | * do not accept phys_addr_t's that would have to 36 | */ 37 | #define ion_phys_addr_t unsigned long 38 | 39 | /** 40 | * struct ion_platform_heap - defines a heap in the given platform 41 | * @type: type of the heap from ion_heap_type enum 42 | * @id: unique identifier for heap. When allocating higher numbers 43 | * will be allocated from first. At allocation these are passed 44 | * as a bit mask and therefore can not exceed ION_NUM_HEAP_IDS. 45 | * @name: used for debug purposes 46 | * @base: base address of heap in physical memory if applicable 47 | * @size: size of the heap in bytes if applicable 48 | * @align: required alignment in physical memory if applicable 49 | * @priv: private info passed from the board file 50 | * 51 | * Provided by the board file. 52 | */ 53 | struct ion_platform_heap { 54 | enum ion_heap_type type; 55 | unsigned int id; 56 | const char *name; 57 | ion_phys_addr_t base; 58 | size_t size; 59 | ion_phys_addr_t align; 60 | void *priv; 61 | }; 62 | 63 | /** 64 | * struct ion_platform_data - array of platform heaps passed from board file 65 | * @nr: number of structures in the array 66 | * @heaps: array of platform_heap structions 67 | * 68 | * Provided by the board file in the form of platform data to a platform device. 69 | */ 70 | struct ion_platform_data { 71 | int nr; 72 | struct ion_platform_heap *heaps; 73 | }; 74 | 75 | /** 76 | * ion_client_create() - allocate a client and returns it 77 | * @dev: the global ion device 78 | * @name: used for debugging 79 | */ 80 | struct ion_client *ion_client_create(struct ion_device *dev, 81 | const char *name); 82 | 83 | /** 84 | * ion_client_destroy() - free's a client and all it's handles 85 | * @client: the client 86 | * 87 | * Free the provided client and all it's resources including 88 | * any handles it is holding. 89 | */ 90 | void ion_client_destroy(struct ion_client *client); 91 | 92 | /** 93 | * ion_alloc - allocate ion memory 94 | * @client: the client 95 | * @len: size of the allocation 96 | * @align: requested allocation alignment, lots of hardware blocks 97 | * have alignment requirements of some kind 98 | * @heap_id_mask: mask of heaps to allocate from, if multiple bits are set 99 | * heaps will be tried in order from highest to lowest 100 | * id 101 | * @flags: heap flags, the low 16 bits are consumed by ion, the 102 | * high 16 bits are passed on to the respective heap and 103 | * can be heap custom 104 | * 105 | * Allocate memory in one of the heaps provided in heap mask and return 106 | * an opaque handle to it. 107 | */ 108 | struct ion_handle *ion_alloc(struct ion_client *client, size_t len, 109 | size_t align, unsigned int heap_id_mask, 110 | unsigned int flags); 111 | 112 | /** 113 | * ion_free - free a handle 114 | * @client: the client 115 | * @handle: the handle to free 116 | * 117 | * Free the provided handle. 118 | */ 119 | void ion_free(struct ion_client *client, struct ion_handle *handle); 120 | 121 | /** 122 | * ion_map_kernel - create mapping for the given handle 123 | * @client: the client 124 | * @handle: handle to map 125 | * 126 | * Map the given handle into the kernel and return a kernel address that 127 | * can be used to access this address. 
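 *
 * Example (illustrative only, not taken from a caller in this tree):
 * @client and @handle are assumed to come from ion_client_create() and
 * ion_alloc(), and len is the size that was allocated.
 *
 *	void *vaddr = ion_map_kernel(client, handle);
 *
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memset(vaddr, 0, len);
 *	ion_unmap_kernel(client, handle);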
128 | */ 129 | void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle); 130 | 131 | /** 132 | * ion_unmap_kernel() - destroy a kernel mapping for a handle 133 | * @client: the client 134 | * @handle: handle to unmap 135 | */ 136 | void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle); 137 | 138 | /** 139 | * ion_share_dma_buf() - share buffer as dma-buf 140 | * @client: the client 141 | * @handle: the handle 142 | */ 143 | struct dma_buf *ion_share_dma_buf(struct ion_client *client, 144 | struct ion_handle *handle); 145 | 146 | /** 147 | * ion_share_dma_buf_fd() - given an ion client, create a dma-buf fd 148 | * @client: the client 149 | * @handle: the handle 150 | */ 151 | int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle); 152 | 153 | /** 154 | * ion_import_dma_buf() - get ion_handle from dma-buf 155 | * @client: the client 156 | * @dmabuf: the dma-buf 157 | * 158 | * Get the ion_buffer associated with the dma-buf and return the ion_handle. 159 | * If no ion_handle exists for this buffer, return newly created ion_handle. 160 | * If dma-buf from another exporter is passed, return ERR_PTR(-EINVAL) 161 | */ 162 | struct ion_handle *ion_import_dma_buf(struct ion_client *client, 163 | struct dma_buf *dmabuf); 164 | 165 | /** 166 | * ion_import_dma_buf_fd() - given a dma-buf fd from the ion exporter get handle 167 | * @client: the client 168 | * @fd: the dma-buf fd 169 | * 170 | * Given an dma-buf fd that was allocated through ion via ion_share_dma_buf_fd, 171 | * import that fd and return a handle representing it. If a dma-buf from 172 | * another exporter is passed in this function will return ERR_PTR(-EINVAL) 173 | */ 174 | struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd); 175 | 176 | #endif /* _LINUX_ION_H */ 177 | -------------------------------------------------------------------------------- /ion/ion_carveout_heap.c: -------------------------------------------------------------------------------- 1 | /* 2 | * drivers/staging/android/ion/ion_carveout_heap.c 3 | * 4 | * Copyright (C) 2011 Google, Inc. 5 | * 6 | * This software is licensed under the terms of the GNU General Public 7 | * License version 2, as published by the Free Software Foundation, and 8 | * may be copied, distributed, and modified under those terms. 9 | * 10 | * This program is distributed in the hope that it will be useful, 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 | * GNU General Public License for more details. 
14 | * 15 | */ 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include "ion.h" 26 | #include "ion_priv.h" 27 | 28 | #define ION_CARVEOUT_ALLOCATE_FAIL -1 29 | 30 | struct ion_carveout_heap { 31 | struct ion_heap heap; 32 | struct gen_pool *pool; 33 | ion_phys_addr_t base; 34 | }; 35 | 36 | static ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, 37 | unsigned long size, 38 | unsigned long align) 39 | { 40 | struct ion_carveout_heap *carveout_heap = 41 | container_of(heap, struct ion_carveout_heap, heap); 42 | unsigned long offset = gen_pool_alloc(carveout_heap->pool, size); 43 | 44 | if (!offset) 45 | return ION_CARVEOUT_ALLOCATE_FAIL; 46 | 47 | return offset; 48 | } 49 | 50 | static void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr, 51 | unsigned long size) 52 | { 53 | struct ion_carveout_heap *carveout_heap = 54 | container_of(heap, struct ion_carveout_heap, heap); 55 | 56 | if (addr == ION_CARVEOUT_ALLOCATE_FAIL) 57 | return; 58 | gen_pool_free(carveout_heap->pool, addr, size); 59 | } 60 | 61 | static int ion_carveout_heap_allocate(struct ion_heap *heap, 62 | struct ion_buffer *buffer, 63 | unsigned long size, unsigned long align, 64 | unsigned long flags) 65 | { 66 | struct sg_table *table; 67 | ion_phys_addr_t paddr; 68 | int ret; 69 | 70 | if (align > PAGE_SIZE) 71 | return -EINVAL; 72 | 73 | table = kmalloc(sizeof(*table), GFP_KERNEL); 74 | if (!table) 75 | return -ENOMEM; 76 | ret = sg_alloc_table(table, 1, GFP_KERNEL); 77 | if (ret) 78 | goto err_free; 79 | 80 | paddr = ion_carveout_allocate(heap, size, align); 81 | if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) { 82 | ret = -ENOMEM; 83 | goto err_free_table; 84 | } 85 | 86 | sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0); 87 | buffer->sg_table = table; 88 | 89 | return 0; 90 | 91 | err_free_table: 92 | sg_free_table(table); 93 | err_free: 94 | kfree(table); 95 | return ret; 96 | } 97 | 98 | static void ion_carveout_heap_free(struct ion_buffer *buffer) 99 | { 100 | struct ion_heap *heap = buffer->heap; 101 | struct sg_table *table = buffer->sg_table; 102 | struct page *page = sg_page(table->sgl); 103 | ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page)); 104 | 105 | ion_heap_buffer_zero(buffer); 106 | 107 | if (ion_buffer_cached(buffer)) 108 | dma_sync_sg_for_device(NULL, table->sgl, table->nents, 109 | DMA_BIDIRECTIONAL); 110 | 111 | ion_carveout_free(heap, paddr, buffer->size); 112 | sg_free_table(table); 113 | kfree(table); 114 | } 115 | 116 | static struct ion_heap_ops carveout_heap_ops = { 117 | .allocate = ion_carveout_heap_allocate, 118 | .free = ion_carveout_heap_free, 119 | .map_user = ion_heap_map_user, 120 | .map_kernel = ion_heap_map_kernel, 121 | .unmap_kernel = ion_heap_unmap_kernel, 122 | }; 123 | 124 | struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data) 125 | { 126 | struct ion_carveout_heap *carveout_heap; 127 | int ret; 128 | 129 | struct page *page; 130 | size_t size; 131 | 132 | page = pfn_to_page(PFN_DOWN(heap_data->base)); 133 | size = heap_data->size; 134 | 135 | ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL); 136 | 137 | ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL)); 138 | if (ret) 139 | return ERR_PTR(ret); 140 | 141 | carveout_heap = kzalloc(sizeof(*carveout_heap), GFP_KERNEL); 142 | if (!carveout_heap) 143 | return ERR_PTR(-ENOMEM); 144 | 145 | carveout_heap->pool = gen_pool_create(PAGE_SHIFT, -1); 146 | if 
(!carveout_heap->pool) { 147 | kfree(carveout_heap); 148 | return ERR_PTR(-ENOMEM); 149 | } 150 | carveout_heap->base = heap_data->base; 151 | gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size, 152 | -1); 153 | carveout_heap->heap.ops = &carveout_heap_ops; 154 | carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT; 155 | carveout_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE; 156 | 157 | return &carveout_heap->heap; 158 | } 159 | 160 | void ion_carveout_heap_destroy(struct ion_heap *heap) 161 | { 162 | struct ion_carveout_heap *carveout_heap = 163 | container_of(heap, struct ion_carveout_heap, heap); 164 | 165 | gen_pool_destroy(carveout_heap->pool); 166 | kfree(carveout_heap); 167 | carveout_heap = NULL; 168 | } 169 | -------------------------------------------------------------------------------- /ion/ion_chunk_heap.c: -------------------------------------------------------------------------------- 1 | /* 2 | * drivers/staging/android/ion/ion_chunk_heap.c 3 | * 4 | * Copyright (C) 2012 Google, Inc. 5 | * 6 | * This software is licensed under the terms of the GNU General Public 7 | * License version 2, as published by the Free Software Foundation, and 8 | * may be copied, distributed, and modified under those terms. 9 | * 10 | * This program is distributed in the hope that it will be useful, 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 | * GNU General Public License for more details. 14 | * 15 | */ 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include "ion.h" 25 | #include "ion_priv.h" 26 | 27 | struct ion_chunk_heap { 28 | struct ion_heap heap; 29 | struct gen_pool *pool; 30 | ion_phys_addr_t base; 31 | unsigned long chunk_size; 32 | unsigned long size; 33 | unsigned long allocated; 34 | }; 35 | 36 | static int ion_chunk_heap_allocate(struct ion_heap *heap, 37 | struct ion_buffer *buffer, 38 | unsigned long size, unsigned long align, 39 | unsigned long flags) 40 | { 41 | struct ion_chunk_heap *chunk_heap = 42 | container_of(heap, struct ion_chunk_heap, heap); 43 | struct sg_table *table; 44 | struct scatterlist *sg; 45 | int ret, i; 46 | unsigned long num_chunks; 47 | unsigned long allocated_size; 48 | 49 | if (align > chunk_heap->chunk_size) 50 | return -EINVAL; 51 | 52 | allocated_size = ALIGN(size, chunk_heap->chunk_size); 53 | num_chunks = allocated_size / chunk_heap->chunk_size; 54 | 55 | if (allocated_size > chunk_heap->size - chunk_heap->allocated) 56 | return -ENOMEM; 57 | 58 | table = kmalloc(sizeof(*table), GFP_KERNEL); 59 | if (!table) 60 | return -ENOMEM; 61 | ret = sg_alloc_table(table, num_chunks, GFP_KERNEL); 62 | if (ret) { 63 | kfree(table); 64 | return ret; 65 | } 66 | 67 | sg = table->sgl; 68 | for (i = 0; i < num_chunks; i++) { 69 | unsigned long paddr = gen_pool_alloc(chunk_heap->pool, 70 | chunk_heap->chunk_size); 71 | if (!paddr) 72 | goto err; 73 | sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)), 74 | chunk_heap->chunk_size, 0); 75 | sg = sg_next(sg); 76 | } 77 | 78 | buffer->sg_table = table; 79 | chunk_heap->allocated += allocated_size; 80 | return 0; 81 | err: 82 | sg = table->sgl; 83 | for (i -= 1; i >= 0; i--) { 84 | gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)), 85 | sg->length); 86 | sg = sg_next(sg); 87 | } 88 | sg_free_table(table); 89 | kfree(table); 90 | return -ENOMEM; 91 | } 92 | 93 | static void ion_chunk_heap_free(struct ion_buffer *buffer) 94 | { 95 | struct 
ion_heap *heap = buffer->heap; 96 | struct ion_chunk_heap *chunk_heap = 97 | container_of(heap, struct ion_chunk_heap, heap); 98 | struct sg_table *table = buffer->sg_table; 99 | struct scatterlist *sg; 100 | int i; 101 | unsigned long allocated_size; 102 | 103 | allocated_size = ALIGN(buffer->size, chunk_heap->chunk_size); 104 | 105 | ion_heap_buffer_zero(buffer); 106 | 107 | if (ion_buffer_cached(buffer)) 108 | dma_sync_sg_for_device(NULL, table->sgl, table->nents, 109 | DMA_BIDIRECTIONAL); 110 | 111 | for_each_sg(table->sgl, sg, table->nents, i) { 112 | gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)), 113 | sg->length); 114 | } 115 | chunk_heap->allocated -= allocated_size; 116 | sg_free_table(table); 117 | kfree(table); 118 | } 119 | 120 | static struct ion_heap_ops chunk_heap_ops = { 121 | .allocate = ion_chunk_heap_allocate, 122 | .free = ion_chunk_heap_free, 123 | .map_user = ion_heap_map_user, 124 | .map_kernel = ion_heap_map_kernel, 125 | .unmap_kernel = ion_heap_unmap_kernel, 126 | }; 127 | 128 | struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data) 129 | { 130 | struct ion_chunk_heap *chunk_heap; 131 | int ret; 132 | struct page *page; 133 | size_t size; 134 | 135 | page = pfn_to_page(PFN_DOWN(heap_data->base)); 136 | size = heap_data->size; 137 | 138 | ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL); 139 | 140 | ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL)); 141 | if (ret) 142 | return ERR_PTR(ret); 143 | 144 | chunk_heap = kzalloc(sizeof(*chunk_heap), GFP_KERNEL); 145 | if (!chunk_heap) 146 | return ERR_PTR(-ENOMEM); 147 | 148 | chunk_heap->chunk_size = (unsigned long)heap_data->priv; 149 | chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) + 150 | PAGE_SHIFT, -1); 151 | if (!chunk_heap->pool) { 152 | ret = -ENOMEM; 153 | goto error_gen_pool_create; 154 | } 155 | chunk_heap->base = heap_data->base; 156 | chunk_heap->size = heap_data->size; 157 | chunk_heap->allocated = 0; 158 | 159 | gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1); 160 | chunk_heap->heap.ops = &chunk_heap_ops; 161 | chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK; 162 | chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE; 163 | pr_debug("%s: base %lu size %zu align %ld\n", __func__, 164 | chunk_heap->base, heap_data->size, heap_data->align); 165 | 166 | return &chunk_heap->heap; 167 | 168 | error_gen_pool_create: 169 | kfree(chunk_heap); 170 | return ERR_PTR(ret); 171 | } 172 | 173 | void ion_chunk_heap_destroy(struct ion_heap *heap) 174 | { 175 | struct ion_chunk_heap *chunk_heap = 176 | container_of(heap, struct ion_chunk_heap, heap); 177 | 178 | gen_pool_destroy(chunk_heap->pool); 179 | kfree(chunk_heap); 180 | chunk_heap = NULL; 181 | } 182 | -------------------------------------------------------------------------------- /ion/ion_cma_heap.c: -------------------------------------------------------------------------------- 1 | /* 2 | * drivers/staging/android/ion/ion_cma_heap.c 3 | * 4 | * Copyright (C) Linaro 2012 5 | * Author: for ST-Ericsson. 6 | * 7 | * This software is licensed under the terms of the GNU General Public 8 | * License version 2, as published by the Free Software Foundation, and 9 | * may be copied, distributed, and modified under those terms. 10 | * 11 | * This program is distributed in the hope that it will be useful, 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the 14 | * GNU General Public License for more details. 15 | * 16 | */ 17 | 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | 24 | #include "ion.h" 25 | #include "ion_priv.h" 26 | 27 | #define ION_CMA_ALLOCATE_FAILED -1 28 | 29 | struct ion_cma_heap { 30 | struct ion_heap heap; 31 | struct device *dev; 32 | }; 33 | 34 | #define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap) 35 | 36 | struct ion_cma_buffer_info { 37 | void *cpu_addr; 38 | dma_addr_t handle; 39 | struct sg_table *table; 40 | }; 41 | 42 | 43 | /* ION CMA heap operations functions */ 44 | static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer, 45 | unsigned long len, unsigned long align, 46 | unsigned long flags) 47 | { 48 | struct ion_cma_heap *cma_heap = to_cma_heap(heap); 49 | struct device *dev = cma_heap->dev; 50 | struct ion_cma_buffer_info *info; 51 | 52 | dev_dbg(dev, "Request buffer allocation len %ld\n", len); 53 | 54 | // if (buffer->flags & ION_FLAG_CACHED){ 55 | // return -EINVAL; 56 | // } 57 | 58 | if (align > PAGE_SIZE) 59 | return -EINVAL; 60 | 61 | info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL); 62 | if (!info) 63 | return ION_CMA_ALLOCATE_FAILED; 64 | 65 | info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle), 66 | GFP_HIGHUSER | __GFP_ZERO); 67 | 68 | if (!info->cpu_addr) { 69 | dev_err(dev, "Fail to allocate buffer\n"); 70 | goto err; 71 | } 72 | 73 | info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL); 74 | if (!info->table) 75 | goto free_mem; 76 | 77 | if (dma_get_sgtable(dev, info->table, info->cpu_addr, info->handle, 78 | len)) 79 | goto free_table; 80 | /* keep this for memory release */ 81 | buffer->priv_virt = info; 82 | buffer->sg_table = info->table; 83 | dev_dbg(dev, "Allocate buffer %p\n", buffer); 84 | return 0; 85 | 86 | free_table: 87 | kfree(info->table); 88 | free_mem: 89 | dma_free_coherent(dev, len, info->cpu_addr, info->handle); 90 | err: 91 | kfree(info); 92 | return ION_CMA_ALLOCATE_FAILED; 93 | } 94 | 95 | static void ion_cma_free(struct ion_buffer *buffer) 96 | { 97 | struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap); 98 | struct device *dev = cma_heap->dev; 99 | struct ion_cma_buffer_info *info = buffer->priv_virt; 100 | 101 | dev_dbg(dev, "Release buffer %p\n", buffer); 102 | /* release memory */ 103 | dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle); 104 | /* release sg table */ 105 | sg_free_table(info->table); 106 | kfree(info->table); 107 | kfree(info); 108 | } 109 | 110 | static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer, 111 | struct vm_area_struct *vma) 112 | { 113 | struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap); 114 | struct device *dev = cma_heap->dev; 115 | struct ion_cma_buffer_info *info = buffer->priv_virt; 116 | 117 | if (buffer->flags & ION_FLAG_CACHED){ 118 | return remap_pfn_range(vma, vma->vm_start, 119 | __phys_to_pfn((u32)info->handle) + vma->vm_pgoff, 120 | vma->vm_end - vma->vm_start, 121 | vma->vm_page_prot); 122 | } 123 | 124 | return dma_mmap_coherent(dev, vma, info->cpu_addr, info->handle, 125 | buffer->size); 126 | } 127 | 128 | static void *ion_cma_map_kernel(struct ion_heap *heap, 129 | struct ion_buffer *buffer) 130 | { 131 | struct ion_cma_buffer_info *info = buffer->priv_virt; 132 | /* kernel memory mapping has been done at allocation time */ 133 | return info->cpu_addr; 134 | } 135 | 136 | static void ion_cma_unmap_kernel(struct ion_heap *heap, 137 | struct ion_buffer *buffer) 138 | { 139 | } 
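
/*
 * Illustrative sketch only -- not used by this driver and not wired into
 * ion_cma_ops below. It shows, in isolation, the dma_alloc_coherent() +
 * dma_get_sgtable() pattern that ion_cma_allocate() above is built on.
 * 'dev' is assumed to be a device with a CMA/coherent region attached.
 */
static int __maybe_unused ion_cma_pattern_example(struct device *dev,
						  size_t len)
{
	struct sg_table table;
	dma_addr_t handle;
	void *cpu_addr;
	int ret;

	/* one physically contiguous, zeroed, CPU-mapped allocation */
	cpu_addr = dma_alloc_coherent(dev, len, &handle,
				      GFP_KERNEL | __GFP_ZERO);
	if (!cpu_addr)
		return -ENOMEM;

	/* describe the same memory as a single-entry scatterlist */
	ret = dma_get_sgtable(dev, &table, cpu_addr, handle, len);
	if (!ret)
		sg_free_table(&table);

	dma_free_coherent(dev, len, cpu_addr, handle);
	return ret;
}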
140 | 141 | static struct ion_heap_ops ion_cma_ops = { 142 | .allocate = ion_cma_allocate, 143 | .free = ion_cma_free, 144 | .map_user = ion_cma_mmap, 145 | .map_kernel = ion_cma_map_kernel, 146 | .unmap_kernel = ion_cma_unmap_kernel, 147 | }; 148 | 149 | struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data) 150 | { 151 | struct ion_cma_heap *cma_heap; 152 | 153 | cma_heap = kzalloc(sizeof(struct ion_cma_heap), GFP_KERNEL); 154 | 155 | if (!cma_heap) 156 | return ERR_PTR(-ENOMEM); 157 | 158 | cma_heap->heap.ops = &ion_cma_ops; 159 | /* 160 | * get device from private heaps data, later it will be 161 | * used to make the link with reserved CMA memory 162 | */ 163 | cma_heap->dev = data->priv; 164 | cma_heap->heap.type = ION_HEAP_TYPE_DMA; 165 | return &cma_heap->heap; 166 | } 167 | 168 | void ion_cma_heap_destroy(struct ion_heap *heap) 169 | { 170 | struct ion_cma_heap *cma_heap = to_cma_heap(heap); 171 | 172 | kfree(cma_heap); 173 | } 174 | -------------------------------------------------------------------------------- /ion/ion_heap.c: -------------------------------------------------------------------------------- 1 | /* 2 | * drivers/staging/android/ion/ion_heap.c 3 | * 4 | * Copyright (C) 2011 Google, Inc. 5 | * 6 | * This software is licensed under the terms of the GNU General Public 7 | * License version 2, as published by the Free Software Foundation, and 8 | * may be copied, distributed, and modified under those terms. 9 | * 10 | * This program is distributed in the hope that it will be useful, 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 | * GNU General Public License for more details. 14 | * 15 | */ 16 | 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include "ion.h" 27 | #include "ion_priv.h" 28 | 29 | void *ion_heap_map_kernel(struct ion_heap *heap, 30 | struct ion_buffer *buffer) 31 | { 32 | struct scatterlist *sg; 33 | int i, j; 34 | void *vaddr; 35 | pgprot_t pgprot; 36 | struct sg_table *table = buffer->sg_table; 37 | int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; 38 | struct page **pages = vmalloc(sizeof(struct page *) * npages); 39 | struct page **tmp = pages; 40 | 41 | if (!pages) 42 | return ERR_PTR(-ENOMEM); 43 | 44 | if (buffer->flags & ION_FLAG_CACHED) 45 | pgprot = PAGE_KERNEL; 46 | else 47 | pgprot = pgprot_writecombine(PAGE_KERNEL); 48 | 49 | for_each_sg(table->sgl, sg, table->nents, i) { 50 | int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE; 51 | struct page *page = sg_page(sg); 52 | 53 | BUG_ON(i >= npages); 54 | for (j = 0; j < npages_this_entry; j++) 55 | *(tmp++) = page++; 56 | } 57 | vaddr = vmap(pages, npages, VM_MAP, pgprot); 58 | vfree(pages); 59 | 60 | if (!vaddr) 61 | return ERR_PTR(-ENOMEM); 62 | 63 | return vaddr; 64 | } 65 | 66 | void ion_heap_unmap_kernel(struct ion_heap *heap, 67 | struct ion_buffer *buffer) 68 | { 69 | vunmap(buffer->vaddr); 70 | } 71 | 72 | int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer, 73 | struct vm_area_struct *vma) 74 | { 75 | struct sg_table *table = buffer->sg_table; 76 | unsigned long addr = vma->vm_start; 77 | unsigned long offset = vma->vm_pgoff * PAGE_SIZE; 78 | struct scatterlist *sg; 79 | int i; 80 | int ret; 81 | 82 | for_each_sg(table->sgl, sg, table->nents, i) { 83 | struct page *page = sg_page(sg); 84 | unsigned long remainder = vma->vm_end - addr; 85 | unsigned long len = sg->length; 
86 | 87 | if (offset >= sg->length) { 88 | offset -= sg->length; 89 | continue; 90 | } else if (offset) { 91 | page += offset / PAGE_SIZE; 92 | len = sg->length - offset; 93 | offset = 0; 94 | } 95 | len = min(len, remainder); 96 | ret = remap_pfn_range(vma, addr, page_to_pfn(page), len, 97 | vma->vm_page_prot); 98 | if (ret) 99 | return ret; 100 | addr += len; 101 | if (addr >= vma->vm_end) 102 | return 0; 103 | } 104 | return 0; 105 | } 106 | 107 | static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot) 108 | { 109 | void *addr = vm_map_ram(pages, num, -1, pgprot); 110 | 111 | if (!addr) 112 | return -ENOMEM; 113 | memset(addr, 0, PAGE_SIZE * num); 114 | vm_unmap_ram(addr, num); 115 | 116 | return 0; 117 | } 118 | 119 | static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents, 120 | pgprot_t pgprot) 121 | { 122 | int p = 0; 123 | int ret = 0; 124 | struct sg_page_iter piter; 125 | struct page *pages[32]; 126 | 127 | for_each_sg_page(sgl, &piter, nents, 0) { 128 | pages[p++] = sg_page_iter_page(&piter); 129 | if (p == ARRAY_SIZE(pages)) { 130 | ret = ion_heap_clear_pages(pages, p, pgprot); 131 | if (ret) 132 | return ret; 133 | p = 0; 134 | } 135 | } 136 | if (p) 137 | ret = ion_heap_clear_pages(pages, p, pgprot); 138 | 139 | return ret; 140 | } 141 | 142 | int ion_heap_buffer_zero(struct ion_buffer *buffer) 143 | { 144 | struct sg_table *table = buffer->sg_table; 145 | pgprot_t pgprot; 146 | 147 | if (buffer->flags & ION_FLAG_CACHED) 148 | pgprot = PAGE_KERNEL; 149 | else 150 | pgprot = pgprot_writecombine(PAGE_KERNEL); 151 | 152 | return ion_heap_sglist_zero(table->sgl, table->nents, pgprot); 153 | } 154 | 155 | int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot) 156 | { 157 | struct scatterlist sg; 158 | 159 | sg_init_table(&sg, 1); 160 | sg_set_page(&sg, page, size, 0); 161 | return ion_heap_sglist_zero(&sg, 1, pgprot); 162 | } 163 | 164 | void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer) 165 | { 166 | spin_lock(&heap->free_lock); 167 | list_add(&buffer->list, &heap->free_list); 168 | heap->free_list_size += buffer->size; 169 | spin_unlock(&heap->free_lock); 170 | wake_up(&heap->waitqueue); 171 | } 172 | 173 | size_t ion_heap_freelist_size(struct ion_heap *heap) 174 | { 175 | size_t size; 176 | 177 | spin_lock(&heap->free_lock); 178 | size = heap->free_list_size; 179 | spin_unlock(&heap->free_lock); 180 | 181 | return size; 182 | } 183 | 184 | static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size, 185 | bool skip_pools) 186 | { 187 | struct ion_buffer *buffer; 188 | size_t total_drained = 0; 189 | 190 | if (ion_heap_freelist_size(heap) == 0) 191 | return 0; 192 | 193 | spin_lock(&heap->free_lock); 194 | if (size == 0) 195 | size = heap->free_list_size; 196 | 197 | while (!list_empty(&heap->free_list)) { 198 | if (total_drained >= size) 199 | break; 200 | buffer = list_first_entry(&heap->free_list, struct ion_buffer, 201 | list); 202 | list_del(&buffer->list); 203 | heap->free_list_size -= buffer->size; 204 | if (skip_pools) 205 | buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE; 206 | total_drained += buffer->size; 207 | spin_unlock(&heap->free_lock); 208 | ion_buffer_destroy(buffer); 209 | spin_lock(&heap->free_lock); 210 | } 211 | spin_unlock(&heap->free_lock); 212 | 213 | return total_drained; 214 | } 215 | 216 | size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size) 217 | { 218 | return _ion_heap_freelist_drain(heap, size, false); 219 | } 220 | 221 | size_t 
ion_heap_freelist_shrink(struct ion_heap *heap, size_t size) 222 | { 223 | return _ion_heap_freelist_drain(heap, size, true); 224 | } 225 | 226 | static int ion_heap_deferred_free(void *data) 227 | { 228 | struct ion_heap *heap = data; 229 | 230 | while (true) { 231 | struct ion_buffer *buffer; 232 | 233 | wait_event_freezable(heap->waitqueue, 234 | ion_heap_freelist_size(heap) > 0); 235 | 236 | spin_lock(&heap->free_lock); 237 | if (list_empty(&heap->free_list)) { 238 | spin_unlock(&heap->free_lock); 239 | continue; 240 | } 241 | buffer = list_first_entry(&heap->free_list, struct ion_buffer, 242 | list); 243 | list_del(&buffer->list); 244 | heap->free_list_size -= buffer->size; 245 | spin_unlock(&heap->free_lock); 246 | ion_buffer_destroy(buffer); 247 | } 248 | 249 | return 0; 250 | } 251 | 252 | int ion_heap_init_deferred_free(struct ion_heap *heap) 253 | { 254 | struct sched_param param = { .sched_priority = 0 }; 255 | 256 | INIT_LIST_HEAD(&heap->free_list); 257 | init_waitqueue_head(&heap->waitqueue); 258 | heap->task = kthread_run(ion_heap_deferred_free, heap, 259 | "%s", heap->name); 260 | if (IS_ERR(heap->task)) { 261 | pr_err("%s: creating thread for deferred free failed\n", 262 | __func__); 263 | return PTR_ERR_OR_ZERO(heap->task); 264 | } 265 | sched_setscheduler(heap->task, SCHED_IDLE, ¶m); 266 | return 0; 267 | } 268 | 269 | static unsigned long ion_heap_shrink_count(struct shrinker *shrinker, 270 | struct shrink_control *sc) 271 | { 272 | struct ion_heap *heap = container_of(shrinker, struct ion_heap, 273 | shrinker); 274 | int total = 0; 275 | 276 | total = ion_heap_freelist_size(heap) / PAGE_SIZE; 277 | if (heap->ops->shrink) 278 | total += heap->ops->shrink(heap, sc->gfp_mask, 0); 279 | return total; 280 | } 281 | 282 | static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker, 283 | struct shrink_control *sc) 284 | { 285 | struct ion_heap *heap = container_of(shrinker, struct ion_heap, 286 | shrinker); 287 | int freed = 0; 288 | int to_scan = sc->nr_to_scan; 289 | 290 | if (to_scan == 0) 291 | return 0; 292 | 293 | /* 294 | * shrink the free list first, no point in zeroing the memory if we're 295 | * just going to reclaim it. Also, skip any possible page pooling. 
296 | */ 297 | if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) 298 | freed = ion_heap_freelist_shrink(heap, to_scan * PAGE_SIZE) / 299 | PAGE_SIZE; 300 | 301 | to_scan -= freed; 302 | if (to_scan <= 0) 303 | return freed; 304 | 305 | if (heap->ops->shrink) 306 | freed += heap->ops->shrink(heap, sc->gfp_mask, to_scan); 307 | return freed; 308 | } 309 | 310 | void ion_heap_init_shrinker(struct ion_heap *heap) 311 | { 312 | heap->shrinker.count_objects = ion_heap_shrink_count; 313 | heap->shrinker.scan_objects = ion_heap_shrink_scan; 314 | heap->shrinker.seeks = DEFAULT_SEEKS; 315 | heap->shrinker.batch = 0; 316 | register_shrinker(&heap->shrinker); 317 | } 318 | 319 | struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data) 320 | { 321 | struct ion_heap *heap = NULL; 322 | 323 | switch (heap_data->type) { 324 | case ION_HEAP_TYPE_SYSTEM_CONTIG: 325 | heap = ion_system_contig_heap_create(heap_data); 326 | break; 327 | case ION_HEAP_TYPE_SYSTEM: 328 | heap = ion_system_heap_create(heap_data); 329 | break; 330 | case ION_HEAP_TYPE_CARVEOUT: 331 | heap = ion_carveout_heap_create(heap_data); 332 | break; 333 | case ION_HEAP_TYPE_CHUNK: 334 | heap = ion_chunk_heap_create(heap_data); 335 | break; 336 | case ION_HEAP_TYPE_DMA: 337 | heap = ion_cma_heap_create(heap_data); 338 | break; 339 | default: 340 | pr_err("%s: Invalid heap type %d\n", __func__, 341 | heap_data->type); 342 | return ERR_PTR(-EINVAL); 343 | } 344 | 345 | if (IS_ERR_OR_NULL(heap)) { 346 | pr_err("%s: error creating heap %s type %d base %lu size %zu\n", 347 | __func__, heap_data->name, heap_data->type, 348 | heap_data->base, heap_data->size); 349 | return ERR_PTR(-EINVAL); 350 | } 351 | 352 | heap->name = heap_data->name; 353 | heap->id = heap_data->id; 354 | return heap; 355 | } 356 | EXPORT_SYMBOL(ion_heap_create); 357 | 358 | void ion_heap_destroy(struct ion_heap *heap) 359 | { 360 | if (!heap) 361 | return; 362 | 363 | switch (heap->type) { 364 | case ION_HEAP_TYPE_SYSTEM_CONTIG: 365 | ion_system_contig_heap_destroy(heap); 366 | break; 367 | case ION_HEAP_TYPE_SYSTEM: 368 | ion_system_heap_destroy(heap); 369 | break; 370 | case ION_HEAP_TYPE_CARVEOUT: 371 | ion_carveout_heap_destroy(heap); 372 | break; 373 | case ION_HEAP_TYPE_CHUNK: 374 | ion_chunk_heap_destroy(heap); 375 | break; 376 | case ION_HEAP_TYPE_DMA: 377 | ion_cma_heap_destroy(heap); 378 | break; 379 | default: 380 | pr_err("%s: Invalid heap type %d\n", __func__, 381 | heap->type); 382 | } 383 | } 384 | EXPORT_SYMBOL(ion_heap_destroy); 385 | -------------------------------------------------------------------------------- /ion/ion_of.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Based on work from: 3 | * Andrew Andrianov 4 | * Google 5 | * The Linux Foundation 6 | * 7 | * This program is free software; you can redistribute it and/or modify 8 | * it under the terms of the GNU General Public License version 2 as 9 | * published by the Free Software Foundation. 
10 | */ 11 | 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include "ion.h" 25 | #include "ion_priv.h" 26 | #include "ion_of.h" 27 | 28 | static int ion_parse_dt_heap_common(struct device_node *heap_node, 29 | struct ion_platform_heap *heap, 30 | struct ion_of_heap *compatible) 31 | { 32 | int i; 33 | 34 | for (i = 0; compatible[i].name; i++) { 35 | if (of_device_is_compatible(heap_node, compatible[i].compat)) 36 | break; 37 | } 38 | 39 | if (!compatible[i].name) 40 | return -ENODEV; 41 | 42 | heap->id = compatible[i].heap_id; 43 | heap->type = compatible[i].type; 44 | heap->name = compatible[i].name; 45 | heap->align = compatible[i].align; 46 | 47 | /* Some kind of callback function pointer? */ 48 | 49 | pr_info("%s: id %d type %d name %s align %lx\n", __func__, 50 | heap->id, heap->type, heap->name, heap->align); 51 | return 0; 52 | } 53 | 54 | static int ion_setup_heap_common(struct platform_device *parent, 55 | struct device_node *heap_node, 56 | struct ion_platform_heap *heap) 57 | { 58 | int ret = 0; 59 | 60 | switch (heap->type) { 61 | case ION_HEAP_TYPE_CARVEOUT: 62 | case ION_HEAP_TYPE_CHUNK: 63 | if (heap->base && heap->size) 64 | return 0; 65 | 66 | ret = of_reserved_mem_device_init(heap->priv); 67 | break; 68 | default: 69 | break; 70 | } 71 | 72 | return ret; 73 | } 74 | 75 | struct ion_platform_data *ion_parse_dt(struct platform_device *pdev, 76 | struct ion_of_heap *compatible) 77 | { 78 | int num_heaps, ret; 79 | const struct device_node *dt_node = pdev->dev.of_node; 80 | struct device_node *node; 81 | struct ion_platform_heap *heaps; 82 | struct ion_platform_data *data; 83 | int i = 0; 84 | 85 | num_heaps = of_get_available_child_count(dt_node); 86 | 87 | if (!num_heaps) 88 | return ERR_PTR(-EINVAL); 89 | 90 | heaps = devm_kzalloc(&pdev->dev, 91 | sizeof(struct ion_platform_heap) * num_heaps, 92 | GFP_KERNEL); 93 | if (!heaps) 94 | return ERR_PTR(-ENOMEM); 95 | 96 | data = devm_kzalloc(&pdev->dev, sizeof(struct ion_platform_data), 97 | GFP_KERNEL); 98 | if (!data) 99 | return ERR_PTR(-ENOMEM); 100 | 101 | for_each_available_child_of_node(dt_node, node) { 102 | struct platform_device *heap_pdev; 103 | 104 | ret = ion_parse_dt_heap_common(node, &heaps[i], compatible); 105 | if (ret) 106 | return ERR_PTR(ret); 107 | 108 | heap_pdev = of_platform_device_create(node, heaps[i].name, 109 | &pdev->dev); 110 | if (!heap_pdev) 111 | return ERR_PTR(-ENOMEM); 112 | heap_pdev->dev.platform_data = &heaps[i]; 113 | 114 | heaps[i].priv = &heap_pdev->dev; 115 | 116 | ret = ion_setup_heap_common(pdev, node, &heaps[i]); 117 | if (ret) 118 | goto out_err; 119 | i++; 120 | } 121 | 122 | data->heaps = heaps; 123 | data->nr = num_heaps; 124 | return data; 125 | 126 | out_err: 127 | for ( ; i >= 0; i--) 128 | if (heaps[i].priv) 129 | of_device_unregister(to_platform_device(heaps[i].priv)); 130 | 131 | return ERR_PTR(ret); 132 | } 133 | 134 | void ion_destroy_platform_data(struct ion_platform_data *data) 135 | { 136 | int i; 137 | 138 | for (i = 0; i < data->nr; i++) 139 | if (data->heaps[i].priv) 140 | of_device_unregister(to_platform_device( 141 | data->heaps[i].priv)); 142 | } 143 | 144 | #ifdef CONFIG_OF_RESERVED_MEM 145 | #include 146 | #include 147 | #include 148 | 149 | static int rmem_ion_device_init(struct reserved_mem *rmem, struct device *dev) 150 | { 151 | struct platform_device *pdev = to_platform_device(dev); 152 | struct ion_platform_heap *heap = 
pdev->dev.platform_data; 153 | 154 | heap->base = rmem->base; 155 | heap->size = rmem->size; 156 | pr_debug("%s: heap %s base %pa size %pa dev %p\n", __func__, 157 | heap->name, &rmem->base, &rmem->size, dev); 158 | return 0; 159 | } 160 | 161 | static void rmem_ion_device_release(struct reserved_mem *rmem, 162 | struct device *dev) 163 | { 164 | return; 165 | } 166 | 167 | static const struct reserved_mem_ops rmem_dma_ops = { 168 | .device_init = rmem_ion_device_init, 169 | .device_release = rmem_ion_device_release, 170 | }; 171 | 172 | static int __init rmem_ion_setup(struct reserved_mem *rmem) 173 | { 174 | phys_addr_t size = rmem->size; 175 | 176 | size = size / 1024; 177 | 178 | pr_info("Ion memory setup at %pa size %pa KiB\n", 179 | &rmem->base, &size); 180 | rmem->ops = &rmem_dma_ops; 181 | return 0; 182 | } 183 | 184 | RESERVEDMEM_OF_DECLARE(ion, "ion-region", rmem_ion_setup); 185 | #endif 186 | -------------------------------------------------------------------------------- /ion/ion_of.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Based on work from: 3 | * Andrew Andrianov 4 | * Google 5 | * The Linux Foundation 6 | * 7 | * This program is free software; you can redistribute it and/or modify 8 | * it under the terms of the GNU General Public License version 2 as 9 | * published by the Free Software Foundation. 10 | */ 11 | 12 | #ifndef _ION_OF_H 13 | #define _ION_OF_H 14 | 15 | struct ion_of_heap { 16 | const char *compat; 17 | int heap_id; 18 | int type; 19 | const char *name; 20 | int align; 21 | }; 22 | 23 | #define PLATFORM_HEAP(_compat, _id, _type, _name) \ 24 | { \ 25 | .compat = _compat, \ 26 | .heap_id = _id, \ 27 | .type = _type, \ 28 | .name = _name, \ 29 | .align = PAGE_SIZE, \ 30 | } 31 | 32 | struct ion_platform_data *ion_parse_dt(struct platform_device *pdev, 33 | struct ion_of_heap *compatible); 34 | 35 | void ion_destroy_platform_data(struct ion_platform_data *data); 36 | 37 | #endif 38 | -------------------------------------------------------------------------------- /ion/ion_page_pool.c: -------------------------------------------------------------------------------- 1 | /* 2 | * drivers/staging/android/ion/ion_mem_pool.c 3 | * 4 | * Copyright (C) 2011 Google, Inc. 5 | * 6 | * This software is licensed under the terms of the GNU General Public 7 | * License version 2, as published by the Free Software Foundation, and 8 | * may be copied, distributed, and modified under those terms. 9 | * 10 | * This program is distributed in the hope that it will be useful, 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 | * GNU General Public License for more details.
14 | * 15 | */ 16 | 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include "ion_priv.h" 26 | 27 | static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool) 28 | { 29 | struct page *page = alloc_pages(pool->gfp_mask, pool->order); 30 | 31 | if (!page) 32 | return NULL; 33 | if (!pool->cached) 34 | ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order, 35 | DMA_BIDIRECTIONAL); 36 | return page; 37 | } 38 | 39 | static void ion_page_pool_free_pages(struct ion_page_pool *pool, 40 | struct page *page) 41 | { 42 | __free_pages(page, pool->order); 43 | } 44 | 45 | static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page) 46 | { 47 | mutex_lock(&pool->mutex); 48 | if (PageHighMem(page)) { 49 | list_add_tail(&page->lru, &pool->high_items); 50 | pool->high_count++; 51 | } else { 52 | list_add_tail(&page->lru, &pool->low_items); 53 | pool->low_count++; 54 | } 55 | mutex_unlock(&pool->mutex); 56 | return 0; 57 | } 58 | 59 | static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high) 60 | { 61 | struct page *page; 62 | 63 | if (high) { 64 | BUG_ON(!pool->high_count); 65 | page = list_first_entry(&pool->high_items, struct page, lru); 66 | pool->high_count--; 67 | } else { 68 | BUG_ON(!pool->low_count); 69 | page = list_first_entry(&pool->low_items, struct page, lru); 70 | pool->low_count--; 71 | } 72 | 73 | list_del(&page->lru); 74 | return page; 75 | } 76 | 77 | struct page *ion_page_pool_alloc(struct ion_page_pool *pool) 78 | { 79 | struct page *page = NULL; 80 | 81 | BUG_ON(!pool); 82 | 83 | mutex_lock(&pool->mutex); 84 | if (pool->high_count) 85 | page = ion_page_pool_remove(pool, true); 86 | else if (pool->low_count) 87 | page = ion_page_pool_remove(pool, false); 88 | mutex_unlock(&pool->mutex); 89 | 90 | if (!page) 91 | page = ion_page_pool_alloc_pages(pool); 92 | 93 | return page; 94 | } 95 | 96 | void ion_page_pool_free(struct ion_page_pool *pool, struct page *page) 97 | { 98 | int ret; 99 | 100 | BUG_ON(pool->order != compound_order(page)); 101 | 102 | ret = ion_page_pool_add(pool, page); 103 | if (ret) 104 | ion_page_pool_free_pages(pool, page); 105 | } 106 | 107 | static int ion_page_pool_total(struct ion_page_pool *pool, bool high) 108 | { 109 | int count = pool->low_count; 110 | 111 | if (high) 112 | count += pool->high_count; 113 | 114 | return count << pool->order; 115 | } 116 | 117 | int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask, 118 | int nr_to_scan) 119 | { 120 | int freed = 0; 121 | bool high; 122 | 123 | if (current_is_kswapd()) 124 | high = true; 125 | else 126 | high = !!(gfp_mask & __GFP_HIGHMEM); 127 | 128 | if (nr_to_scan == 0) 129 | return ion_page_pool_total(pool, high); 130 | 131 | while (freed < nr_to_scan) { 132 | struct page *page; 133 | 134 | mutex_lock(&pool->mutex); 135 | if (pool->low_count) { 136 | page = ion_page_pool_remove(pool, false); 137 | } else if (high && pool->high_count) { 138 | page = ion_page_pool_remove(pool, true); 139 | } else { 140 | mutex_unlock(&pool->mutex); 141 | break; 142 | } 143 | mutex_unlock(&pool->mutex); 144 | ion_page_pool_free_pages(pool, page); 145 | freed += (1 << pool->order); 146 | } 147 | 148 | return freed; 149 | } 150 | 151 | struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order, 152 | bool cached) 153 | { 154 | struct ion_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL); 155 | 156 | if (!pool) 157 | return NULL; 158 | pool->high_count = 0; 159 | 
pool->low_count = 0; 160 | INIT_LIST_HEAD(&pool->low_items); 161 | INIT_LIST_HEAD(&pool->high_items); 162 | pool->gfp_mask = gfp_mask | __GFP_COMP; 163 | pool->order = order; 164 | mutex_init(&pool->mutex); 165 | plist_node_init(&pool->list, order); 166 | if (cached) 167 | pool->cached = true; 168 | 169 | return pool; 170 | } 171 | 172 | void ion_page_pool_destroy(struct ion_page_pool *pool) 173 | { 174 | kfree(pool); 175 | } 176 | 177 | static int __init ion_page_pool_init(void) 178 | { 179 | return 0; 180 | } 181 | device_initcall(ion_page_pool_init); 182 | -------------------------------------------------------------------------------- /ion/ion_priv.h: -------------------------------------------------------------------------------- 1 | /* 2 | * drivers/staging/android/ion/ion_priv.h 3 | * 4 | * Copyright (C) 2011 Google, Inc. 5 | * 6 | * This software is licensed under the terms of the GNU General Public 7 | * License version 2, as published by the Free Software Foundation, and 8 | * may be copied, distributed, and modified under those terms. 9 | * 10 | * This program is distributed in the hope that it will be useful, 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 | * GNU General Public License for more details. 14 | * 15 | */ 16 | 17 | #ifndef _ION_PRIV_H 18 | #define _ION_PRIV_H 19 | 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include 28 | #include 29 | #include 30 | 31 | #include "ion.h" 32 | 33 | /** 34 | * struct ion_buffer - metadata for a particular buffer 35 | * @ref: reference count 36 | * @node: node in the ion_device buffers tree 37 | * @dev: back pointer to the ion_device 38 | * @heap: back pointer to the heap the buffer came from 39 | * @flags: buffer specific flags 40 | * @private_flags: internal buffer specific flags 41 | * @size: size of the buffer 42 | * @priv_virt: private data to the buffer representable as 43 | * a void * 44 | * @lock: protects the buffers cnt fields 45 | * @kmap_cnt: number of times the buffer is mapped to the kernel 46 | * @vaddr: the kernel mapping if kmap_cnt is not zero 47 | * @dmap_cnt: number of times the buffer is mapped for dma 48 | * @sg_table: the sg table for the buffer if dmap_cnt is not zero 49 | * @pages: flat array of pages in the buffer -- used by fault 50 | * handler and only valid for buffers that are faulted in 51 | * @vmas: list of vma's mapping this buffer 52 | * @handle_count: count of handles referencing this buffer 53 | * @task_comm: taskcomm of last client to reference this buffer in a 54 | * handle, used for debugging 55 | * @pid: pid of last client to reference this buffer in a 56 | * handle, used for debugging 57 | */ 58 | struct ion_buffer { 59 | struct kref ref; 60 | union { 61 | struct rb_node node; 62 | struct list_head list; 63 | }; 64 | struct ion_device *dev; 65 | struct ion_heap *heap; 66 | unsigned long flags; 67 | unsigned long private_flags; 68 | size_t size; 69 | void *priv_virt; 70 | struct mutex lock; 71 | int kmap_cnt; 72 | void *vaddr; 73 | int dmap_cnt; 74 | struct sg_table *sg_table; 75 | struct page **pages; 76 | struct list_head vmas; 77 | /* used to track orphaned buffers */ 78 | int handle_count; 79 | char task_comm[TASK_COMM_LEN]; 80 | pid_t pid; 81 | }; 82 | void ion_buffer_destroy(struct ion_buffer *buffer); 83 | 84 | /** 85 | * struct ion_device - the metadata of the ion device node 86 | * @dev: the actual misc device 87 | * @buffers: an rb tree 
of all the existing buffers 88 | * @buffer_lock: lock protecting the tree of buffers 89 | * @lock: rwsem protecting the tree of heaps and clients 90 | * @heaps: list of all the heaps in the system 91 | * @user_clients: list of all the clients created from userspace 92 | */ 93 | struct ion_device { 94 | struct miscdevice dev; 95 | struct rb_root buffers; 96 | struct mutex buffer_lock; 97 | struct rw_semaphore lock; 98 | struct plist_head heaps; 99 | long (*custom_ioctl)(struct ion_client *client, unsigned int cmd, 100 | unsigned long arg); 101 | struct rb_root clients; 102 | struct dentry *debug_root; 103 | struct dentry *heaps_debug_root; 104 | struct dentry *clients_debug_root; 105 | int heap_cnt; 106 | }; 107 | 108 | /** 109 | * struct ion_client - a process/hw block local address space 110 | * @node: node in the tree of all clients 111 | * @dev: backpointer to ion device 112 | * @handles: an rb tree of all the handles in this client 113 | * @idr: an idr space for allocating handle ids 114 | * @lock: lock protecting the tree of handles 115 | * @name: used for debugging 116 | * @display_name: used for debugging (unique version of @name) 117 | * @display_serial: used for debugging (to make display_name unique) 118 | * @task: used for debugging 119 | * 120 | * A client represents a list of buffers this client may access. 121 | * The mutex stored here is used to protect both handles tree 122 | * as well as the handles themselves, and should be held while modifying either. 123 | */ 124 | struct ion_client { 125 | struct rb_node node; 126 | struct ion_device *dev; 127 | struct rb_root handles; 128 | struct idr idr; 129 | struct mutex lock; 130 | const char *name; 131 | char *display_name; 132 | int display_serial; 133 | struct task_struct *task; 134 | pid_t pid; 135 | struct dentry *debug_root; 136 | }; 137 | 138 | /** 139 | * ion_handle - a client local reference to a buffer 140 | * @ref: reference count 141 | * @client: back pointer to the client the buffer resides in 142 | * @buffer: pointer to the buffer 143 | * @node: node in the client's handle rbtree 144 | * @kmap_cnt: count of times this client has mapped to kernel 145 | * @id: client-unique id allocated by client->idr 146 | * 147 | * Modifications to node, map_cnt or mapping should be protected by the 148 | * lock in the client. Other fields are never changed after initialization. 149 | */ 150 | struct ion_handle { 151 | struct kref ref; 152 | struct ion_client *client; 153 | struct ion_buffer *buffer; 154 | struct rb_node node; 155 | unsigned int kmap_cnt; 156 | int id; 157 | }; 158 | 159 | /** 160 | * struct ion_heap_ops - ops to operate on a given heap 161 | * @allocate: allocate memory 162 | * @free: free memory 163 | * @map_kernel map memory to the kernel 164 | * @unmap_kernel unmap memory to the kernel 165 | * @map_user map memory to userspace 166 | * 167 | * allocate, phys, and map_user return 0 on success, -errno on error. 168 | * map_dma and map_kernel return pointer on success, ERR_PTR on 169 | * error. @free will be called with ION_PRIV_FLAG_SHRINKER_FREE set in 170 | * the buffer's private_flags when called from a shrinker. In that 171 | * case, the pages being free'd must be truly free'd back to the 172 | * system, not put in a page pool or otherwise cached. 
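 *
 * For example, a heap that keeps a page pool is expected to have its
 * @free honour that flag roughly as follows (sketch only; the two
 * helpers named here are illustrative, not functions in this tree):
 *
 *	static void example_heap_free(struct ion_buffer *buffer)
 *	{
 *		if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)
 *			example_release_to_system(buffer);
 *		else
 *			example_return_to_pool(buffer);
 *	}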
173 | */ 174 | struct ion_heap_ops { 175 | int (*allocate)(struct ion_heap *heap, 176 | struct ion_buffer *buffer, unsigned long len, 177 | unsigned long align, unsigned long flags); 178 | void (*free)(struct ion_buffer *buffer); 179 | void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer); 180 | void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer); 181 | int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer, 182 | struct vm_area_struct *vma); 183 | int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan); 184 | }; 185 | 186 | /** 187 | * heap flags - flags between the heaps and core ion code 188 | */ 189 | #define ION_HEAP_FLAG_DEFER_FREE (1 << 0) 190 | 191 | /** 192 | * private flags - flags internal to ion 193 | */ 194 | /* 195 | * Buffer is being freed from a shrinker function. Skip any possible 196 | * heap-specific caching mechanism (e.g. page pools). Guarantees that 197 | * any buffer storage that came from the system allocator will be 198 | * returned to the system allocator. 199 | */ 200 | #define ION_PRIV_FLAG_SHRINKER_FREE (1 << 0) 201 | 202 | /** 203 | * struct ion_heap - represents a heap in the system 204 | * @node: rb node to put the heap on the device's tree of heaps 205 | * @dev: back pointer to the ion_device 206 | * @type: type of heap 207 | * @ops: ops struct as above 208 | * @flags: flags 209 | * @id: id of heap, also indicates priority of this heap when 210 | * allocating. These are specified by platform data and 211 | * MUST be unique 212 | * @name: used for debugging 213 | * @shrinker: a shrinker for the heap 214 | * @free_list: free list head if deferred free is used 215 | * @free_list_size size of the deferred free list in bytes 216 | * @lock: protects the free list 217 | * @waitqueue: queue to wait on from deferred free thread 218 | * @task: task struct of deferred free thread 219 | * @debug_show: called when heap debug file is read to add any 220 | * heap specific debug info to output 221 | * 222 | * Represents a pool of memory from which buffers can be made. In some 223 | * systems the only heap is regular system memory allocated via vmalloc. 224 | * On others, some blocks might require large physically contiguous buffers 225 | * that are allocated from a specially reserved heap. 226 | */ 227 | struct ion_heap { 228 | struct plist_node node; 229 | struct ion_device *dev; 230 | enum ion_heap_type type; 231 | struct ion_heap_ops *ops; 232 | unsigned long flags; 233 | unsigned int id; 234 | const char *name; 235 | struct shrinker shrinker; 236 | struct list_head free_list; 237 | size_t free_list_size; 238 | spinlock_t free_lock; 239 | wait_queue_head_t waitqueue; 240 | struct task_struct *task; 241 | 242 | int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *); 243 | }; 244 | 245 | /** 246 | * ion_buffer_cached - this ion buffer is cached 247 | * @buffer: buffer 248 | * 249 | * indicates whether this ion buffer is cached 250 | */ 251 | bool ion_buffer_cached(struct ion_buffer *buffer); 252 | 253 | /** 254 | * ion_buffer_fault_user_mappings - fault in user mappings of this buffer 255 | * @buffer: buffer 256 | * 257 | * indicates whether userspace mappings of this buffer will be faulted 258 | * in, this can affect how buffers are allocated from the heap. 
259 | */ 260 | bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer); 261 | 262 | /** 263 | * ion_device_create - allocates and returns an ion device 264 | * @custom_ioctl: arch specific ioctl function if applicable 265 | * 266 | * returns a valid device or -PTR_ERR 267 | */ 268 | struct ion_device *ion_device_create(long (*custom_ioctl) 269 | (struct ion_client *client, 270 | unsigned int cmd, 271 | unsigned long arg)); 272 | 273 | /** 274 | * ion_device_destroy - free and device and it's resource 275 | * @dev: the device 276 | */ 277 | void ion_device_destroy(struct ion_device *dev); 278 | 279 | /** 280 | * ion_device_add_heap - adds a heap to the ion device 281 | * @dev: the device 282 | * @heap: the heap to add 283 | */ 284 | void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap); 285 | 286 | /** 287 | * some helpers for common operations on buffers using the sg_table 288 | * and vaddr fields 289 | */ 290 | void *ion_heap_map_kernel(struct ion_heap *, struct ion_buffer *); 291 | void ion_heap_unmap_kernel(struct ion_heap *, struct ion_buffer *); 292 | int ion_heap_map_user(struct ion_heap *, struct ion_buffer *, 293 | struct vm_area_struct *); 294 | int ion_heap_buffer_zero(struct ion_buffer *buffer); 295 | int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot); 296 | 297 | /** 298 | * ion_heap_init_shrinker 299 | * @heap: the heap 300 | * 301 | * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op 302 | * this function will be called to setup a shrinker to shrink the freelists 303 | * and call the heap's shrink op. 304 | */ 305 | void ion_heap_init_shrinker(struct ion_heap *heap); 306 | 307 | /** 308 | * ion_heap_init_deferred_free -- initialize deferred free functionality 309 | * @heap: the heap 310 | * 311 | * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will 312 | * be called to setup deferred frees. Calls to free the buffer will 313 | * return immediately and the actual free will occur some time later 314 | */ 315 | int ion_heap_init_deferred_free(struct ion_heap *heap); 316 | 317 | /** 318 | * ion_heap_freelist_add - add a buffer to the deferred free list 319 | * @heap: the heap 320 | * @buffer: the buffer 321 | * 322 | * Adds an item to the deferred freelist. 323 | */ 324 | void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer); 325 | 326 | /** 327 | * ion_heap_freelist_drain - drain the deferred free list 328 | * @heap: the heap 329 | * @size: amount of memory to drain in bytes 330 | * 331 | * Drains the indicated amount of memory from the deferred freelist immediately. 332 | * Returns the total amount freed. The total freed may be higher depending 333 | * on the size of the items in the list, or lower if there is insufficient 334 | * total memory on the freelist. 335 | */ 336 | size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size); 337 | 338 | /** 339 | * ion_heap_freelist_shrink - drain the deferred free 340 | * list, skipping any heap-specific 341 | * pooling or caching mechanisms 342 | * 343 | * @heap: the heap 344 | * @size: amount of memory to drain in bytes 345 | * 346 | * Drains the indicated amount of memory from the deferred freelist immediately. 347 | * Returns the total amount freed. The total freed may be higher depending 348 | * on the size of the items in the list, or lower if there is insufficient 349 | * total memory on the freelist. 
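Taken together, the creation and registration helpers above are strung together roughly as follows at probe time. This is a minimal sketch assuming a single system heap and no arch-specific ioctl; the `my_*` names are illustrative, and sunxi_ion.c further down does the real, device-tree-driven version of this and registers `sunxi_ion_ioctl` as the custom ioctl.

```
/*
 * Minimal probe-time wiring (sketch): one ion device with one system
 * heap. Error handling is abbreviated; names are illustrative.
 */
#include <linux/err.h>
#include "ion.h"
#include "ion_priv.h"

static struct ion_device *my_idev;
static struct ion_heap *my_heap;

static int my_ion_init(void)
{
	my_idev = ion_device_create(NULL);	/* assumed: no custom ioctl */
	if (IS_ERR(my_idev))
		return PTR_ERR(my_idev);

	my_heap = ion_system_heap_create(NULL);
	if (IS_ERR(my_heap)) {
		ion_device_destroy(my_idev);
		return PTR_ERR(my_heap);
	}
	my_heap->id = 0;		/* heap ids must be unique */
	my_heap->name = "sys_user";

	/*
	 * The system heap sets ION_HEAP_FLAG_DEFER_FREE and a shrink op,
	 * so registering it also sets up the deferred-free thread and
	 * shrinker described above.
	 */
	ion_device_add_heap(my_idev, my_heap);
	return 0;
}

static void my_ion_exit(void)
{
	ion_system_heap_destroy(my_heap);
	ion_device_destroy(my_idev);
}
```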
350 | * 351 | * Unlike with @ion_heap_freelist_drain, don't put any pages back into 352 | * page pools or otherwise cache the pages. Everything must be 353 | * genuinely free'd back to the system. If you're free'ing from a 354 | * shrinker you probably want to use this. Note that this relies on 355 | * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE 356 | * flag. 357 | */ 358 | size_t ion_heap_freelist_shrink(struct ion_heap *heap, 359 | size_t size); 360 | 361 | /** 362 | * ion_heap_freelist_size - returns the size of the freelist in bytes 363 | * @heap: the heap 364 | */ 365 | size_t ion_heap_freelist_size(struct ion_heap *heap); 366 | 367 | 368 | /** 369 | * functions for creating and destroying the built in ion heaps. 370 | * architectures can add their own custom architecture specific 371 | * heaps as appropriate. 372 | */ 373 | 374 | struct ion_heap *ion_heap_create(struct ion_platform_heap *); 375 | void ion_heap_destroy(struct ion_heap *); 376 | struct ion_heap *ion_system_heap_create(struct ion_platform_heap *); 377 | void ion_system_heap_destroy(struct ion_heap *); 378 | 379 | struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *); 380 | void ion_system_contig_heap_destroy(struct ion_heap *); 381 | 382 | struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *); 383 | void ion_carveout_heap_destroy(struct ion_heap *); 384 | 385 | struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *); 386 | void ion_chunk_heap_destroy(struct ion_heap *); 387 | struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *); 388 | void ion_cma_heap_destroy(struct ion_heap *); 389 | 390 | /** 391 | * functions for creating and destroying a heap pool -- allows you 392 | * to keep a pool of pre allocated memory to use from your heap. Keeping 393 | * a pool of memory that is ready for dma, ie any cached mapping have been 394 | * invalidated from the cache, provides a significant performance benefit on 395 | * many systems 396 | */ 397 | 398 | /** 399 | * struct ion_page_pool - pagepool struct 400 | * @high_count: number of highmem items in the pool 401 | * @low_count: number of lowmem items in the pool 402 | * @high_items: list of highmem items 403 | * @low_items: list of lowmem items 404 | * @mutex: lock protecting this struct and especially the count 405 | * item list 406 | * @gfp_mask: gfp_mask to use from alloc 407 | * @order: order of pages in the pool 408 | * @list: plist node for list of pools 409 | * @cached: it's cached pool or not 410 | * 411 | * Allows you to keep a pool of pre allocated pages to use from your heap. 
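For reference, the pool lifecycle described above looks roughly as follows; this mirrors how ion_system_heap.c keeps one pool per page order, and the gfp mask, order, scan count and `my_pool_demo` name are illustrative values for the sketch.

```
/* Illustrative ion_page_pool lifecycle; values are arbitrary. */
#include <linux/gfp.h>
#include "ion_priv.h"

static int my_pool_demo(void)
{
	struct ion_page_pool *pool;
	struct page *page;

	pool = ion_page_pool_create(GFP_HIGHUSER | __GFP_ZERO, 0, false);
	if (!pool)
		return -ENOMEM;

	page = ion_page_pool_alloc(pool);	/* refills from the system
						 * when the pool is empty */
	if (page)
		ion_page_pool_free(pool, page);	/* cached for later reuse */

	/* A heap's shrink op calls this under memory pressure. */
	ion_page_pool_shrink(pool, GFP_KERNEL, 16);

	ion_page_pool_destroy(pool);
	return 0;
}
```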
412 | * Keeping a pool of pages that is ready for dma, ie any cached mapping have 413 | * been invalidated from the cache, provides a significant performance benefit 414 | * on many systems 415 | */ 416 | struct ion_page_pool { 417 | int high_count; 418 | int low_count; 419 | bool cached; 420 | struct list_head high_items; 421 | struct list_head low_items; 422 | struct mutex mutex; 423 | gfp_t gfp_mask; 424 | unsigned int order; 425 | struct plist_node list; 426 | }; 427 | 428 | struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order, 429 | bool cached); 430 | void ion_page_pool_destroy(struct ion_page_pool *); 431 | struct page *ion_page_pool_alloc(struct ion_page_pool *); 432 | void ion_page_pool_free(struct ion_page_pool *, struct page *); 433 | 434 | /** ion_page_pool_shrink - shrinks the size of the memory cached in the pool 435 | * @pool: the pool 436 | * @gfp_mask: the memory type to reclaim 437 | * @nr_to_scan: number of items to shrink in pages 438 | * 439 | * returns the number of items freed in pages 440 | */ 441 | int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask, 442 | int nr_to_scan); 443 | 444 | /** 445 | * ion_pages_sync_for_device - cache flush pages for use with the specified 446 | * device 447 | * @dev: the device the pages will be used with 448 | * @page: the first page to be flushed 449 | * @size: size in bytes of region to be flushed 450 | * @dir: direction of dma transfer 451 | */ 452 | void ion_pages_sync_for_device(struct device *dev, struct page *page, 453 | size_t size, enum dma_data_direction dir); 454 | 455 | long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); 456 | 457 | int ion_sync_for_device(struct ion_client *client, int fd); 458 | 459 | struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client, 460 | int id); 461 | 462 | void ion_free_nolock(struct ion_client *client, struct ion_handle *handle); 463 | 464 | int ion_handle_put_nolock(struct ion_handle *handle); 465 | 466 | int ion_handle_put(struct ion_handle *handle); 467 | 468 | int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query); 469 | 470 | int ion_share_dma_buf_fd_nolock(struct ion_client *client, 471 | struct ion_handle *handle); 472 | 473 | #endif /* _ION_PRIV_H */ 474 | -------------------------------------------------------------------------------- /ion/ion_system_heap.c: -------------------------------------------------------------------------------- 1 | /* 2 | * drivers/staging/android/ion/ion_system_heap.c 3 | * 4 | * Copyright (C) 2011 Google, Inc. 5 | * 6 | * This software is licensed under the terms of the GNU General Public 7 | * License version 2, as published by the Free Software Foundation, and 8 | * may be copied, distributed, and modified under those terms. 9 | * 10 | * This program is distributed in the hope that it will be useful, 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 | * GNU General Public License for more details. 
14 | * 15 | */ 16 | 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include "ion.h" 27 | #include "ion_priv.h" 28 | 29 | #define NUM_ORDERS ARRAY_SIZE(orders) 30 | 31 | static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN | 32 | __GFP_NORETRY) & ~__GFP_RECLAIM; 33 | static gfp_t low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO); 34 | static const unsigned int orders[] = {8, 4, 0}; 35 | 36 | static int order_to_index(unsigned int order) 37 | { 38 | int i; 39 | 40 | for (i = 0; i < NUM_ORDERS; i++) 41 | if (order == orders[i]) 42 | return i; 43 | BUG(); 44 | return -1; 45 | } 46 | 47 | static inline unsigned int order_to_size(int order) 48 | { 49 | return PAGE_SIZE << order; 50 | } 51 | 52 | struct ion_system_heap { 53 | struct ion_heap heap; 54 | struct ion_page_pool *uncached_pools[NUM_ORDERS]; 55 | struct ion_page_pool *cached_pools[NUM_ORDERS]; 56 | }; 57 | 58 | /** 59 | * The page from page-pool are all zeroed before. We need do cache 60 | * clean for cached buffer. The uncached buffer are always non-cached 61 | * since it's allocated. So no need for non-cached pages. 62 | */ 63 | static struct page *alloc_buffer_page(struct ion_system_heap *heap, 64 | struct ion_buffer *buffer, 65 | unsigned long order) 66 | { 67 | bool cached = ion_buffer_cached(buffer); 68 | struct ion_page_pool *pool; 69 | struct page *page; 70 | 71 | if (!cached) 72 | pool = heap->uncached_pools[order_to_index(order)]; 73 | else 74 | pool = heap->cached_pools[order_to_index(order)]; 75 | 76 | page = ion_page_pool_alloc(pool); 77 | 78 | if (cached) 79 | ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order, 80 | DMA_BIDIRECTIONAL); 81 | return page; 82 | } 83 | 84 | static void free_buffer_page(struct ion_system_heap *heap, 85 | struct ion_buffer *buffer, struct page *page) 86 | { 87 | struct ion_page_pool *pool; 88 | unsigned int order = compound_order(page); 89 | bool cached = ion_buffer_cached(buffer); 90 | 91 | /* go to system */ 92 | if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE) { 93 | __free_pages(page, order); 94 | return; 95 | } 96 | 97 | if (!cached) 98 | pool = heap->uncached_pools[order_to_index(order)]; 99 | else 100 | pool = heap->cached_pools[order_to_index(order)]; 101 | 102 | ion_page_pool_free(pool, page); 103 | } 104 | 105 | 106 | static struct page *alloc_largest_available(struct ion_system_heap *heap, 107 | struct ion_buffer *buffer, 108 | unsigned long size, 109 | unsigned int max_order) 110 | { 111 | struct page *page; 112 | int i; 113 | 114 | for (i = 0; i < NUM_ORDERS; i++) { 115 | if (size < order_to_size(orders[i])) 116 | continue; 117 | if (max_order < orders[i]) 118 | continue; 119 | 120 | page = alloc_buffer_page(heap, buffer, orders[i]); 121 | if (!page) 122 | continue; 123 | 124 | return page; 125 | } 126 | 127 | return NULL; 128 | } 129 | 130 | static int ion_system_heap_allocate(struct ion_heap *heap, 131 | struct ion_buffer *buffer, 132 | unsigned long size, unsigned long align, 133 | unsigned long flags) 134 | { 135 | struct ion_system_heap *sys_heap = container_of(heap, 136 | struct ion_system_heap, 137 | heap); 138 | struct sg_table *table; 139 | struct scatterlist *sg; 140 | struct list_head pages; 141 | struct page *page, *tmp_page; 142 | int i = 0; 143 | unsigned long size_remaining = PAGE_ALIGN(size); 144 | unsigned int max_order = orders[0]; 145 | 146 | if (align > PAGE_SIZE) 147 | return -EINVAL; 148 | 149 | if ((size / PAGE_SIZE) > (totalram_pages() / 
2)) 150 | return -ENOMEM; 151 | 152 | INIT_LIST_HEAD(&pages); 153 | while (size_remaining > 0) { 154 | page = alloc_largest_available(sys_heap, buffer, size_remaining, 155 | max_order); 156 | if (!page) 157 | goto free_pages; 158 | list_add_tail(&page->lru, &pages); 159 | size_remaining -= PAGE_SIZE << compound_order(page); 160 | max_order = compound_order(page); 161 | i++; 162 | } 163 | table = kmalloc(sizeof(struct sg_table), GFP_KERNEL); 164 | if (!table) 165 | goto free_pages; 166 | 167 | if (sg_alloc_table(table, i, GFP_KERNEL)) 168 | goto free_table; 169 | 170 | sg = table->sgl; 171 | list_for_each_entry_safe(page, tmp_page, &pages, lru) { 172 | sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0); 173 | sg = sg_next(sg); 174 | list_del(&page->lru); 175 | } 176 | 177 | buffer->sg_table = table; 178 | return 0; 179 | 180 | free_table: 181 | kfree(table); 182 | free_pages: 183 | list_for_each_entry_safe(page, tmp_page, &pages, lru) 184 | free_buffer_page(sys_heap, buffer, page); 185 | return -ENOMEM; 186 | } 187 | 188 | static void ion_system_heap_free(struct ion_buffer *buffer) 189 | { 190 | struct ion_system_heap *sys_heap = container_of(buffer->heap, 191 | struct ion_system_heap, 192 | heap); 193 | struct sg_table *table = buffer->sg_table; 194 | struct scatterlist *sg; 195 | int i; 196 | 197 | /* zero the buffer before goto page pool */ 198 | if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)) 199 | ion_heap_buffer_zero(buffer); 200 | 201 | for_each_sg(table->sgl, sg, table->nents, i) 202 | free_buffer_page(sys_heap, buffer, sg_page(sg)); 203 | sg_free_table(table); 204 | kfree(table); 205 | } 206 | 207 | static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask, 208 | int nr_to_scan) 209 | { 210 | struct ion_page_pool *uncached_pool; 211 | struct ion_page_pool *cached_pool; 212 | struct ion_system_heap *sys_heap; 213 | int nr_total = 0; 214 | int i, nr_freed; 215 | int only_scan = 0; 216 | 217 | sys_heap = container_of(heap, struct ion_system_heap, heap); 218 | 219 | if (!nr_to_scan) 220 | only_scan = 1; 221 | 222 | for (i = 0; i < NUM_ORDERS; i++) { 223 | uncached_pool = sys_heap->uncached_pools[i]; 224 | cached_pool = sys_heap->cached_pools[i]; 225 | 226 | if (only_scan) { 227 | nr_total += ion_page_pool_shrink(uncached_pool, 228 | gfp_mask, 229 | nr_to_scan); 230 | 231 | nr_total += ion_page_pool_shrink(cached_pool, 232 | gfp_mask, 233 | nr_to_scan); 234 | } else { 235 | nr_freed = ion_page_pool_shrink(uncached_pool, 236 | gfp_mask, 237 | nr_to_scan); 238 | nr_to_scan -= nr_freed; 239 | nr_total += nr_freed; 240 | if (nr_to_scan <= 0) 241 | break; 242 | nr_freed = ion_page_pool_shrink(cached_pool, 243 | gfp_mask, 244 | nr_to_scan); 245 | nr_to_scan -= nr_freed; 246 | nr_total += nr_freed; 247 | if (nr_to_scan <= 0) 248 | break; 249 | } 250 | } 251 | return nr_total; 252 | } 253 | 254 | static struct ion_heap_ops system_heap_ops = { 255 | .allocate = ion_system_heap_allocate, 256 | .free = ion_system_heap_free, 257 | .map_kernel = ion_heap_map_kernel, 258 | .unmap_kernel = ion_heap_unmap_kernel, 259 | .map_user = ion_heap_map_user, 260 | .shrink = ion_system_heap_shrink, 261 | }; 262 | 263 | static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s, 264 | void *unused) 265 | { 266 | 267 | struct ion_system_heap *sys_heap = container_of(heap, 268 | struct ion_system_heap, 269 | heap); 270 | int i; 271 | struct ion_page_pool *pool; 272 | 273 | for (i = 0; i < NUM_ORDERS; i++) { 274 | pool = sys_heap->uncached_pools[i]; 275 | 276 | 
seq_printf(s, "%d order %u highmem pages uncached %lu total\n", 277 | pool->high_count, pool->order, 278 | (PAGE_SIZE << pool->order) * pool->high_count); 279 | seq_printf(s, "%d order %u lowmem pages uncached %lu total\n", 280 | pool->low_count, pool->order, 281 | (PAGE_SIZE << pool->order) * pool->low_count); 282 | } 283 | 284 | for (i = 0; i < NUM_ORDERS; i++) { 285 | pool = sys_heap->cached_pools[i]; 286 | 287 | seq_printf(s, "%d order %u highmem pages cached %lu total\n", 288 | pool->high_count, pool->order, 289 | (PAGE_SIZE << pool->order) * pool->high_count); 290 | seq_printf(s, "%d order %u lowmem pages cached %lu total\n", 291 | pool->low_count, pool->order, 292 | (PAGE_SIZE << pool->order) * pool->low_count); 293 | } 294 | return 0; 295 | } 296 | 297 | static void ion_system_heap_destroy_pools(struct ion_page_pool **pools) 298 | { 299 | int i; 300 | 301 | for (i = 0; i < NUM_ORDERS; i++) 302 | if (pools[i]) 303 | ion_page_pool_destroy(pools[i]); 304 | } 305 | 306 | static int ion_system_heap_create_pools(struct ion_page_pool **pools, 307 | bool cached) 308 | { 309 | int i; 310 | 311 | for (i = 0; i < NUM_ORDERS; i++) { 312 | struct ion_page_pool *pool; 313 | gfp_t gfp_flags = low_order_gfp_flags; 314 | 315 | if (orders[i] > 4) 316 | gfp_flags = high_order_gfp_flags; 317 | 318 | pool = ion_page_pool_create(gfp_flags, orders[i], cached); 319 | if (!pool) 320 | goto err_create_pool; 321 | pools[i] = pool; 322 | } 323 | return 0; 324 | 325 | err_create_pool: 326 | ion_system_heap_destroy_pools(pools); 327 | return -ENOMEM; 328 | } 329 | 330 | struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused) 331 | { 332 | struct ion_system_heap *heap; 333 | 334 | heap = kzalloc(sizeof(*heap), GFP_KERNEL); 335 | if (!heap) 336 | return ERR_PTR(-ENOMEM); 337 | heap->heap.ops = &system_heap_ops; 338 | heap->heap.type = ION_HEAP_TYPE_SYSTEM; 339 | heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE; 340 | 341 | if (ion_system_heap_create_pools(heap->uncached_pools, false)) 342 | goto free_heap; 343 | 344 | if (ion_system_heap_create_pools(heap->cached_pools, true)) 345 | goto destroy_uncached_pools; 346 | 347 | heap->heap.debug_show = ion_system_heap_debug_show; 348 | return &heap->heap; 349 | 350 | destroy_uncached_pools: 351 | ion_system_heap_destroy_pools(heap->uncached_pools); 352 | 353 | free_heap: 354 | kfree(heap); 355 | return ERR_PTR(-ENOMEM); 356 | } 357 | 358 | void ion_system_heap_destroy(struct ion_heap *heap) 359 | { 360 | struct ion_system_heap *sys_heap = container_of(heap, 361 | struct ion_system_heap, 362 | heap); 363 | int i; 364 | 365 | for (i = 0; i < NUM_ORDERS; i++) { 366 | ion_page_pool_destroy(sys_heap->uncached_pools[i]); 367 | ion_page_pool_destroy(sys_heap->cached_pools[i]); 368 | } 369 | kfree(sys_heap); 370 | } 371 | 372 | static int ion_system_contig_heap_allocate(struct ion_heap *heap, 373 | struct ion_buffer *buffer, 374 | unsigned long len, 375 | unsigned long align, 376 | unsigned long flags) 377 | { 378 | int order = get_order(len); 379 | struct page *page; 380 | struct sg_table *table; 381 | unsigned long i; 382 | int ret; 383 | 384 | if (align > (PAGE_SIZE << order)) 385 | return -EINVAL; 386 | 387 | page = alloc_pages(low_order_gfp_flags | __GFP_NOWARN, order); 388 | if (!page) 389 | return -ENOMEM; 390 | 391 | split_page(page, order); 392 | 393 | len = PAGE_ALIGN(len); 394 | for (i = len >> PAGE_SHIFT; i < (1 << order); i++) 395 | __free_page(page + i); 396 | 397 | table = kmalloc(sizeof(struct sg_table), GFP_KERNEL); 398 | if (!table) { 399 | ret = 
-ENOMEM; 400 | goto free_pages; 401 | } 402 | 403 | ret = sg_alloc_table(table, 1, GFP_KERNEL); 404 | if (ret) 405 | goto free_table; 406 | 407 | sg_set_page(table->sgl, page, len, 0); 408 | 409 | buffer->sg_table = table; 410 | 411 | ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL); 412 | 413 | return 0; 414 | 415 | free_table: 416 | kfree(table); 417 | free_pages: 418 | for (i = 0; i < len >> PAGE_SHIFT; i++) 419 | __free_page(page + i); 420 | 421 | return ret; 422 | } 423 | 424 | static void ion_system_contig_heap_free(struct ion_buffer *buffer) 425 | { 426 | struct sg_table *table = buffer->sg_table; 427 | struct page *page = sg_page(table->sgl); 428 | unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT; 429 | unsigned long i; 430 | 431 | for (i = 0; i < pages; i++) 432 | __free_page(page + i); 433 | sg_free_table(table); 434 | kfree(table); 435 | } 436 | 437 | static struct ion_heap_ops kmalloc_ops = { 438 | .allocate = ion_system_contig_heap_allocate, 439 | .free = ion_system_contig_heap_free, 440 | .map_kernel = ion_heap_map_kernel, 441 | .unmap_kernel = ion_heap_unmap_kernel, 442 | .map_user = ion_heap_map_user, 443 | }; 444 | 445 | struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused) 446 | { 447 | struct ion_heap *heap; 448 | 449 | heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL); 450 | if (!heap) 451 | return ERR_PTR(-ENOMEM); 452 | heap->ops = &kmalloc_ops; 453 | heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG; 454 | return heap; 455 | } 456 | 457 | void ion_system_contig_heap_destroy(struct ion_heap *heap) 458 | { 459 | kfree(heap); 460 | } 461 | -------------------------------------------------------------------------------- /ion/sunxi/Makefile: -------------------------------------------------------------------------------- 1 | obj-$(CONFIG_VIDEO_SUNXI_CEDAR_ION) += sunxi_ion.o cache.o 2 | -------------------------------------------------------------------------------- /ion/sunxi/cache-v7.S: -------------------------------------------------------------------------------- 1 | 2 | #ifndef __ASSEMBLY__ 3 | #define __ASSEMBLY__ 4 | #endif 5 | #include 6 | #include 7 | 8 | /* 9 | * c code declared as follows: 10 | * int flush_clean_user_range(long start, long end); 11 | */ 12 | ENTRY(flush_clean_user_range) 13 | .macro dcache_line_size, reg, tmp 14 | mrc p15, 1, \tmp, c0, c0, 0 @ read CSIDR 15 | and \tmp, \tmp, #7 @ cache line size encoding 16 | mov \reg, #16 @ size offset 17 | mov \reg, \reg, lsl \tmp @ actual cache line size 18 | .endm 19 | 20 | .text 21 | .globl flush_clean_user_range 22 | .globl flush_dcache_all 23 | flush_clean_user_range: 24 | dcache_line_size r2, r3 25 | sub r3, r2, #1 26 | bic r0, r0, r3 27 | 1: 28 | USER( mcr p15, 0, r0, c7, c14, 1 ) @ clean and flush D line to the point of unification 29 | add r0, r0, r2 30 | 2: 31 | cmp r0, r1 32 | blo 1b 33 | mov r0, #0 34 | dsb 35 | mov pc, lr 36 | 37 | /* 38 | * Fault handling for the cache operation above. If the virtual address in r0 39 | * is not mapped, just try the next page. 40 | */ 41 | 9001: 42 | mov r0, r0, lsr #12 43 | mov r0, r0, lsl #12 44 | add r0, r0, #4096 45 | b 2b 46 | ENDPROC(flush_clean_user_range) 47 | 48 | /* 49 | * flush_dcache_all() 50 | * 51 | * Flush the whole D-cache. 
52 | * 53 | * Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode) 54 | * 55 | * - mm - mm_struct describing address space 56 | */ 57 | ENTRY(flush_dcache_all) 58 | stmfd sp!, {r0 - r12, lr} 59 | dmb @ ensure ordering with previous memory accesses 60 | mrc p15, 1, r0, c0, c0, 1 @ read clidr 61 | ands r3, r0, #0x7000000 @ extract loc from clidr 62 | mov r3, r3, lsr #23 @ left align loc bit field 63 | beq finished @ if loc is 0, then no need to clean 64 | mov r10, #0 @ start clean at cache level 0 65 | loop1: 66 | add r2, r10, r10, lsr #1 @ work out 3x current cache level 67 | mov r1, r0, lsr r2 @ extract cache type bits from clidr 68 | and r1, r1, #7 @ mask of the bits for current cache only 69 | cmp r1, #2 @ see what cache we have at this level 70 | blt skip @ skip if no cache, or just i-cache 71 | mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr 72 | isb @ isb to sych the new cssr&csidr 73 | mrc p15, 1, r1, c0, c0, 0 @ read the new csidr 74 | and r2, r1, #7 @ extract the length of the cache lines 75 | add r2, r2, #4 @ add 4 (line length offset) 76 | ldr r4, =0x3ff 77 | ands r4, r4, r1, lsr #3 @ find maximum number on the way size 78 | clz r5, r4 @ find bit position of way size increment 79 | ldr r7, =0x7fff 80 | ands r7, r7, r1, lsr #13 @ extract max number of the index size 81 | loop2: 82 | mov r9, r4 @ create working copy of max way size 83 | loop3: 84 | ARM( orr r11, r10, r9, lsl r5 ) @ factor way and cache number into r11 85 | THUMB( lsl r6, r9, r5 ) 86 | THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11 87 | ARM( orr r11, r11, r7, lsl r2 ) @ factor index number into r11 88 | THUMB( lsl r6, r7, r2 ) 89 | THUMB( orr r11, r11, r6 ) @ factor index number into r11 90 | mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way 91 | subs r9, r9, #1 @ decrement the way 92 | bge loop3 93 | subs r7, r7, #1 @ decrement the index 94 | bge loop2 95 | skip: 96 | add r10, r10, #2 @ increment cache number 97 | cmp r3, r10 98 | bgt loop1 99 | finished: 100 | mov r10, #0 @ swith back to cache level 0 101 | mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr 102 | dsb 103 | isb 104 | 105 | ldmfd sp!, {r0 - r12, lr} 106 | mov pc, lr 107 | ENDPROC(flush_dcache_all) 108 | 109 | /* 110 | * int flush_user_range(long start, long end); 111 | * 112 | * flush user space range. just flush, not write back. 113 | */ 114 | ENTRY(flush_user_range) 115 | dcache_line_size r2, r3 116 | sub r3, r2, #1 117 | 118 | tst r0, r3 119 | bic r0, r0, r3 120 | USER(mcrne p15, 0, r0, c7, c14, 1) @ clean & invalidate D / U line 121 | 122 | tst r1, r3 123 | bic r1, r1, r3 124 | USER(mcrne p15, 0, r1, c7, c14, 1) @ clean & invalidate D / U line 125 | 126 | 1: 127 | USER(mcr p15, 0, r0, c7, c6, 1) @ invalidate D / U line 128 | add r0, r0, r2 129 | 2: 130 | cmp r0, r1 131 | blo 1b 132 | mov r0, #0 133 | dsb 134 | mov pc, lr 135 | 136 | /* 137 | * Fault handling for the cache operation above. If the virtual address in r0 138 | * is not mapped, just try the next page. 
139 | */ 140 | 9001: 141 | mov r0, r0, lsr #12 142 | mov r0, r0, lsl #12 143 | add r0, r0, #4096 144 | b 2b 145 | ENDPROC(flush_user_range) 146 | -------------------------------------------------------------------------------- /ion/sunxi/cache.S: -------------------------------------------------------------------------------- 1 | #if __LINUX_ARM_ARCH__ == 7 2 | #include "cache-v7.S" 3 | #else 4 | #warning "[sunxi-cedar] No BSP asm cache flush support, use dmac flush" 5 | #endif 6 | -------------------------------------------------------------------------------- /ion/sunxi/cache.h: -------------------------------------------------------------------------------- 1 | #ifndef _CACHE_H 2 | #define _CACHE_H 3 | int flush_clean_user_range(long start, long end); 4 | int flush_user_range(long start, long end); 5 | void flush_dcache_all(void); 6 | #endif -------------------------------------------------------------------------------- /ion/sunxi/sunxi_ion.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Allwinner SUNXI ION Driver 3 | * 4 | * Copyright (c) 2017 Allwinnertech. 5 | * 6 | * Author: fanqinghua 7 | * 8 | * This program is free software; you can redistribute it and/or modify 9 | * it under the terms of the GNU General Public License version 2 as 10 | * published by the Free Software Foundation. 11 | */ 12 | 13 | #define pr_fmt(fmt) "Ion: " fmt 14 | 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include "../ion_priv.h" 23 | #include "../ion.h" 24 | #include "../ion_of.h" 25 | #include "sunxi_ion_priv.h" 26 | 27 | struct sunxi_ion_dev { 28 | struct ion_heap **heaps; 29 | struct ion_device *idev; 30 | struct ion_platform_data *data; 31 | }; 32 | struct device *g_ion_dev; 33 | struct ion_device *idev; 34 | /* export for IMG GPU(sgx544) */ 35 | EXPORT_SYMBOL(idev); 36 | 37 | static struct ion_of_heap sunxi_heaps[] = { 38 | PLATFORM_HEAP("allwinner,sys_user", 0, ION_HEAP_TYPE_SYSTEM, 39 | "sys_user"), 40 | PLATFORM_HEAP("allwinner,sys_contig", 1, ION_HEAP_TYPE_SYSTEM_CONTIG, 41 | "sys_contig"), 42 | PLATFORM_HEAP("allwinner,cma", ION_HEAP_TYPE_DMA, ION_HEAP_TYPE_DMA, 43 | "cma"), 44 | PLATFORM_HEAP("allwinner,secure", ION_HEAP_TYPE_SECURE, 45 | ION_HEAP_TYPE_SECURE, "secure"), 46 | {} 47 | }; 48 | 49 | struct device *get_ion_dev(void) 50 | { 51 | return g_ion_dev; 52 | } 53 | 54 | long sunxi_ion_ioctl(struct ion_client *client, unsigned int cmd, 55 | unsigned long arg) 56 | { 57 | long ret = 0; 58 | switch (cmd) { 59 | case ION_IOC_SUNXI_FLUSH_RANGE: { 60 | sunxi_cache_range range; 61 | if (copy_from_user(&range, (void __user *)arg, 62 | sizeof(sunxi_cache_range))) { 63 | ret = -EINVAL; 64 | goto end; 65 | } 66 | 67 | #if __LINUX_ARM_ARCH__ == 7 68 | if (flush_clean_user_range(range.start, range.end)) { 69 | ret = -EINVAL; 70 | goto end; 71 | } 72 | #else 73 | dmac_flush_range((void*)range.start, (void*)range.end); 74 | #endif 75 | break; 76 | } 77 | #if __LINUX_ARM_ARCH__ == 7 78 | case ION_IOC_SUNXI_FLUSH_ALL: { 79 | flush_dcache_all(); 80 | break; 81 | } 82 | #endif 83 | case ION_IOC_SUNXI_PHYS_ADDR: { 84 | sunxi_phys_data data; 85 | struct ion_handle *handle; 86 | if (copy_from_user(&data, (void __user *)arg, 87 | sizeof(sunxi_phys_data))) 88 | return -EFAULT; 89 | handle = 90 | ion_handle_get_by_id_nolock(client, data.handle.handle); 91 | /* FIXME Hardcoded CMA struct pointer */ 92 | data.phys_addr = 93 | ((struct ion_cma_buffer_info *)(handle->buffer 94 | ->priv_virt)) 95 | ->handle; 96 
| data.size = handle->buffer->size; 97 | if (copy_to_user((void __user *)arg, &data, 98 | sizeof(sunxi_phys_data))) 99 | return -EFAULT; 100 | break; 101 | } 102 | 103 | default: 104 | return -ENOTTY; 105 | } 106 | end: 107 | return ret; 108 | } 109 | 110 | static int sunxi_ion_probe(struct platform_device *pdev) 111 | { 112 | struct sunxi_ion_dev *ipdev; 113 | int i; 114 | 115 | ipdev = devm_kzalloc(&pdev->dev, sizeof(*ipdev), GFP_KERNEL); 116 | if (!ipdev) 117 | return -ENOMEM; 118 | 119 | g_ion_dev = &pdev->dev; 120 | platform_set_drvdata(pdev, ipdev); 121 | 122 | ipdev->idev = ion_device_create(sunxi_ion_ioctl); 123 | if (IS_ERR(ipdev->idev)) 124 | return PTR_ERR(ipdev->idev); 125 | 126 | idev = ipdev->idev; 127 | 128 | ipdev->data = ion_parse_dt(pdev, sunxi_heaps); 129 | if (IS_ERR(ipdev->data)) { 130 | pr_err("%s: ion_parse_dt error!\n", __func__); 131 | return PTR_ERR(ipdev->data); 132 | } 133 | 134 | ipdev->heaps = devm_kzalloc(&pdev->dev, 135 | sizeof(struct ion_heap) * ipdev->data->nr, 136 | GFP_KERNEL); 137 | if (!ipdev->heaps) { 138 | ion_destroy_platform_data(ipdev->data); 139 | return -ENOMEM; 140 | } 141 | 142 | for (i = 0; i < ipdev->data->nr; i++) { 143 | ipdev->heaps[i] = ion_heap_create(&ipdev->data->heaps[i]); 144 | if (!ipdev->heaps) { 145 | ion_destroy_platform_data(ipdev->data); 146 | return -ENOMEM; 147 | } else if (ipdev->heaps[i] == ERR_PTR(-EINVAL)) { 148 | return 0; 149 | } 150 | ion_device_add_heap(ipdev->idev, ipdev->heaps[i]); 151 | } 152 | return 0; 153 | } 154 | 155 | static int sunxi_ion_remove(struct platform_device *pdev) 156 | { 157 | struct sunxi_ion_dev *ipdev; 158 | int i; 159 | 160 | ipdev = platform_get_drvdata(pdev); 161 | 162 | for (i = 0; i < ipdev->data->nr; i++) 163 | ion_heap_destroy(ipdev->heaps[i]); 164 | 165 | ion_destroy_platform_data(ipdev->data); 166 | ion_device_destroy(ipdev->idev); 167 | 168 | return 0; 169 | } 170 | 171 | static const struct of_device_id sunxi_ion_match_table[] = { 172 | { .compatible = "allwinner,sunxi-ion" }, 173 | {}, 174 | }; 175 | 176 | static struct platform_driver sunxi_ion_driver = { 177 | .probe = sunxi_ion_probe, 178 | .remove = sunxi_ion_remove, 179 | .driver = { 180 | .name = "ion-sunxi", 181 | .of_match_table = sunxi_ion_match_table, 182 | }, 183 | }; 184 | 185 | static int __init sunxi_ion_init(void) 186 | { 187 | return platform_driver_register(&sunxi_ion_driver); 188 | } 189 | subsys_initcall(sunxi_ion_init); 190 | -------------------------------------------------------------------------------- /ion/sunxi/sunxi_ion.h: -------------------------------------------------------------------------------- 1 | /* 2 | * drivers/staging/android/ion/sunxi/ion_sunxi.h 3 | * 4 | * Copyright(c) 2015-2020 Allwinnertech Co., Ltd. 5 | * http://www.allwinnertech.com 6 | * 7 | * Author: Wim Hwang 8 | * 9 | * sunxi ion header file 10 | * 11 | * This program is free software; you can redistribute it and/or modify 12 | * it under the terms of the GNU General Public License as published by 13 | * the Free Software Foundation; either version 2 of the License, or 14 | * (at your option) any later version. 
15 | */ 16 | 17 | #ifndef _LINUX_ION_SUNXI_H 18 | #define _LINUX_ION_SUNXI_H 19 | 20 | /** 21 | * ion_client_create() - allocate a client and returns it 22 | * @name: used for debugging 23 | */ 24 | struct ion_client *sunxi_ion_client_create(const char *name); 25 | 26 | void sunxi_ion_probe_drm_info(u32 *drm_phy_addr, u32 *drm_tee_addr); 27 | 28 | int optee_probe_drm_configure( 29 | unsigned long *drm_base, 30 | size_t *drm_size, 31 | unsigned long *tee_base); 32 | 33 | #endif 34 | -------------------------------------------------------------------------------- /ion/sunxi/sunxi_ion_priv.h: -------------------------------------------------------------------------------- 1 | #ifndef _SUNXI_ION_PRIV_H 2 | #define _SUNXI_ION_PRIV_H 3 | 4 | #include "cache.h" 5 | 6 | #define ION_IOC_SUNXI_FLUSH_RANGE 5 7 | #define ION_IOC_SUNXI_FLUSH_ALL 6 8 | #define ION_IOC_SUNXI_PHYS_ADDR 7 9 | #define ION_IOC_SUNXI_DMA_COPY 8 10 | #define ION_IOC_SUNXI_DUMP 9 11 | #define ION_IOC_SUNXI_POOL_FREE 10 12 | 13 | typedef struct { 14 | long start; 15 | long end; 16 | }sunxi_cache_range; 17 | 18 | typedef struct { 19 | struct ion_handle_data handle; 20 | unsigned int phys_addr; 21 | unsigned int size; 22 | }sunxi_phys_data; 23 | 24 | struct ion_cma_buffer_info { 25 | void *cpu_addr; 26 | dma_addr_t handle; 27 | struct sg_table *table; 28 | }; 29 | 30 | #endif -------------------------------------------------------------------------------- /ion/uapi/ion.h: -------------------------------------------------------------------------------- 1 | /* 2 | * drivers/staging/android/uapi/ion.h 3 | * 4 | * Copyright (C) 2011 Google, Inc. 5 | * 6 | * This software is licensed under the terms of the GNU General Public 7 | * License version 2, as published by the Free Software Foundation, and 8 | * may be copied, distributed, and modified under those terms. 9 | * 10 | * This program is distributed in the hope that it will be useful, 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 | * GNU General Public License for more details. 14 | * 15 | */ 16 | 17 | #ifndef _UAPI_LINUX_ION_H 18 | #define _UAPI_LINUX_ION_H 19 | 20 | #include 21 | #include 22 | 23 | typedef int ion_user_handle_t; 24 | 25 | /** 26 | * enum ion_heap_types - list of all possible types of heaps 27 | * @ION_HEAP_TYPE_SYSTEM: memory allocated via vmalloc 28 | * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc 29 | * @ION_HEAP_TYPE_CARVEOUT: memory allocated from a prereserved 30 | * carveout heap, allocations are physically 31 | * contiguous 32 | * @ION_HEAP_TYPE_DMA: memory allocated via DMA API 33 | * @ION_NUM_HEAPS: helper for iterating over heaps, a bit mask 34 | * is used to identify the heaps, so only 32 35 | * total heap types are supported 36 | */ 37 | enum ion_heap_type { 38 | ION_HEAP_TYPE_SYSTEM, 39 | ION_HEAP_TYPE_SYSTEM_CONTIG, 40 | ION_HEAP_TYPE_CARVEOUT, 41 | ION_HEAP_TYPE_CHUNK, 42 | ION_HEAP_TYPE_DMA, 43 | ION_HEAP_TYPE_SECURE, /* allwinner add */ 44 | ION_HEAP_TYPE_CUSTOM, /* 45 | * must be last so device specific heaps always 46 | * are at the end of this enum 47 | */ 48 | }; 49 | 50 | #define ION_NUM_HEAP_IDS (sizeof(unsigned int) * 8) 51 | 52 | /** 53 | * allocation flags - the lower 16 bits are used by core ion, the upper 16 54 | * bits are reserved for use by the heaps themselves. 
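The private command numbers and structs above are reached from userspace through the standard ION_IOC_CUSTOM ioctl (defined further down in uapi/ion.h), assuming the core ion handler forwards `cmd` and `arg` to the custom ioctl registered at ion_device_create() time, here `sunxi_ion_ioctl`. A hedged userspace sketch follows; `sunxi_ion_flush_range` is an illustrative helper name, and the structures are mirrored locally because sunxi_ion_priv.h is not a uapi header.

```
/*
 * Userspace sketch: clean/flush a CPU-written range of a cached ION
 * buffer via ION_IOC_SUNXI_FLUSH_RANGE. Layouts mirror the kernel
 * headers above; error handling is left to the caller.
 */
#include <stddef.h>
#include <linux/ioctl.h>
#include <sys/ioctl.h>

struct ion_custom_data {
	unsigned int cmd;
	unsigned long arg;
};

typedef struct {
	long start;
	long end;
} sunxi_cache_range;

#define ION_IOC_CUSTOM			_IOWR('I', 6, struct ion_custom_data)
#define ION_IOC_SUNXI_FLUSH_RANGE	5

static int sunxi_ion_flush_range(int ion_fd, void *vaddr, size_t len)
{
	sunxi_cache_range range = {
		.start = (long)vaddr,
		.end   = (long)vaddr + (long)len,
	};
	struct ion_custom_data data = {
		.cmd = ION_IOC_SUNXI_FLUSH_RANGE,
		.arg = (unsigned long)&range,
	};

	return ioctl(ion_fd, ION_IOC_CUSTOM, &data);
}
```

Here `ion_fd` is the descriptor obtained by opening the usual /dev/ion node and `vaddr` a cached mapping of an ION buffer; the allocation flow itself is sketched after the ioctl definitions below.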
55 | */ 56 | 57 | /* 58 | * mappings of this buffer should be cached, ion will do cache maintenance 59 | * when the buffer is mapped for dma 60 | */ 61 | #define ION_FLAG_CACHED 1 62 | 63 | /* 64 | * mappings of this buffer will created at mmap time, if this is set 65 | * caches must be managed manually 66 | */ 67 | #define ION_FLAG_CACHED_NEEDS_SYNC 2 68 | 69 | /** 70 | * DOC: Ion Userspace API 71 | * 72 | * create a client by opening /dev/ion 73 | * most operations handled via following ioctls 74 | * 75 | */ 76 | 77 | /** 78 | * struct ion_allocation_data - metadata passed from userspace for allocations 79 | * @len: size of the allocation 80 | * @align: required alignment of the allocation 81 | * @heap_id_mask: mask of heap ids to allocate from 82 | * @flags: flags passed to heap 83 | * @handle: pointer that will be populated with a cookie to use to 84 | * refer to this allocation 85 | * 86 | * Provided by userspace as an argument to the ioctl 87 | */ 88 | struct ion_allocation_data { 89 | size_t len; 90 | size_t align; 91 | unsigned int heap_id_mask; 92 | unsigned int flags; 93 | ion_user_handle_t handle; 94 | }; 95 | 96 | /** 97 | * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair 98 | * @handle: a handle 99 | * @fd: a file descriptor representing that handle 100 | * 101 | * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with 102 | * the handle returned from ion alloc, and the kernel returns the file 103 | * descriptor to share or map in the fd field. For ION_IOC_IMPORT, userspace 104 | * provides the file descriptor and the kernel returns the handle. 105 | */ 106 | struct ion_fd_data { 107 | ion_user_handle_t handle; 108 | int fd; 109 | }; 110 | 111 | /** 112 | * struct ion_handle_data - a handle passed to/from the kernel 113 | * @handle: a handle 114 | */ 115 | struct ion_handle_data { 116 | ion_user_handle_t handle; 117 | }; 118 | 119 | /** 120 | * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl 121 | * @cmd: the custom ioctl function to call 122 | * @arg: additional data to pass to the custom ioctl, typically a user 123 | * pointer to a predefined structure 124 | * 125 | * This works just like the regular cmd and arg fields of an ioctl. 126 | */ 127 | struct ion_custom_data { 128 | unsigned int cmd; 129 | unsigned long arg; 130 | }; 131 | 132 | #define MAX_HEAP_NAME 32 133 | 134 | /** 135 | * struct ion_heap_data - data about a heap 136 | * @name - first 32 characters of the heap name 137 | * @type - heap type 138 | * @heap_id - heap id for the heap 139 | */ 140 | struct ion_heap_data { 141 | char name[MAX_HEAP_NAME]; 142 | __u32 type; 143 | __u32 heap_id; 144 | __u32 reserved0; 145 | __u32 reserved1; 146 | __u32 reserved2; 147 | }; 148 | 149 | /** 150 | * struct ion_heap_query - collection of data about all heaps 151 | * @cnt - total number of heaps to be copied 152 | * @heaps - buffer to copy heap data 153 | */ 154 | struct ion_heap_query { 155 | __u32 cnt; /* Total number of heaps to be copied */ 156 | __u32 reserved0; /* align to 64bits */ 157 | __u64 heaps; /* buffer to be populated */ 158 | __u32 reserved1; 159 | __u32 reserved2; 160 | }; 161 | 162 | #define ION_IOC_MAGIC 'I' 163 | 164 | /** 165 | * DOC: ION_IOC_ALLOC - allocate memory 166 | * 167 | * Takes an ion_allocation_data struct and returns it with the handle field 168 | * populated with the opaque handle for the allocation. 
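The allocation path these structures and ioctls describe looks roughly like this from userspace. The heap id, size and flags below are illustrative, error handling is trimmed, and the definitions are mirrored from this header rather than included, so treat it as a sketch.

```
/*
 * Userspace sketch: allocate, get an fd with ION_IOC_MAP, mmap it,
 * then free. Values are illustrative.
 */
#include <fcntl.h>
#include <stddef.h>
#include <linux/ioctl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

typedef int ion_user_handle_t;

struct ion_allocation_data {
	size_t len;
	size_t align;
	unsigned int heap_id_mask;
	unsigned int flags;
	ion_user_handle_t handle;
};

struct ion_fd_data {
	ion_user_handle_t handle;
	int fd;
};

struct ion_handle_data {
	ion_user_handle_t handle;
};

#define ION_FLAG_CACHED	1
#define ION_IOC_ALLOC	_IOWR('I', 0, struct ion_allocation_data)
#define ION_IOC_FREE	_IOWR('I', 1, struct ion_handle_data)
#define ION_IOC_MAP	_IOWR('I', 2, struct ion_fd_data)

int main(void)
{
	int ion_fd = open("/dev/ion", O_RDWR);
	struct ion_allocation_data alloc = {
		.len		= 1 << 20,	/* 1 MiB */
		.align		= 0,
		.heap_id_mask	= 1 << 4,	/* illustrative heap id 4 */
		.flags		= ION_FLAG_CACHED,
	};
	struct ion_fd_data map;
	struct ion_handle_data handle;
	void *vaddr;

	if (ion_fd < 0 || ioctl(ion_fd, ION_IOC_ALLOC, &alloc) < 0)
		return 1;

	map.handle = alloc.handle;
	if (ioctl(ion_fd, ION_IOC_MAP, &map) < 0)
		return 1;

	vaddr = mmap(NULL, alloc.len, PROT_READ | PROT_WRITE, MAP_SHARED,
		     map.fd, 0);
	if (vaddr == MAP_FAILED)
		return 1;

	/* ... fill the buffer, flush it with the sunxi ioctl above ... */

	munmap(vaddr, alloc.len);
	close(map.fd);
	handle.handle = alloc.handle;
	ioctl(ion_fd, ION_IOC_FREE, &handle);
	close(ion_fd);
	return 0;
}
```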
169 | */ 170 | #define ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \ 171 | struct ion_allocation_data) 172 | 173 | /** 174 | * DOC: ION_IOC_FREE - free memory 175 | * 176 | * Takes an ion_handle_data struct and frees the handle. 177 | */ 178 | #define ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data) 179 | 180 | /** 181 | * DOC: ION_IOC_MAP - get a file descriptor to mmap 182 | * 183 | * Takes an ion_fd_data struct with the handle field populated with a valid 184 | * opaque handle. Returns the struct with the fd field set to a file 185 | * descriptor open in the current address space. This file descriptor 186 | * can then be used as an argument to mmap. 187 | */ 188 | #define ION_IOC_MAP _IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data) 189 | 190 | /** 191 | * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation 192 | * 193 | * Takes an ion_fd_data struct with the handle field populated with a valid 194 | * opaque handle. Returns the struct with the fd field set to a file 195 | * descriptor open in the current address space. This file descriptor 196 | * can then be passed to another process. The corresponding opaque handle can 197 | * be retrieved via ION_IOC_IMPORT. 198 | */ 199 | #define ION_IOC_SHARE _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data) 200 | 201 | /** 202 | * DOC: ION_IOC_IMPORT - imports a shared file descriptor 203 | * 204 | * Takes an ion_fd_data struct with the fd field populated with a valid file 205 | * descriptor obtained from ION_IOC_SHARE and returns the struct with the handle 206 | * filed set to the corresponding opaque handle. 207 | */ 208 | #define ION_IOC_IMPORT _IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data) 209 | 210 | /** 211 | * DOC: ION_IOC_SYNC - syncs a shared file descriptors to memory 212 | * 213 | * Deprecated in favor of using the dma_buf api's correctly (syncing 214 | * will happen automatically when the buffer is mapped to a device). 215 | * If necessary should be used after touching a cached buffer from the cpu, 216 | * this will make the buffer in memory coherent. 217 | */ 218 | #define ION_IOC_SYNC _IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data) 219 | 220 | /** 221 | * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl 222 | * 223 | * Takes the argument of the architecture specific ioctl to call and 224 | * passes appropriate userdata for that ioctl 225 | */ 226 | #define ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data) 227 | 228 | /** 229 | * DOC: ION_IOC_HEAP_QUERY - information about available heaps 230 | * 231 | * Takes an ion_heap_query structure and populates information about 232 | * available Ion heaps. 233 | */ 234 | #define ION_IOC_HEAP_QUERY _IOWR(ION_IOC_MAGIC, 8, \ 235 | struct ion_heap_query) 236 | 237 | #endif /* _UAPI_LINUX_ION_H */ 238 | -------------------------------------------------------------------------------- /ve/Kconfig: -------------------------------------------------------------------------------- 1 | config VIDEO_SUNXI_CEDAR_VE 2 | tristate "Allwinner CedarX VideoEngine Driver" 3 | select DMA_SHARED_BUFFER 4 | ---help--- 5 | This is the driver for sunxi video decoder, including h264/ 6 | mpeg4/mpeg2/vc1/rmvb. 7 | To compile this driver as a module, choose M here: the 8 | module will be called cedar_dev. 
9 | -------------------------------------------------------------------------------- /ve/Makefile: -------------------------------------------------------------------------------- 1 | obj-$(CONFIG_VIDEO_SUNXI_CEDAR_VE) += cedar_ve.o -------------------------------------------------------------------------------- /ve/cedar_ve.c: -------------------------------------------------------------------------------- 1 | /* 2 | * drivers\media\cedar_ve 3 | * (C) Copyright 2010-2016 4 | * Reuuimlla Technology Co., Ltd. 5 | * fangning 6 | * 7 | * some simple description for this code 8 | * 9 | * This program is free software; you can redistribute it and/or 10 | * modify it under the terms of the GNU General Public License as 11 | * published by the Free Software Foundation; either version 2 of 12 | * the License, or (at your option) any later version. 13 | * 14 | */ 15 | 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include 28 | #include 29 | #include 30 | #include 31 | #include 32 | #include 33 | #include 34 | #include 35 | #include 36 | #include 37 | #include 38 | #include 39 | #include 40 | #include 41 | #include 42 | #include 43 | #include 44 | #include 45 | #include 46 | #include 47 | #include 48 | #include 49 | #include 50 | #include 51 | #include 52 | #include "cedar_ve_priv.h" 53 | #include "cedar_ve.h" 54 | #include 55 | #include 56 | #include 57 | #include 58 | 59 | #define DRV_VERSION "0.01alpha" 60 | 61 | struct dentry *ve_debugfs_root; 62 | struct ve_debugfs_buffer ve_debug_proc_info; 63 | 64 | int g_dev_major = CEDARDEV_MAJOR; 65 | int g_dev_minor = CEDARDEV_MINOR; 66 | 67 | /*S_IRUGO represent that g_dev_major can be read,but canot be write*/ 68 | module_param(g_dev_major, int, S_IRUGO); 69 | module_param(g_dev_minor, int, S_IRUGO); 70 | 71 | static DECLARE_WAIT_QUEUE_HEAD(wait_ve); 72 | 73 | static struct cedar_dev *cedar_devp; 74 | 75 | static int clk_status; 76 | static LIST_HEAD(run_task_list); 77 | static LIST_HEAD(del_task_list); 78 | static spinlock_t cedar_spin_lock; 79 | 80 | static irqreturn_t VideoEngineInterupt(int irq, void *data) 81 | { 82 | unsigned long ve_int_status_reg; 83 | unsigned long ve_int_ctrl_reg; 84 | unsigned int status; 85 | volatile int val; 86 | int modual_sel; 87 | unsigned int interrupt_enable; 88 | struct cedar_dev *dev = data; 89 | 90 | modual_sel = readl(cedar_devp->regs_macc + 0); 91 | if (dev->capabilities & CEDARV_ISP_OLD) { 92 | if (modual_sel & 0xa) { 93 | if ((modual_sel & 0xb) == 0xb) { 94 | /*jpg enc*/ 95 | ve_int_status_reg = 96 | (unsigned long)(cedar_devp->regs_macc + 97 | 0xb00 + 0x1c); 98 | ve_int_ctrl_reg = 99 | (unsigned long)(cedar_devp->regs_macc + 100 | 0xb00 + 0x14); 101 | interrupt_enable = 102 | readl((void *)ve_int_ctrl_reg) & (0x7); 103 | status = readl((void *)ve_int_status_reg); 104 | status &= 0xf; 105 | } else { 106 | /*isp*/ 107 | ve_int_status_reg = 108 | (unsigned long)(cedar_devp->regs_macc + 109 | 0xa00 + 0x10); 110 | ve_int_ctrl_reg = 111 | (unsigned long)(cedar_devp->regs_macc + 112 | 0xa00 + 0x08); 113 | interrupt_enable = 114 | readl((void *)ve_int_ctrl_reg) & (0x1); 115 | status = readl((void *)ve_int_status_reg); 116 | status &= 0x1; 117 | } 118 | 119 | if (status && interrupt_enable) { 120 | /*disable interrupt*/ 121 | if ((modual_sel & 0xb) == 0xb) { 122 | ve_int_ctrl_reg = 123 | (unsigned long)(cedar_devp 124 | ->regs_macc + 125 | 0xb00 + 0x14); 126 | val = readl((void *)ve_int_ctrl_reg); 127 | 
writel(val & (~0x7), 128 | (void *)ve_int_ctrl_reg); 129 | } else { 130 | ve_int_ctrl_reg = 131 | (unsigned long)(cedar_devp 132 | ->regs_macc + 133 | 0xa00 + 0x08); 134 | val = readl((void *)ve_int_ctrl_reg); 135 | writel(val & (~0x1), 136 | (void *)ve_int_ctrl_reg); 137 | } 138 | 139 | cedar_devp->en_irq_value = 140 | 1; /*hx modify 2011-8-1 16:08:47*/ 141 | cedar_devp->en_irq_flag = 1; 142 | /*any interrupt will wake up wait queue*/ 143 | wake_up(&wait_ve); /*ioctl*/ 144 | } 145 | } 146 | } else { 147 | if (modual_sel & (3 << 6)) { 148 | if (modual_sel & (1 << 7)) { 149 | /*avc enc*/ 150 | ve_int_status_reg = 151 | (unsigned long)(cedar_devp->regs_macc + 152 | 0xb00 + 0x1c); 153 | ve_int_ctrl_reg = 154 | (unsigned long)(cedar_devp->regs_macc + 155 | 0xb00 + 0x14); 156 | interrupt_enable = 157 | readl((void *)ve_int_ctrl_reg) & (0x7); 158 | status = readl((void *)ve_int_status_reg); 159 | status &= 0xf; 160 | } else { 161 | /*isp*/ 162 | ve_int_status_reg = 163 | (unsigned long)(cedar_devp->regs_macc + 164 | 0xa00 + 0x10); 165 | ve_int_ctrl_reg = 166 | (unsigned long)(cedar_devp->regs_macc + 167 | 0xa00 + 0x08); 168 | interrupt_enable = 169 | readl((void *)ve_int_ctrl_reg) & (0x1); 170 | status = readl((void *)ve_int_status_reg); 171 | status &= 0x1; 172 | } 173 | 174 | /*modify by fangning 2013-05-22*/ 175 | if (status && interrupt_enable) { 176 | /*disable interrupt*/ 177 | /*avc enc*/ 178 | if (modual_sel & (1 << 7)) { 179 | ve_int_ctrl_reg = 180 | (unsigned long)(cedar_devp 181 | ->regs_macc + 182 | 0xb00 + 0x14); 183 | val = readl((void *)ve_int_ctrl_reg); 184 | writel(val & (~0x7), 185 | (void *)ve_int_ctrl_reg); 186 | } else { 187 | /*isp*/ 188 | ve_int_ctrl_reg = 189 | (unsigned long)(cedar_devp 190 | ->regs_macc + 191 | 0xa00 + 0x08); 192 | val = readl((void *)ve_int_ctrl_reg); 193 | writel(val & (~0x1), 194 | (void *)ve_int_ctrl_reg); 195 | } 196 | /*hx modify 2011-8-1 16:08:47*/ 197 | cedar_devp->en_irq_value = 1; 198 | cedar_devp->en_irq_flag = 1; 199 | /*any interrupt will wake up wait queue*/ 200 | wake_up(&wait_ve); 201 | } 202 | } 203 | if (dev->capabilities & CEDARV_ISP_NEW) { 204 | if (modual_sel & (0x20)) { 205 | ve_int_status_reg = 206 | (unsigned long)(cedar_devp->regs_macc + 207 | 0xe00 + 0x1c); 208 | ve_int_ctrl_reg = 209 | (unsigned long)(cedar_devp->regs_macc + 210 | 0xe00 + 0x14); 211 | interrupt_enable = 212 | readl((void *)ve_int_ctrl_reg) & (0x38); 213 | 214 | status = readl((void *)ve_int_status_reg); 215 | 216 | if ((status & 0x7) && interrupt_enable) { 217 | /*disable interrupt*/ 218 | val = readl((void *)ve_int_ctrl_reg); 219 | writel(val & (~0x38), 220 | (void *)ve_int_ctrl_reg); 221 | 222 | cedar_devp->jpeg_irq_value = 1; 223 | cedar_devp->jpeg_irq_flag = 1; 224 | 225 | /*any interrupt will wake up wait queue*/ 226 | wake_up(&wait_ve); 227 | } 228 | } 229 | } 230 | } 231 | 232 | modual_sel &= 0xf; 233 | if (modual_sel <= 4) { 234 | /*estimate Which video format*/ 235 | switch (modual_sel) { 236 | case 0: /*mpeg124*/ 237 | ve_int_status_reg = 238 | (unsigned long)(cedar_devp->regs_macc + 0x100 + 239 | 0x1c); 240 | ve_int_ctrl_reg = 241 | (unsigned long)(cedar_devp->regs_macc + 0x100 + 242 | 0x14); 243 | interrupt_enable = 244 | readl((void *)ve_int_ctrl_reg) & (0x7c); 245 | break; 246 | case 1: /*h264*/ 247 | ve_int_status_reg = 248 | (unsigned long)(cedar_devp->regs_macc + 0x200 + 249 | 0x28); 250 | ve_int_ctrl_reg = 251 | (unsigned long)(cedar_devp->regs_macc + 0x200 + 252 | 0x20); 253 | interrupt_enable = 254 | readl((void *)ve_int_ctrl_reg) & (0xf); 
255 | break; 256 | case 2: /*vc1*/ 257 | ve_int_status_reg = 258 | (unsigned long)(cedar_devp->regs_macc + 0x300 + 259 | 0x2c); 260 | ve_int_ctrl_reg = 261 | (unsigned long)(cedar_devp->regs_macc + 0x300 + 262 | 0x24); 263 | interrupt_enable = 264 | readl((void *)ve_int_ctrl_reg) & (0xf); 265 | break; 266 | case 3: /*rv*/ 267 | ve_int_status_reg = 268 | (unsigned long)(cedar_devp->regs_macc + 0x400 + 269 | 0x1c); 270 | ve_int_ctrl_reg = 271 | (unsigned long)(cedar_devp->regs_macc + 0x400 + 272 | 0x14); 273 | interrupt_enable = 274 | readl((void *)ve_int_ctrl_reg) & (0xf); 275 | break; 276 | 277 | case 4: /*hevc*/ 278 | ve_int_status_reg = 279 | (unsigned long)(cedar_devp->regs_macc + 0x500 + 280 | 0x38); 281 | ve_int_ctrl_reg = 282 | (unsigned long)(cedar_devp->regs_macc + 0x500 + 283 | 0x30); 284 | interrupt_enable = 285 | readl((void *)ve_int_ctrl_reg) & (0xf); 286 | break; 287 | 288 | default: 289 | ve_int_status_reg = 290 | (unsigned long)(cedar_devp->regs_macc + 0x100 + 291 | 0x1c); 292 | ve_int_ctrl_reg = 293 | (unsigned long)(cedar_devp->regs_macc + 0x100 + 294 | 0x14); 295 | interrupt_enable = 296 | readl((void *)ve_int_ctrl_reg) & (0xf); 297 | dev_warn(cedar_devp->platform_dev, 298 | "ve mode :%x " 299 | "not defined!\n", 300 | modual_sel); 301 | break; 302 | } 303 | 304 | status = readl((void *)ve_int_status_reg); 305 | 306 | /*modify by fangning 2013-05-22*/ 307 | if ((status & 0xf) && interrupt_enable) { 308 | /*disable interrupt*/ 309 | if (modual_sel == 0) { 310 | val = readl((void *)ve_int_ctrl_reg); 311 | writel(val & (~0x7c), (void *)ve_int_ctrl_reg); 312 | } else { 313 | val = readl((void *)ve_int_ctrl_reg); 314 | writel(val & (~0xf), (void *)ve_int_ctrl_reg); 315 | } 316 | 317 | cedar_devp->de_irq_value = 1; 318 | cedar_devp->de_irq_flag = 1; 319 | /*any interrupt will wake up wait queue*/ 320 | wake_up(&wait_ve); 321 | } 322 | } 323 | 324 | return IRQ_HANDLED; 325 | } 326 | 327 | int enable_cedar_hw_clk(void) 328 | { 329 | unsigned long flags; 330 | int res = -EFAULT; 331 | 332 | spin_lock_irqsave(&cedar_spin_lock, flags); 333 | 334 | if (clk_status == 1) 335 | goto out; 336 | 337 | clk_status = 1; 338 | 339 | reset_control_deassert(cedar_devp->rstc); 340 | if (clk_enable(cedar_devp->mod_clk)) { 341 | dev_warn(cedar_devp->platform_dev, 342 | "enable cedar_devp->mod_clk failed;\n"); 343 | goto out; 344 | } else { 345 | res = 0; 346 | } 347 | 348 | AW_MEM_INIT_LIST_HEAD(&cedar_devp->list); 349 | dev_dbg(cedar_devp->platform_dev, "%s,%d\n", __func__, __LINE__); 350 | 351 | out: 352 | spin_unlock_irqrestore(&cedar_spin_lock, flags); 353 | return res; 354 | } 355 | 356 | int disable_cedar_hw_clk(void) 357 | { 358 | unsigned long flags; 359 | struct aw_mem_list_head *pos, *q; 360 | int res = -EFAULT; 361 | 362 | spin_lock_irqsave(&cedar_spin_lock, flags); 363 | 364 | if (clk_status == 0) { 365 | res = 0; 366 | goto out; 367 | } 368 | clk_status = 0; 369 | 370 | if ((NULL == cedar_devp->mod_clk) || (IS_ERR(cedar_devp->mod_clk))) 371 | dev_warn(cedar_devp->platform_dev, 372 | "cedar_devp->mod_clk is invalid\n"); 373 | else { 374 | clk_disable(cedar_devp->mod_clk); 375 | reset_control_assert(cedar_devp->rstc); 376 | res = 0; 377 | } 378 | 379 | aw_mem_list_for_each_safe(pos, q, &cedar_devp->list) 380 | { 381 | struct cedarv_iommu_buffer *tmp; 382 | 383 | tmp = aw_mem_list_entry(pos, struct cedarv_iommu_buffer, 384 | i_list); 385 | aw_mem_list_del(pos); 386 | kfree(tmp); 387 | } 388 | dev_dbg(cedar_devp->platform_dev, "%s,%d\n", __func__, __LINE__); 389 | 390 | out: 391 | 
spin_unlock_irqrestore(&cedar_spin_lock, flags); 392 | return res; 393 | } 394 | 395 | void cedardev_insert_task(struct cedarv_engine_task *new_task) 396 | { 397 | struct cedarv_engine_task *task_entry; 398 | unsigned long flags; 399 | 400 | spin_lock_irqsave(&cedar_spin_lock, flags); 401 | 402 | if (list_empty(&run_task_list)) 403 | new_task->is_first_task = 1; 404 | 405 | list_for_each_entry (task_entry, &run_task_list, list) { 406 | if ((task_entry->is_first_task == 0) && 407 | (task_entry->running == 0) && 408 | (task_entry->t.task_prio < new_task->t.task_prio)) { 409 | break; 410 | } 411 | } 412 | 413 | list_add(&new_task->list, task_entry->list.prev); 414 | 415 | dev_dbg(cedar_devp->platform_dev, "%s,%d, TASK_ID:", __func__, 416 | __LINE__); 417 | list_for_each_entry (task_entry, &run_task_list, list) { 418 | dev_dbg(cedar_devp->platform_dev, "%d!", task_entry->t.ID); 419 | } 420 | dev_dbg(cedar_devp->platform_dev, "\n"); 421 | 422 | mod_timer(&cedar_devp->cedar_engine_timer, jiffies + 0); 423 | 424 | spin_unlock_irqrestore(&cedar_spin_lock, flags); 425 | } 426 | 427 | int cedardev_del_task(int task_id) 428 | { 429 | struct cedarv_engine_task *task_entry; 430 | unsigned long flags; 431 | 432 | spin_lock_irqsave(&cedar_spin_lock, flags); 433 | 434 | list_for_each_entry (task_entry, &run_task_list, list) { 435 | if (task_entry->t.ID == task_id && 436 | task_entry->status != TASK_RELEASE) { 437 | task_entry->status = TASK_RELEASE; 438 | 439 | spin_unlock_irqrestore(&cedar_spin_lock, flags); 440 | mod_timer(&cedar_devp->cedar_engine_timer, jiffies + 0); 441 | return 0; 442 | } 443 | } 444 | spin_unlock_irqrestore(&cedar_spin_lock, flags); 445 | 446 | return -1; 447 | } 448 | 449 | int cedardev_check_delay(int check_prio) 450 | { 451 | struct cedarv_engine_task *task_entry; 452 | int timeout_total = 0; 453 | unsigned long flags; 454 | 455 | spin_lock_irqsave(&cedar_spin_lock, flags); 456 | list_for_each_entry (task_entry, &run_task_list, list) { 457 | if ((task_entry->t.task_prio >= check_prio) || 458 | (task_entry->running == 1) || 459 | (task_entry->is_first_task == 1)) 460 | timeout_total = timeout_total + task_entry->t.frametime; 461 | } 462 | 463 | spin_unlock_irqrestore(&cedar_spin_lock, flags); 464 | dev_dbg(cedar_devp->platform_dev, "%s,%d,%d\n", __func__, __LINE__, 465 | timeout_total); 466 | return timeout_total; 467 | } 468 | 469 | static void cedar_engine_for_timer_rel(struct timer_list *arg) 470 | { 471 | unsigned long flags; 472 | int ret = 0; 473 | spin_lock_irqsave(&cedar_spin_lock, flags); 474 | 475 | if (list_empty(&run_task_list)) { 476 | ret = disable_cedar_hw_clk(); 477 | if (ret < 0) { 478 | dev_warn(cedar_devp->platform_dev, 479 | "clk disable error!\n"); 480 | } 481 | } else { 482 | dev_warn(cedar_devp->platform_dev, "clk disable time out " 483 | "but task left\n"); 484 | mod_timer(&cedar_devp->cedar_engine_timer, 485 | jiffies + msecs_to_jiffies(TIMER_CIRCLE)); 486 | } 487 | 488 | spin_unlock_irqrestore(&cedar_spin_lock, flags); 489 | } 490 | 491 | static void cedar_engine_for_events(struct timer_list *arg) 492 | { 493 | struct cedarv_engine_task *task_entry, *task_entry_tmp; 494 | struct kernel_siginfo info; 495 | unsigned long flags; 496 | 497 | spin_lock_irqsave(&cedar_spin_lock, flags); 498 | 499 | list_for_each_entry_safe (task_entry, task_entry_tmp, &run_task_list, 500 | list) { 501 | mod_timer(&cedar_devp->cedar_engine_timer_rel, 502 | jiffies + msecs_to_jiffies(CLK_REL_TIME)); 503 | if (task_entry->status == TASK_RELEASE || 504 | time_after(jiffies, 
task_entry->t.timeout)) { 505 | if (task_entry->status == TASK_INIT) 506 | task_entry->status = TASK_TIMEOUT; 507 | list_move(&task_entry->list, &del_task_list); 508 | } 509 | } 510 | 511 | list_for_each_entry_safe (task_entry, task_entry_tmp, &del_task_list, 512 | list) { 513 | info.si_signo = SIG_CEDAR; 514 | info.si_code = task_entry->t.ID; 515 | if (task_entry->status == TASK_TIMEOUT) { 516 | info.si_errno = TASK_TIMEOUT; 517 | send_sig_info(SIG_CEDAR, &info, 518 | task_entry->task_handle); 519 | } else if (task_entry->status == TASK_RELEASE) { 520 | info.si_errno = TASK_RELEASE; 521 | send_sig_info(SIG_CEDAR, &info, 522 | task_entry->task_handle); 523 | } 524 | list_del(&task_entry->list); 525 | kfree(task_entry); 526 | } 527 | 528 | if (!list_empty(&run_task_list)) { 529 | task_entry = list_entry(run_task_list.next, 530 | struct cedarv_engine_task, list); 531 | if (task_entry->running == 0) { 532 | task_entry->running = 1; 533 | info.si_signo = SIG_CEDAR; 534 | info.si_code = task_entry->t.ID; 535 | info.si_errno = TASK_INIT; 536 | send_sig_info(SIG_CEDAR, &info, 537 | task_entry->task_handle); 538 | } 539 | 540 | mod_timer(&cedar_devp->cedar_engine_timer, 541 | jiffies + msecs_to_jiffies(TIMER_CIRCLE)); 542 | } 543 | 544 | spin_unlock_irqrestore(&cedar_spin_lock, flags); 545 | } 546 | 547 | static long compat_cedardev_ioctl(struct file *filp, unsigned int cmd, 548 | unsigned long arg) 549 | { 550 | long ret = 0; 551 | int ve_timeout = 0; 552 | /*struct cedar_dev *devp;*/ 553 | unsigned long flags; 554 | struct ve_info *info; 555 | 556 | info = filp->private_data; 557 | 558 | switch (cmd) { 559 | case IOCTL_ENGINE_REQ: 560 | if (down_interruptible(&cedar_devp->sem)) 561 | return -ERESTARTSYS; 562 | cedar_devp->ref_count++; 563 | if (1 == cedar_devp->ref_count) { 564 | cedar_devp->last_min_freq = 0; 565 | enable_cedar_hw_clk(); 566 | } 567 | up(&cedar_devp->sem); 568 | break; 569 | case IOCTL_ENGINE_REL: 570 | if (down_interruptible(&cedar_devp->sem)) 571 | return -ERESTARTSYS; 572 | cedar_devp->ref_count--; 573 | if (0 == cedar_devp->ref_count) { 574 | ret = disable_cedar_hw_clk(); 575 | if (ret < 0) { 576 | dev_warn(cedar_devp->platform_dev, 577 | "IOCTL_ENGINE_REL " 578 | "clk disable error!\n"); 579 | up(&cedar_devp->sem); 580 | return -EFAULT; 581 | } 582 | } 583 | up(&cedar_devp->sem); 584 | return ret; 585 | case IOCTL_ENGINE_CHECK_DELAY: { 586 | struct cedarv_engine_task_info task_info; 587 | 588 | if (copy_from_user(&task_info, (void __user *)arg, 589 | sizeof(struct cedarv_engine_task_info))) { 590 | dev_warn(cedar_devp->platform_dev, 591 | "%d " 592 | "copy_from_user fail\n", 593 | IOCTL_ENGINE_CHECK_DELAY); 594 | return -EFAULT; 595 | } 596 | task_info.total_time = 597 | cedardev_check_delay(task_info.task_prio); 598 | dev_dbg(cedar_devp->platform_dev, "%s,%d,%d\n", __func__, 599 | __LINE__, task_info.total_time); 600 | task_info.frametime = 0; 601 | spin_lock_irqsave(&cedar_spin_lock, flags); 602 | if (!list_empty(&run_task_list)) { 603 | struct cedarv_engine_task *task_entry; 604 | dev_dbg(cedar_devp->platform_dev, "%s,%d\n", __func__, 605 | __LINE__); 606 | task_entry = 607 | list_entry(run_task_list.next, 608 | struct cedarv_engine_task, list); 609 | if (task_entry->running == 1) 610 | task_info.frametime = task_entry->t.frametime; 611 | dev_dbg(cedar_devp->platform_dev, "%s,%d,%d\n", 612 | __func__, __LINE__, task_info.frametime); 613 | } 614 | spin_unlock_irqrestore(&cedar_spin_lock, flags); 615 | 616 | if (copy_to_user((void *)arg, &task_info, 617 | sizeof(struct 
cedarv_engine_task_info))) { 618 | dev_warn(cedar_devp->platform_dev, 619 | "%d " 620 | "copy_to_user fail\n", 621 | IOCTL_ENGINE_CHECK_DELAY); 622 | return -EFAULT; 623 | } 624 | } break; 625 | case IOCTL_WAIT_VE_DE: 626 | ve_timeout = (int)arg; 627 | cedar_devp->de_irq_value = 0; 628 | 629 | spin_lock_irqsave(&cedar_spin_lock, flags); 630 | if (cedar_devp->de_irq_flag) 631 | cedar_devp->de_irq_value = 1; 632 | spin_unlock_irqrestore(&cedar_spin_lock, flags); 633 | wait_event_timeout(wait_ve, cedar_devp->de_irq_flag, 634 | ve_timeout * HZ); 635 | cedar_devp->de_irq_flag = 0; 636 | 637 | return cedar_devp->de_irq_value; 638 | 639 | case IOCTL_WAIT_VE_EN: 640 | 641 | ve_timeout = (int)arg; 642 | cedar_devp->en_irq_value = 0; 643 | 644 | spin_lock_irqsave(&cedar_spin_lock, flags); 645 | if (cedar_devp->en_irq_flag) 646 | cedar_devp->en_irq_value = 1; 647 | spin_unlock_irqrestore(&cedar_spin_lock, flags); 648 | 649 | wait_event_timeout(wait_ve, cedar_devp->en_irq_flag, 650 | ve_timeout * HZ); 651 | cedar_devp->en_irq_flag = 0; 652 | 653 | return cedar_devp->en_irq_value; 654 | 655 | case IOCTL_WAIT_JPEG_DEC: 656 | ve_timeout = (int)arg; 657 | cedar_devp->jpeg_irq_value = 0; 658 | 659 | spin_lock_irqsave(&cedar_spin_lock, flags); 660 | if (cedar_devp->jpeg_irq_flag) 661 | cedar_devp->jpeg_irq_value = 1; 662 | spin_unlock_irqrestore(&cedar_spin_lock, flags); 663 | 664 | wait_event_timeout(wait_ve, cedar_devp->jpeg_irq_flag, 665 | ve_timeout * HZ); 666 | cedar_devp->jpeg_irq_flag = 0; 667 | return cedar_devp->jpeg_irq_value; 668 | 669 | case IOCTL_ENABLE_VE: 670 | if (clk_prepare_enable(cedar_devp->mod_clk)) { 671 | dev_warn(cedar_devp->platform_dev, 672 | "IOCTL_ENABLE_VE " 673 | "enable cedar_devp->mod_clk failed!\n"); 674 | } 675 | break; 676 | 677 | case IOCTL_DISABLE_VE: 678 | if ((NULL == cedar_devp->mod_clk) || 679 | IS_ERR(cedar_devp->mod_clk)) { 680 | dev_warn(cedar_devp->platform_dev, 681 | "IOCTL_DISABLE_VE " 682 | "cedar_devp->mod_clk is invalid\n"); 683 | return -EFAULT; 684 | } else { 685 | clk_disable_unprepare(cedar_devp->mod_clk); 686 | } 687 | break; 688 | 689 | case IOCTL_RESET_VE: 690 | reset_control_assert(cedar_devp->rstc); 691 | reset_control_deassert(cedar_devp->rstc); 692 | break; 693 | 694 | case IOCTL_SET_DRAM_HIGH_CHANNAL: { 695 | dev_err(cedar_devp->platform_dev, 696 | "IOCTL_SET_DRAM_HIGH_CHANNAL NOT IMPL\n"); 697 | break; 698 | } 699 | 700 | case IOCTL_SET_VE_FREQ: { 701 | int arg_rate = (int)arg; 702 | 703 | if (0 == cedar_devp->last_min_freq) { 704 | cedar_devp->last_min_freq = arg_rate; 705 | } else { 706 | if (arg_rate > cedar_devp->last_min_freq) { 707 | arg_rate = cedar_devp->last_min_freq; 708 | } else { 709 | cedar_devp->last_min_freq = arg_rate; 710 | } 711 | } 712 | if (arg_rate >= VE_CLK_LOW_WATER && 713 | arg_rate <= VE_CLK_HIGH_WATER && 714 | clk_get_rate(cedar_devp->mod_clk) / 1000000 != arg_rate) { 715 | clk_get_rate(cedar_devp->ahb_clk); 716 | if (clk_set_rate(cedar_devp->mod_clk, 717 | arg_rate * 1000000)) { 718 | dev_warn(cedar_devp->platform_dev, 719 | "set ve clock failed\n"); 720 | } 721 | } 722 | ret = clk_get_rate(cedar_devp->mod_clk); 723 | break; 724 | } 725 | case IOCTL_GETVALUE_AVS2: 726 | case IOCTL_ADJUST_AVS2: 727 | case IOCTL_ADJUST_AVS2_ABS: 728 | case IOCTL_CONFIG_AVS2: 729 | case IOCTL_RESET_AVS2: 730 | case IOCTL_PAUSE_AVS2: 731 | case IOCTL_START_AVS2: 732 | dev_warn(cedar_devp->platform_dev, 733 | "do not supprot this ioctrl now\n"); 734 | break; 735 | 736 | case IOCTL_GET_ENV_INFO: { 737 | struct cedarv_env_infomation_compat 
env_info; 738 | 739 | env_info.phymem_start = 0; 740 | env_info.phymem_total_size = 0; 741 | env_info.address_macc = 0; 742 | if (copy_to_user((char *)arg, &env_info, 743 | sizeof(struct cedarv_env_infomation_compat))) 744 | return -EFAULT; 745 | } break; 746 | case IOCTL_GET_IC_VER: { 747 | return 0; 748 | } 749 | case IOCTL_SET_REFCOUNT: 750 | cedar_devp->ref_count = (int)arg; 751 | break; 752 | case IOCTL_SET_VOL: { 753 | break; 754 | } 755 | case IOCTL_GET_LOCK: { 756 | int lock_ctl_ret = 0; 757 | u32 lock_type = arg; 758 | struct ve_info *vi = filp->private_data; 759 | 760 | if (lock_type == VE_LOCK_VDEC) 761 | mutex_lock(&cedar_devp->lock_vdec); 762 | else if (lock_type == VE_LOCK_VENC) 763 | mutex_lock(&cedar_devp->lock_venc); 764 | else if (lock_type == VE_LOCK_JDEC) 765 | mutex_lock(&cedar_devp->lock_jdec); 766 | else if (lock_type == VE_LOCK_00_REG) 767 | mutex_lock(&cedar_devp->lock_00_reg); 768 | else if (lock_type == VE_LOCK_04_REG) 769 | mutex_lock(&cedar_devp->lock_04_reg); 770 | else 771 | dev_err(cedar_devp->platform_dev, 772 | "invalid lock type '%d'", lock_type); 773 | 774 | if ((vi->lock_flags & lock_type) != 0) 775 | dev_err(cedar_devp->platform_dev, 776 | "when get lock, this should be 0!!!"); 777 | 778 | mutex_lock(&vi->lock_flag_io); 779 | vi->lock_flags |= lock_type; 780 | mutex_unlock(&vi->lock_flag_io); 781 | 782 | return lock_ctl_ret; 783 | } 784 | case IOCTL_SET_PROC_INFO: { 785 | struct VE_PROC_INFO ve_info; 786 | unsigned char channel_id = 0; 787 | 788 | mutex_lock(&ve_debug_proc_info.lock_proc); 789 | if (copy_from_user(&ve_info, (void __user *)arg, 790 | sizeof(struct VE_PROC_INFO))) { 791 | dev_warn(cedar_devp->platform_dev, 792 | "IOCTL_SET_PROC_INFO copy_from_user fail\n"); 793 | mutex_unlock(&ve_debug_proc_info.lock_proc); 794 | return -EFAULT; 795 | } 796 | 797 | channel_id = ve_info.channel_id; 798 | if (channel_id >= VE_DEBUGFS_MAX_CHANNEL) { 799 | dev_warn( 800 | cedar_devp->platform_dev, 801 | "set channel[%c] is bigger than max channel[%d]\n", 802 | channel_id, VE_DEBUGFS_MAX_CHANNEL); 803 | mutex_unlock(&ve_debug_proc_info.lock_proc); 804 | return -EFAULT; 805 | } 806 | 807 | ve_debug_proc_info.cur_channel_id = ve_info.channel_id; 808 | ve_debug_proc_info.proc_len[channel_id] = ve_info.proc_info_len; 809 | ve_debug_proc_info.proc_buf[channel_id] = 810 | ve_debug_proc_info.data + 811 | channel_id * VE_DEBUGFS_BUF_SIZE; 812 | break; 813 | } 814 | case IOCTL_COPY_PROC_INFO: { 815 | unsigned char channel_id; 816 | 817 | channel_id = ve_debug_proc_info.cur_channel_id; 818 | if (copy_from_user(ve_debug_proc_info.proc_buf[channel_id], 819 | (void __user *)arg, 820 | ve_debug_proc_info.proc_len[channel_id])) { 821 | dev_err(cedar_devp->platform_dev, 822 | "IOCTL_COPY_PROC_INFO copy_from_user fail\n"); 823 | mutex_unlock(&ve_debug_proc_info.lock_proc); 824 | return -EFAULT; 825 | } 826 | mutex_unlock(&ve_debug_proc_info.lock_proc); 827 | break; 828 | } 829 | case IOCTL_STOP_PROC_INFO: { 830 | unsigned char channel_id; 831 | 832 | channel_id = arg; 833 | ve_debug_proc_info.proc_buf[channel_id] = NULL; 834 | 835 | break; 836 | } 837 | case IOCTL_RELEASE_LOCK: { 838 | int lock_ctl_ret = 0; 839 | do { 840 | u32 lock_type = arg; 841 | struct ve_info *vi = filp->private_data; 842 | 843 | if (!(vi->lock_flags & lock_type)) { 844 | dev_err(cedar_devp->platform_dev, 845 | "Not lock? 
flags: '%x/%x'.", 846 | vi->lock_flags, lock_type); 847 | lock_ctl_ret = -1; 848 | break; /* break 'do...while' */ 849 | } 850 | 851 | mutex_lock(&vi->lock_flag_io); 852 | vi->lock_flags &= (~lock_type); 853 | mutex_unlock(&vi->lock_flag_io); 854 | 855 | if (lock_type == VE_LOCK_VDEC) 856 | mutex_unlock(&cedar_devp->lock_vdec); 857 | else if (lock_type == VE_LOCK_VENC) 858 | mutex_unlock(&cedar_devp->lock_venc); 859 | else if (lock_type == VE_LOCK_JDEC) 860 | mutex_unlock(&cedar_devp->lock_jdec); 861 | else if (lock_type == VE_LOCK_00_REG) 862 | mutex_unlock(&cedar_devp->lock_00_reg); 863 | else if (lock_type == VE_LOCK_04_REG) 864 | mutex_unlock(&cedar_devp->lock_04_reg); 865 | else 866 | dev_err(cedar_devp->platform_dev, 867 | "invalid lock type '%d'", lock_type); 868 | } while (0); 869 | return lock_ctl_ret; 870 | } 871 | case IOCTL_GET_IOMMU_ADDR: { 872 | int ret, i; 873 | struct sg_table *sgt, *sgt_bak; 874 | struct scatterlist *sgl, *sgl_bak; 875 | struct user_iommu_param sUserIommuParam; 876 | struct cedarv_iommu_buffer *pVeIommuBuf = NULL; 877 | 878 | pVeIommuBuf = (struct cedarv_iommu_buffer *)kmalloc( 879 | sizeof(struct cedarv_iommu_buffer), GFP_KERNEL); 880 | if (pVeIommuBuf == NULL) { 881 | dev_err(cedar_devp->platform_dev, 882 | "IOCTL_GET_IOMMU_ADDR malloc cedarv_iommu_buffererror\n"); 883 | return -EFAULT; 884 | } 885 | if (copy_from_user(&sUserIommuParam, (void __user *)arg, 886 | sizeof(struct user_iommu_param))) { 887 | dev_err(cedar_devp->platform_dev, 888 | "IOCTL_GET_IOMMU_ADDR copy_from_user error"); 889 | return -EFAULT; 890 | } 891 | 892 | pVeIommuBuf->fd = sUserIommuParam.fd; 893 | pVeIommuBuf->dma_buf = dma_buf_get(pVeIommuBuf->fd); 894 | if (pVeIommuBuf->dma_buf < 0) { 895 | dev_err(cedar_devp->platform_dev, 896 | "ve get dma_buf error"); 897 | return -EFAULT; 898 | } 899 | 900 | pVeIommuBuf->attachment = dma_buf_attach( 901 | pVeIommuBuf->dma_buf, cedar_devp->platform_dev); 902 | if (pVeIommuBuf->attachment < 0) { 903 | dev_err(cedar_devp->platform_dev, 904 | "ve get dma_buf_attachment error"); 905 | goto RELEASE_DMA_BUF; 906 | } 907 | 908 | sgt = dma_buf_map_attachment(pVeIommuBuf->attachment, 909 | DMA_BIDIRECTIONAL); 910 | 911 | sgt_bak = kmalloc(sizeof(struct sg_table), 912 | GFP_KERNEL | __GFP_ZERO); 913 | if (sgt_bak == NULL) 914 | dev_err(cedar_devp->platform_dev, "alloc sgt fail\n"); 915 | 916 | ret = sg_alloc_table(sgt_bak, sgt->nents, GFP_KERNEL); 917 | if (ret != 0) 918 | dev_err(cedar_devp->platform_dev, "alloc sgt fail\n"); 919 | 920 | sgl_bak = sgt_bak->sgl; 921 | for_each_sg (sgt->sgl, sgl, sgt->nents, i) { 922 | sg_set_page(sgl_bak, sg_page(sgl), sgl->length, 923 | sgl->offset); 924 | sgl_bak = sg_next(sgl_bak); 925 | } 926 | 927 | pVeIommuBuf->sgt = sgt_bak; 928 | if (pVeIommuBuf->sgt < 0) { 929 | dev_err(cedar_devp->platform_dev, 930 | "ve get sg_table error\n"); 931 | goto RELEASE_DMA_BUF; 932 | } 933 | 934 | ret = dma_map_sg(cedar_devp->platform_dev, 935 | pVeIommuBuf->sgt->sgl, pVeIommuBuf->sgt->nents, 936 | DMA_BIDIRECTIONAL); 937 | if (ret != 1) { 938 | dev_err(cedar_devp->platform_dev, 939 | "ve dma_map_sg error\n"); 940 | goto RELEASE_DMA_BUF; 941 | } 942 | 943 | pVeIommuBuf->iommu_addr = sg_dma_address(pVeIommuBuf->sgt->sgl); 944 | sUserIommuParam.iommu_addr = 945 | (unsigned int)(pVeIommuBuf->iommu_addr & 0xffffffff); 946 | 947 | if (copy_to_user((void __user *)arg, &sUserIommuParam, 948 | sizeof(struct user_iommu_param))) { 949 | dev_err(cedar_devp->platform_dev, 950 | "ve get iommu copy_to_user error\n"); 951 | goto RELEASE_DMA_BUF; 
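/* Failures after dma_buf_get() jump to RELEASE_DMA_BUF below, which unmaps and detaches whatever has been set up and drops the dma_buf reference before returning an error. */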
952 | } 953 | 954 | pVeIommuBuf->p_id = current->tgid; 955 | dev_dbg(cedar_devp->platform_dev, 956 | "fd:%d, iommu_addr:%lx, dma_buf:%p, dma_buf_attach:%p, sg_table:%p, nents:%d, pid:%d\n", 957 | pVeIommuBuf->fd, pVeIommuBuf->iommu_addr, 958 | pVeIommuBuf->dma_buf, pVeIommuBuf->attachment, 959 | pVeIommuBuf->sgt, pVeIommuBuf->sgt->nents, 960 | pVeIommuBuf->p_id); 961 | 962 | mutex_lock(&cedar_devp->lock_mem); 963 | aw_mem_list_add_tail(&pVeIommuBuf->i_list, &cedar_devp->list); 964 | mutex_unlock(&cedar_devp->lock_mem); 965 | break; 966 | 967 | RELEASE_DMA_BUF: 968 | if (pVeIommuBuf->dma_buf > 0) { 969 | if (pVeIommuBuf->attachment > 0) { 970 | if (pVeIommuBuf->sgt > 0) { 971 | dma_unmap_sg(cedar_devp->platform_dev, 972 | pVeIommuBuf->sgt->sgl, 973 | pVeIommuBuf->sgt->nents, 974 | DMA_BIDIRECTIONAL); 975 | dma_buf_unmap_attachment( 976 | pVeIommuBuf->attachment, 977 | pVeIommuBuf->sgt, 978 | DMA_BIDIRECTIONAL); 979 | sg_free_table(pVeIommuBuf->sgt); 980 | kfree(pVeIommuBuf->sgt); 981 | } 982 | 983 | dma_buf_detach(pVeIommuBuf->dma_buf, 984 | pVeIommuBuf->attachment); 985 | } 986 | 987 | dma_buf_put(pVeIommuBuf->dma_buf); 988 | return -1; 989 | } 990 | kfree(pVeIommuBuf); 991 | break; 992 | } 993 | case IOCTL_FREE_IOMMU_ADDR: { 994 | struct user_iommu_param sUserIommuParam; 995 | struct cedarv_iommu_buffer *pVeIommuBuf; 996 | 997 | if (copy_from_user(&sUserIommuParam, (void __user *)arg, 998 | sizeof(struct user_iommu_param))) { 999 | dev_err(cedar_devp->platform_dev, 1000 | "IOCTL_FREE_IOMMU_ADDR copy_from_user error"); 1001 | return -EFAULT; 1002 | } 1003 | aw_mem_list_for_each_entry(pVeIommuBuf, &cedar_devp->list, 1004 | i_list) 1005 | { 1006 | if (pVeIommuBuf->fd == sUserIommuParam.fd && 1007 | pVeIommuBuf->p_id == current->tgid) { 1008 | dev_dbg(cedar_devp->platform_dev, 1009 | "free: fd:%d, iommu_addr:%lx, dma_buf:%p, dma_buf_attach:%p, sg_table:%p nets:%d, pid:%d\n", 1010 | pVeIommuBuf->fd, 1011 | pVeIommuBuf->iommu_addr, 1012 | pVeIommuBuf->dma_buf, 1013 | pVeIommuBuf->attachment, 1014 | pVeIommuBuf->sgt, 1015 | pVeIommuBuf->sgt->nents, 1016 | pVeIommuBuf->p_id); 1017 | 1018 | if (pVeIommuBuf->dma_buf > 0) { 1019 | if (pVeIommuBuf->attachment > 0) { 1020 | if (pVeIommuBuf->sgt > 0) { 1021 | dma_unmap_sg( 1022 | cedar_devp 1023 | ->platform_dev, 1024 | pVeIommuBuf->sgt 1025 | ->sgl, 1026 | pVeIommuBuf->sgt 1027 | ->nents, 1028 | DMA_BIDIRECTIONAL); 1029 | dma_buf_unmap_attachment( 1030 | pVeIommuBuf 1031 | ->attachment, 1032 | pVeIommuBuf->sgt, 1033 | DMA_BIDIRECTIONAL); 1034 | sg_free_table( 1035 | pVeIommuBuf 1036 | ->sgt); 1037 | kfree(pVeIommuBuf->sgt); 1038 | } 1039 | 1040 | dma_buf_detach( 1041 | pVeIommuBuf->dma_buf, 1042 | pVeIommuBuf->attachment); 1043 | } 1044 | 1045 | dma_buf_put(pVeIommuBuf->dma_buf); 1046 | } 1047 | 1048 | mutex_lock(&cedar_devp->lock_mem); 1049 | aw_mem_list_del(&pVeIommuBuf->i_list); 1050 | kfree(pVeIommuBuf); 1051 | mutex_unlock(&cedar_devp->lock_mem); 1052 | break; 1053 | } 1054 | } 1055 | break; 1056 | } 1057 | default: 1058 | return -1; 1059 | } 1060 | return ret; 1061 | } 1062 | 1063 | static int cedardev_open(struct inode *inode, struct file *filp) 1064 | { 1065 | struct ve_info *info; 1066 | 1067 | info = kmalloc(sizeof(struct ve_info), GFP_KERNEL); 1068 | if (!info) 1069 | return -ENOMEM; 1070 | 1071 | info->set_vol_flag = 0; 1072 | 1073 | filp->private_data = info; 1074 | if (down_interruptible(&cedar_devp->sem)) { 1075 | return -ERESTARTSYS; 1076 | } 1077 | 1078 | /* init other resource here */ 1079 | if (0 == cedar_devp->ref_count) { 1080 | 
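/* First opener (ref_count == 0): clear the decode/encode/JPEG IRQ flags so a stale completion cannot satisfy the next IOCTL_WAIT_VE_* or IOCTL_WAIT_JPEG_DEC wait. */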
cedar_devp->de_irq_flag = 0; 1081 | cedar_devp->en_irq_flag = 0; 1082 | cedar_devp->jpeg_irq_flag = 0; 1083 | } 1084 | 1085 | up(&cedar_devp->sem); 1086 | nonseekable_open(inode, filp); 1087 | 1088 | mutex_init(&info->lock_flag_io); 1089 | info->lock_flags = 0; 1090 | 1091 | return 0; 1092 | } 1093 | 1094 | static int cedardev_release(struct inode *inode, struct file *filp) 1095 | { 1096 | struct ve_info *info; 1097 | 1098 | info = filp->private_data; 1099 | 1100 | mutex_lock(&info->lock_flag_io); 1101 | /* lock status */ 1102 | if (info->lock_flags) { 1103 | dev_warn(cedar_devp->platform_dev, "release lost-lock..."); 1104 | if (info->lock_flags & VE_LOCK_VDEC) 1105 | mutex_unlock(&cedar_devp->lock_vdec); 1106 | 1107 | if (info->lock_flags & VE_LOCK_VENC) 1108 | mutex_unlock(&cedar_devp->lock_venc); 1109 | 1110 | if (info->lock_flags & VE_LOCK_JDEC) 1111 | mutex_unlock(&cedar_devp->lock_jdec); 1112 | 1113 | if (info->lock_flags & VE_LOCK_00_REG) 1114 | mutex_unlock(&cedar_devp->lock_00_reg); 1115 | 1116 | if (info->lock_flags & VE_LOCK_04_REG) 1117 | mutex_unlock(&cedar_devp->lock_04_reg); 1118 | 1119 | info->lock_flags = 0; 1120 | } 1121 | 1122 | mutex_unlock(&info->lock_flag_io); 1123 | mutex_destroy(&info->lock_flag_io); 1124 | 1125 | if (down_interruptible(&cedar_devp->sem)) { 1126 | return -ERESTARTSYS; 1127 | } 1128 | 1129 | /* release other resource here */ 1130 | if (0 == cedar_devp->ref_count) { 1131 | cedar_devp->de_irq_flag = 1; 1132 | cedar_devp->en_irq_flag = 1; 1133 | cedar_devp->jpeg_irq_flag = 1; 1134 | } 1135 | up(&cedar_devp->sem); 1136 | 1137 | kfree(info); 1138 | return 0; 1139 | } 1140 | 1141 | static void cedardev_vma_open(struct vm_area_struct *vma) 1142 | { 1143 | } 1144 | 1145 | static void cedardev_vma_close(struct vm_area_struct *vma) 1146 | { 1147 | } 1148 | 1149 | static struct vm_operations_struct cedardev_remap_vm_ops = { 1150 | .open = cedardev_vma_open, 1151 | .close = cedardev_vma_close, 1152 | }; 1153 | 1154 | #ifdef CONFIG_PM 1155 | static int snd_sw_cedar_suspend(struct platform_device *pdev, 1156 | pm_message_t state) 1157 | { 1158 | int ret = 0; 1159 | 1160 | printk("[cedar] standby suspend\n"); 1161 | ret = disable_cedar_hw_clk(); 1162 | 1163 | if (ret < 0) { 1164 | dev_warn(cedar_devp->platform_dev, 1165 | "cedar clk disable somewhere error!\n"); 1166 | return -EFAULT; 1167 | } 1168 | 1169 | return 0; 1170 | } 1171 | 1172 | static int snd_sw_cedar_resume(struct platform_device *pdev) 1173 | { 1174 | int ret = 0; 1175 | 1176 | printk("[cedar] standby resume\n"); 1177 | 1178 | if (cedar_devp->ref_count == 0) { 1179 | return 0; 1180 | } 1181 | 1182 | ret = enable_cedar_hw_clk(); 1183 | if (ret < 0) { 1184 | dev_warn(cedar_devp->platform_dev, 1185 | "cedar clk enable somewhere error!\n"); 1186 | return -EFAULT; 1187 | } 1188 | return 0; 1189 | } 1190 | #endif 1191 | 1192 | static int cedardev_mmap(struct file *filp, struct vm_area_struct *vma) 1193 | { 1194 | unsigned long temp_pfn; 1195 | 1196 | if (vma->vm_end - vma->vm_start == 0) { 1197 | dev_warn(cedar_devp->platform_dev, 1198 | "vma->vm_end is equal vma->vm_start : %lx\n", 1199 | vma->vm_start); 1200 | return 0; 1201 | } 1202 | if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) { 1203 | dev_err(cedar_devp->platform_dev, 1204 | "the vma->vm_pgoff is %lx,it is large than the largest page number\n", 1205 | vma->vm_pgoff); 1206 | return -EINVAL; 1207 | } 1208 | 1209 | temp_pfn = cedar_devp->phy_addr >> 12; 1210 | 1211 | /* Set reserved and I/O flag for the area. 
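The mapping created below exposes the VE register window at cedar_devp->phy_addr to userspace as uncached I/O memory.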
*/ 1212 | vma->vm_flags |= /*VM_RESERVED | */ VM_IO; 1213 | /* Select uncached access. */ 1214 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 1215 | 1216 | if (io_remap_pfn_range(vma, vma->vm_start, temp_pfn, 1217 | vma->vm_end - vma->vm_start, 1218 | vma->vm_page_prot)) { 1219 | return -EAGAIN; 1220 | } 1221 | 1222 | vma->vm_ops = &cedardev_remap_vm_ops; 1223 | cedardev_vma_open(vma); 1224 | 1225 | return 0; 1226 | } 1227 | 1228 | static const struct file_operations cedardev_fops = { 1229 | .owner = THIS_MODULE, 1230 | .mmap = cedardev_mmap, 1231 | .open = cedardev_open, 1232 | .release = cedardev_release, 1233 | .llseek = no_llseek, 1234 | .unlocked_ioctl = compat_cedardev_ioctl, 1235 | #ifdef CONFIG_COMPAT 1236 | .compat_ioctl = compat_cedardev_ioctl, 1237 | #endif 1238 | }; 1239 | 1240 | static int ve_debugfs_open(struct inode *inode, struct file *file) 1241 | { 1242 | int i = 0; 1243 | char *pData; 1244 | struct ve_debugfs_proc *pVeProc; 1245 | 1246 | pVeProc = kmalloc(sizeof(struct ve_debugfs_proc), GFP_KERNEL); 1247 | if (pVeProc == NULL) { 1248 | dev_err(cedar_devp->platform_dev, "kmalloc pVeProc fail\n"); 1249 | return -ENOMEM; 1250 | } 1251 | pVeProc->len = 0; 1252 | memset(pVeProc->data, 0, VE_DEBUGFS_BUF_SIZE * VE_DEBUGFS_MAX_CHANNEL); 1253 | 1254 | pData = pVeProc->data; 1255 | mutex_lock(&ve_debug_proc_info.lock_proc); 1256 | for (i = 0; i < VE_DEBUGFS_MAX_CHANNEL; i++) { 1257 | if (ve_debug_proc_info.proc_buf[i] != NULL) { 1258 | memcpy(pData, ve_debug_proc_info.proc_buf[i], 1259 | ve_debug_proc_info.proc_len[i]); 1260 | pData += ve_debug_proc_info.proc_len[i]; 1261 | pVeProc->len += ve_debug_proc_info.proc_len[i]; 1262 | } 1263 | } 1264 | mutex_unlock(&ve_debug_proc_info.lock_proc); 1265 | 1266 | file->private_data = pVeProc; 1267 | return 0; 1268 | } 1269 | 1270 | static ssize_t ve_debugfs_read(struct file *file, char __user *user_buf, 1271 | size_t nbytes, loff_t *ppos) 1272 | { 1273 | struct ve_debugfs_proc *pVeProc = file->private_data; 1274 | 1275 | if (pVeProc->len == 0) { 1276 | dev_dbg(cedar_devp->platform_dev, 1277 | "there is no any codec working currently\n"); 1278 | return 0; 1279 | } 1280 | 1281 | return simple_read_from_buffer(user_buf, nbytes, ppos, pVeProc->data, 1282 | pVeProc->len); 1283 | } 1284 | 1285 | static int ve_debugfs_release(struct inode *inode, struct file *file) 1286 | { 1287 | struct ve_debugfs_proc *pVeProc = file->private_data; 1288 | 1289 | kfree(pVeProc); 1290 | pVeProc = NULL; 1291 | file->private_data = NULL; 1292 | 1293 | return 0; 1294 | } 1295 | 1296 | static const struct file_operations ve_debugfs_fops = { 1297 | .owner = THIS_MODULE, 1298 | .open = ve_debugfs_open, 1299 | .llseek = no_llseek, 1300 | .read = ve_debugfs_read, 1301 | .release = ve_debugfs_release, 1302 | }; 1303 | 1304 | int sunxi_ve_debug_register_driver(void) 1305 | { 1306 | struct dentry *dent; 1307 | 1308 | ve_debugfs_root = debugfs_create_dir("mpp", 0); 1309 | 1310 | if (ve_debugfs_root == NULL) { 1311 | dev_err(cedar_devp->platform_dev, 1312 | "get debugfs_mpp_root is NULL, please check mpp\n"); 1313 | return -ENOENT; 1314 | } 1315 | 1316 | dent = debugfs_create_file("ve", 0444, ve_debugfs_root, NULL, 1317 | &ve_debugfs_fops); 1318 | if (IS_ERR_OR_NULL(dent)) { 1319 | dev_err(cedar_devp->platform_dev, 1320 | "Unable to create debugfs status file.\n"); 1321 | debugfs_remove_recursive(ve_debugfs_root); 1322 | ve_debugfs_root = NULL; 1323 | return -ENODEV; 1324 | } 1325 | 1326 | return 0; 1327 | } 1328 | 1329 | void sunxi_ve_debug_unregister_driver(void) 
1330 | { 1331 | if (ve_debugfs_root == NULL) 1332 | return; 1333 | debugfs_remove_recursive(ve_debugfs_root); 1334 | ve_debugfs_root = NULL; 1335 | } 1336 | 1337 | static int cedardev_init(struct platform_device *pdev) 1338 | { 1339 | int ret = 0; 1340 | int i = 0; 1341 | int devno; 1342 | struct device_node *node; 1343 | dev_t dev; 1344 | const struct cedar_variant *variant; 1345 | struct resource *res; 1346 | 1347 | node = pdev->dev.of_node; 1348 | dev = 0; 1349 | dev_info(&pdev->dev, "sunxi cedar version " DRV_VERSION "\n"); 1350 | dev_dbg(cedar_devp->platform_dev, "install start!!!\n"); 1351 | 1352 | variant = of_device_get_match_data(&pdev->dev); 1353 | if (!variant) 1354 | return -EINVAL; 1355 | 1356 | spin_lock_init(&cedar_spin_lock); 1357 | cedar_devp = kcalloc(1, sizeof(struct cedar_dev), GFP_KERNEL); 1358 | if (cedar_devp == NULL) { 1359 | dev_warn(cedar_devp->platform_dev, 1360 | "malloc mem for cedar device err\n"); 1361 | return -ENOMEM; 1362 | } 1363 | 1364 | cedar_devp->platform_dev = &pdev->dev; 1365 | cedar_devp->capabilities = variant->capabilities; 1366 | 1367 | ret = sunxi_sram_claim(cedar_devp->platform_dev); 1368 | if (ret) { 1369 | dev_err(cedar_devp->platform_dev, "Failed to claim SRAM\n"); 1370 | goto err_mem; 1371 | } 1372 | 1373 | cedar_devp->ahb_clk = devm_clk_get(cedar_devp->platform_dev, "ahb"); 1374 | if (IS_ERR(cedar_devp->ahb_clk)) { 1375 | dev_err(cedar_devp->platform_dev, "Failed to get AHB clock\n"); 1376 | ret = PTR_ERR(cedar_devp->ahb_clk); 1377 | goto err_sram; 1378 | } 1379 | 1380 | cedar_devp->mod_clk = devm_clk_get(cedar_devp->platform_dev, "mod"); 1381 | if (IS_ERR(cedar_devp->mod_clk)) { 1382 | dev_err(cedar_devp->platform_dev, "Failed to get MOD clock\n"); 1383 | ret = PTR_ERR(cedar_devp->mod_clk); 1384 | goto err_sram; 1385 | } 1386 | 1387 | cedar_devp->ram_clk = devm_clk_get(cedar_devp->platform_dev, "ram"); 1388 | if (IS_ERR(cedar_devp->ram_clk)) { 1389 | dev_err(cedar_devp->platform_dev, "Failed to get RAM clock\n"); 1390 | ret = PTR_ERR(cedar_devp->ram_clk); 1391 | goto err_sram; 1392 | } 1393 | 1394 | cedar_devp->rstc = 1395 | devm_reset_control_get(cedar_devp->platform_dev, NULL); 1396 | if (IS_ERR(cedar_devp->rstc)) { 1397 | dev_err(cedar_devp->platform_dev, 1398 | "Failed to get reset control\n"); 1399 | ret = PTR_ERR(cedar_devp->rstc); 1400 | goto err_sram; 1401 | } 1402 | 1403 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1404 | cedar_devp->regs_macc = 1405 | devm_ioremap_resource(cedar_devp->platform_dev, res); 1406 | if (IS_ERR(cedar_devp->regs_macc)) { 1407 | dev_err(cedar_devp->platform_dev, "Failed to map registers\n"); 1408 | ret = PTR_ERR(cedar_devp->regs_macc); 1409 | goto err_sram; 1410 | } 1411 | cedar_devp->phy_addr = res->start; 1412 | 1413 | ret = clk_set_rate(cedar_devp->mod_clk, variant->mod_rate); 1414 | if (ret) { 1415 | dev_err(cedar_devp->platform_dev, "Failed to set clock rate\n"); 1416 | goto err_sram; 1417 | } 1418 | 1419 | ret = clk_prepare_enable(cedar_devp->ahb_clk); 1420 | if (ret) { 1421 | dev_err(cedar_devp->platform_dev, 1422 | "Failed to enable AHB clock\n"); 1423 | goto err_sram; 1424 | } 1425 | 1426 | ret = clk_prepare_enable(cedar_devp->mod_clk); 1427 | if (ret) { 1428 | dev_err(cedar_devp->platform_dev, 1429 | "Failed to enable MOD clock\n"); 1430 | goto err_ahb_clk; 1431 | } 1432 | 1433 | ret = clk_prepare_enable(cedar_devp->ram_clk); 1434 | if (ret) { 1435 | dev_err(cedar_devp->platform_dev, 1436 | "Failed to enable RAM clock\n"); 1437 | goto err_mod_clk; 1438 | } 1439 | 1440 | ret = 
reset_control_reset(cedar_devp->rstc); 1441 | if (ret) { 1442 | dev_err(cedar_devp->platform_dev, "Failed to apply reset\n"); 1443 | goto err_ram_clk; 1444 | } 1445 | 1446 | cedar_devp->irq = irq_of_parse_and_map(node, 0); 1447 | dev_info(cedar_devp->platform_dev, "cedar-ve the get irq is %d\n", 1448 | cedar_devp->irq); 1449 | if (cedar_devp->irq <= 0) 1450 | dev_err(cedar_devp->platform_dev, "Can't parse IRQ"); 1451 | 1452 | sema_init(&cedar_devp->sem, 1); 1453 | init_waitqueue_head(&cedar_devp->wq); 1454 | 1455 | ret = request_irq(cedar_devp->irq, VideoEngineInterupt, 0, "cedar_dev", 1456 | cedar_devp); 1457 | if (ret < 0) { 1458 | dev_err(cedar_devp->platform_dev, "request irq err\n"); 1459 | return -EINVAL; 1460 | } 1461 | 1462 | /*register or alloc the device number.*/ 1463 | if (g_dev_major) { 1464 | dev = MKDEV(g_dev_major, g_dev_minor); 1465 | ret = register_chrdev_region(dev, 1, "cedar_dev"); 1466 | } else { 1467 | ret = alloc_chrdev_region(&dev, g_dev_minor, 1, "cedar_dev"); 1468 | g_dev_major = MAJOR(dev); 1469 | g_dev_minor = MINOR(dev); 1470 | } 1471 | if (ret < 0) { 1472 | dev_err(cedar_devp->platform_dev, 1473 | "cedar_dev: can't get major %d\n", g_dev_major); 1474 | return ret; 1475 | } 1476 | 1477 | /* Create char device */ 1478 | devno = MKDEV(g_dev_major, g_dev_minor); 1479 | cdev_init(&cedar_devp->cdev, &cedardev_fops); 1480 | cedar_devp->cdev.owner = THIS_MODULE; 1481 | /* cedar_devp->cdev.ops = &cedardev_fops; */ 1482 | ret = cdev_add(&cedar_devp->cdev, devno, 1); 1483 | if (ret) { 1484 | dev_warn(cedar_devp->platform_dev, "Err:%d add cedardev", ret); 1485 | } 1486 | cedar_devp->class = class_create(THIS_MODULE, "cedar_dev"); 1487 | cedar_devp->dev = device_create(cedar_devp->class, NULL, devno, NULL, 1488 | "cedar_dev"); 1489 | 1490 | timer_setup(&cedar_devp->cedar_engine_timer, cedar_engine_for_events, 1491 | 0); 1492 | timer_setup(&cedar_devp->cedar_engine_timer_rel, 1493 | cedar_engine_for_timer_rel, 0); 1494 | 1495 | mutex_init(&cedar_devp->lock_vdec); 1496 | mutex_init(&cedar_devp->lock_venc); 1497 | mutex_init(&cedar_devp->lock_jdec); 1498 | mutex_init(&cedar_devp->lock_00_reg); 1499 | mutex_init(&cedar_devp->lock_04_reg); 1500 | mutex_init(&cedar_devp->lock_mem); 1501 | 1502 | ret = sunxi_ve_debug_register_driver(); 1503 | if (ret) { 1504 | dev_err(cedar_devp->platform_dev, 1505 | "sunxi ve debug register driver failed!\n"); 1506 | return ret; 1507 | } 1508 | 1509 | memset(&ve_debug_proc_info, 0, sizeof(struct ve_debugfs_buffer)); 1510 | for (i = 0; i < VE_DEBUGFS_MAX_CHANNEL; i++) { 1511 | ve_debug_proc_info.proc_buf[i] = NULL; 1512 | } 1513 | ve_debug_proc_info.data = kmalloc( 1514 | VE_DEBUGFS_BUF_SIZE * VE_DEBUGFS_MAX_CHANNEL, GFP_KERNEL); 1515 | if (!ve_debug_proc_info.data) { 1516 | dev_err(cedar_devp->platform_dev, 1517 | "kmalloc proc buffer failed!\n"); 1518 | return -ENOMEM; 1519 | } 1520 | mutex_init(&ve_debug_proc_info.lock_proc); 1521 | dev_dbg(cedar_devp->platform_dev, 1522 | "ve_debug_proc_info:%p, data:%p, lock:%p\n", 1523 | &ve_debug_proc_info, ve_debug_proc_info.data, 1524 | &ve_debug_proc_info.lock_proc); 1525 | 1526 | dev_dbg(cedar_devp->platform_dev, "install end!!!\n"); 1527 | return 0; 1528 | 1529 | err_ram_clk: 1530 | clk_disable_unprepare(cedar_devp->ram_clk); 1531 | err_mod_clk: 1532 | clk_disable_unprepare(cedar_devp->mod_clk); 1533 | err_ahb_clk: 1534 | clk_disable_unprepare(cedar_devp->ahb_clk); 1535 | err_sram: 1536 | sunxi_sram_release(cedar_devp->platform_dev); 1537 | err_mem: 1538 | kfree(cedar_devp); 1539 | return ret; 1540 | 
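/* Error unwind above releases resources in reverse order of acquisition: RAM, MOD and AHB clocks, then the claimed SRAM region, then the device structure. */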
} 1541 | 1542 | static void cedardev_exit(void) 1543 | { 1544 | dev_t dev; 1545 | dev = MKDEV(g_dev_major, g_dev_minor); 1546 | 1547 | free_irq(cedar_devp->irq, cedar_devp); 1548 | 1549 | /* Destroy char device */ 1550 | if (cedar_devp) { 1551 | cdev_del(&cedar_devp->cdev); 1552 | device_destroy(cedar_devp->class, dev); 1553 | class_destroy(cedar_devp->class); 1554 | } 1555 | 1556 | reset_control_assert(cedar_devp->rstc); 1557 | clk_disable_unprepare(cedar_devp->ram_clk); 1558 | clk_disable_unprepare(cedar_devp->mod_clk); 1559 | clk_disable_unprepare(cedar_devp->ahb_clk); 1560 | sunxi_sram_release(cedar_devp->platform_dev); 1561 | 1562 | unregister_chrdev_region(dev, 1); 1563 | if (cedar_devp) { 1564 | kfree(cedar_devp); 1565 | } 1566 | 1567 | sunxi_ve_debug_unregister_driver(); 1568 | kfree(ve_debug_proc_info.data); 1569 | } 1570 | 1571 | static int sunxi_cedar_remove(struct platform_device *pdev) 1572 | { 1573 | cedardev_exit(); 1574 | return 0; 1575 | } 1576 | 1577 | static int sunxi_cedar_probe(struct platform_device *pdev) 1578 | { 1579 | cedardev_init(pdev); 1580 | return 0; 1581 | } 1582 | 1583 | static struct cedar_variant sun8i_v3_quirk = { 1584 | .capabilities = CEDARV_ISP_NEW, 1585 | .mod_rate = 402000000, 1586 | }; 1587 | 1588 | static struct cedar_variant sun8i_h3_quirk = { 1589 | .capabilities = 0, 1590 | .mod_rate = 402000000, 1591 | }; 1592 | 1593 | static struct cedar_variant suniv_f1c100s_quirk = { 1594 | .capabilities = CEDARV_ISP_OLD, 1595 | .mod_rate = 300000000, 1596 | }; 1597 | 1598 | static struct of_device_id sunxi_cedar_ve_match[] = { 1599 | { .compatible = "allwinner,sun8i-v3-cedar", .data = &sun8i_v3_quirk }, 1600 | { .compatible = "allwinner,sun8i-h3-cedar", .data = &sun8i_h3_quirk }, 1601 | { .compatible = "allwinner,suniv-f1c100s-cedar", .data = &suniv_f1c100s_quirk }, 1602 | {} 1603 | }; 1604 | MODULE_DEVICE_TABLE(of, sunxi_cedar_ve_match); 1605 | 1606 | static struct platform_driver sunxi_cedar_driver = { 1607 | .probe = sunxi_cedar_probe, 1608 | .remove = sunxi_cedar_remove, 1609 | #if defined(CONFIG_PM) 1610 | .suspend = snd_sw_cedar_suspend, 1611 | .resume = snd_sw_cedar_resume, 1612 | #endif 1613 | .driver = { 1614 | .name = "sunxi-cedar", 1615 | .owner = THIS_MODULE, 1616 | .of_match_table = sunxi_cedar_ve_match, 1617 | }, 1618 | }; 1619 | 1620 | static int __init sunxi_cedar_init(void) 1621 | { 1622 | return platform_driver_register(&sunxi_cedar_driver); 1623 | } 1624 | 1625 | static void __exit sunxi_cedar_exit(void) 1626 | { 1627 | platform_driver_unregister(&sunxi_cedar_driver); 1628 | } 1629 | 1630 | module_init(sunxi_cedar_init); 1631 | module_exit(sunxi_cedar_exit); 1632 | 1633 | MODULE_AUTHOR("Soft-Reuuimlla"); 1634 | MODULE_DESCRIPTION("User mode CEDAR device interface"); 1635 | MODULE_LICENSE("GPL"); 1636 | MODULE_VERSION(DRV_VERSION); 1637 | MODULE_ALIAS("platform:cedarx-sunxi"); 1638 | -------------------------------------------------------------------------------- /ve/cedar_ve.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Filename: cedarv_ve.h 3 | * Version: 0.01alpha 4 | * Description: Video engine driver API, Don't modify it in user space. 5 | * License: GPLv2 6 | * 7 | * Author : xyliu 8 | * Date : 2016/04/13 9 | * 10 | * This program is free software; you can redistribute it and/or modify 11 | * it under the terms of the GNU General Public License version 2 as 12 | * published by the Free Software Foundation. 
13 | * 14 | * This program is distributed in the hope that it will be useful, 15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 | * GNU General Public License for more details. 18 | */ 19 | /* Notice: It's video engine driver API, Don't modify it in user space. */ 20 | #ifndef _CEDAR_VE_H_ 21 | #define _CEDAR_VE_H_ 22 | 23 | enum IOCTL_CMD 24 | { 25 | IOCTL_UNKOWN = 0x100, 26 | IOCTL_GET_ENV_INFO, 27 | IOCTL_WAIT_VE_DE, 28 | IOCTL_WAIT_VE_EN, 29 | IOCTL_RESET_VE, 30 | IOCTL_ENABLE_VE, 31 | IOCTL_DISABLE_VE, 32 | IOCTL_SET_VE_FREQ, 33 | 34 | IOCTL_CONFIG_AVS2 = 0x200, 35 | IOCTL_GETVALUE_AVS2, 36 | IOCTL_PAUSE_AVS2, 37 | IOCTL_START_AVS2, 38 | IOCTL_RESET_AVS2, 39 | IOCTL_ADJUST_AVS2, 40 | IOCTL_ENGINE_REQ, 41 | IOCTL_ENGINE_REL, 42 | IOCTL_ENGINE_CHECK_DELAY, 43 | IOCTL_GET_IC_VER, 44 | IOCTL_ADJUST_AVS2_ABS, 45 | IOCTL_FLUSH_CACHE, 46 | IOCTL_SET_REFCOUNT, 47 | IOCTL_FLUSH_CACHE_ALL, 48 | IOCTL_TEST_VERSION, 49 | 50 | IOCTL_GET_LOCK = 0x310, 51 | IOCTL_RELEASE_LOCK, 52 | 53 | IOCTL_SET_VOL = 0x400, 54 | 55 | IOCTL_WAIT_JPEG_DEC = 0x500, 56 | /*for get the ve ref_count for ipc to delete the semphore*/ 57 | IOCTL_GET_REFCOUNT, 58 | 59 | /*for iommu*/ 60 | IOCTL_GET_IOMMU_ADDR, 61 | IOCTL_FREE_IOMMU_ADDR, 62 | 63 | /*for debug*/ 64 | IOCTL_SET_PROC_INFO, 65 | IOCTL_STOP_PROC_INFO, 66 | IOCTL_COPY_PROC_INFO, 67 | 68 | IOCTL_SET_DRAM_HIGH_CHANNAL = 0x600, 69 | }; 70 | 71 | #define VE_LOCK_VDEC 0x01 72 | #define VE_LOCK_VENC 0x02 73 | #define VE_LOCK_JDEC 0x04 74 | #define VE_LOCK_00_REG 0x08 75 | #define VE_LOCK_04_REG 0x10 76 | #define VE_LOCK_ERR 0x80 77 | 78 | struct cedarv_env_infomation 79 | { 80 | unsigned int phymem_start; 81 | int phymem_total_size; 82 | unsigned long address_macc; 83 | }; 84 | 85 | #endif 86 | -------------------------------------------------------------------------------- /ve/cedar_ve_priv.h: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | #ifndef _CEDAR_VE_PRIV_H_ 3 | #define _CEDAR_VE_PRIV_H_ 4 | #include "ve_mem_list.h" 5 | 6 | #ifndef CEDARDEV_MAJOR 7 | #define CEDARDEV_MAJOR (150) 8 | #endif 9 | #ifndef CEDARDEV_MINOR 10 | #define CEDARDEV_MINOR (0) 11 | #endif 12 | 13 | #define VE_CLK_HIGH_WATER (900) 14 | #define VE_CLK_LOW_WATER (100) 15 | 16 | #define PRINTK_IOMMU_ADDR 0 17 | 18 | #define VE_DEBUGFS_MAX_CHANNEL 16 19 | #define VE_DEBUGFS_BUF_SIZE 1024 20 | 21 | #define CEDAR_RUN_LIST_NONULL -1 22 | #define CEDAR_NONBLOCK_TASK 0 23 | #define CEDAR_BLOCK_TASK 1 24 | #define CLK_REL_TIME 10000 25 | #define TIMER_CIRCLE 50 26 | #define TASK_INIT 0x00 27 | #define TASK_TIMEOUT 0x55 28 | #define TASK_RELEASE 0xaa 29 | #define SIG_CEDAR 35 30 | 31 | struct ve_debugfs_proc 32 | { 33 | unsigned int len; 34 | char data[VE_DEBUGFS_BUF_SIZE * VE_DEBUGFS_MAX_CHANNEL]; 35 | }; 36 | 37 | struct ve_debugfs_buffer 38 | { 39 | unsigned char cur_channel_id; 40 | unsigned int proc_len[VE_DEBUGFS_MAX_CHANNEL]; 41 | char *proc_buf[VE_DEBUGFS_MAX_CHANNEL]; 42 | char *data; 43 | struct mutex lock_proc; 44 | }; 45 | 46 | struct __cedarv_task 47 | { 48 | int task_prio; 49 | int ID; 50 | unsigned long timeout; 51 | unsigned int frametime; 52 | unsigned int block_mode; 53 | }; 54 | 55 | struct cedarv_engine_task 56 | { 57 | struct __cedarv_task t; 58 | struct list_head list; 59 | struct task_struct *task_handle; 60 | unsigned int status; 61 | unsigned int running; 62 | unsigned int is_first_task; 63 | }; 64 | 65 | struct 
cedarv_engine_task_info 66 | { 67 | int task_prio; 68 | unsigned int frametime; 69 | unsigned int total_time; 70 | }; 71 | 72 | struct cedarv_regop 73 | { 74 | unsigned long addr; 75 | unsigned int value; 76 | }; 77 | 78 | struct cedarv_env_infomation_compat 79 | { 80 | unsigned int phymem_start; 81 | int phymem_total_size; 82 | uint32_t address_macc; 83 | }; 84 | 85 | struct __cedarv_task_compat 86 | { 87 | int task_prio; 88 | int ID; 89 | uint32_t timeout; 90 | unsigned int frametime; 91 | unsigned int block_mode; 92 | }; 93 | 94 | struct cedarv_regop_compat 95 | { 96 | uint32_t addr; 97 | unsigned int value; 98 | }; 99 | 100 | struct VE_PROC_INFO 101 | { 102 | unsigned char channel_id; 103 | unsigned int proc_info_len; 104 | }; 105 | 106 | struct cedar_dev 107 | { 108 | struct cdev cdev; /* char device struct*/ 109 | struct device *dev; /* ptr to class device struct*/ 110 | struct device *platform_dev; /* ptr to class device struct */ 111 | struct class *class; /* class for auto create device node */ 112 | 113 | struct semaphore sem; /* mutual exclusion semaphore */ 114 | 115 | wait_queue_head_t wq; /* wait queue for poll ops */ 116 | 117 | struct timer_list cedar_engine_timer; 118 | struct timer_list cedar_engine_timer_rel; 119 | 120 | uint32_t irq; /* cedar video engine irq number */ 121 | uint32_t de_irq_flag; /* flag of video decoder engine irq generated */ 122 | uint32_t de_irq_value; /* value of video decoder engine irq */ 123 | uint32_t en_irq_flag; /* flag of video encoder engine irq generated */ 124 | uint32_t en_irq_value; /* value of video encoder engine irq */ 125 | uint32_t irq_has_enable; 126 | uint32_t ref_count; 127 | int last_min_freq; 128 | 129 | uint32_t jpeg_irq_flag; /* flag of video jpeg dec irq generated */ 130 | uint32_t jpeg_irq_value; /* value of video jpeg dec irq */ 131 | 132 | struct mutex lock_vdec; 133 | struct mutex lock_jdec; 134 | struct mutex lock_venc; 135 | struct mutex lock_00_reg; 136 | struct mutex lock_04_reg; 137 | struct aw_mem_list_head list; /* buffer list */ 138 | struct mutex lock_mem; 139 | 140 | struct clk *ahb_clk; 141 | struct clk *mod_clk; 142 | struct clk *ram_clk; 143 | struct reset_control *rstc; 144 | int capabilities; 145 | phys_addr_t phy_addr; 146 | 147 | void __iomem *regs_macc; 148 | }; 149 | 150 | struct ve_info 151 | { /* each object will bind a new file handler */ 152 | unsigned int set_vol_flag; 153 | struct mutex lock_flag_io; 154 | uint32_t lock_flags; /* if flags is 0, means unlock status */ 155 | }; 156 | 157 | struct user_iommu_param 158 | { 159 | int fd; 160 | unsigned int iommu_addr; 161 | }; 162 | 163 | struct cedarv_iommu_buffer 164 | { 165 | struct aw_mem_list_head i_list; 166 | int fd; 167 | unsigned long iommu_addr; 168 | struct dma_buf *dma_buf; 169 | struct dma_buf_attachment *attachment; 170 | struct sg_table *sgt; 171 | int p_id; 172 | }; 173 | 174 | struct cedar_variant 175 | { 176 | int capabilities; 177 | unsigned long mod_rate; 178 | }; 179 | 180 | #define CEDARV_ISP_OLD (1 << 0) 181 | #define CEDARV_ISP_NEW (1 << 1) 182 | 183 | #endif -------------------------------------------------------------------------------- /ve/ve_mem_list.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Filename: ve_mem_list.h 3 | * Version: 0.01alpha 4 | * Description: Video engine driver memory list management. 
5 | * License: GPLv2 6 | * 7 | * Author : yangcaoyuan 8 | * Date : 2017/04/04 9 | * 10 | * This program is free software; you can redistribute it and/or modify 11 | * it under the terms of the GNU General Public License version 2 as 12 | * published by the Free Software Foundation. 13 | * 14 | * This program is distributed in the hope that it will be useful, 15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 | * GNU General Public License for more details. 18 | * 19 | * You should have received a copy of the GNU General Public License 20 | * along with this program; 21 | * 22 | */ 23 | #ifndef _VE_MEM__LIST_H 24 | #define _VE_MEM__LIST_H 25 | 26 | #define ion_offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) 27 | 28 | #define aw_container_of(aw_ptr, type, member) ({ \ 29 | const typeof(((type *)0)->member)*__mptr = (aw_ptr); \ 30 | (type *)((char *)__mptr - ion_offsetof(type, member)); }) 31 | 32 | static inline void aw_prefetch(const void *x) {(void)x; } 33 | static inline void aw_prefetchw(const void *x) {(void)x; } 34 | 35 | #define AW_LIST_LOCATION1 ((void *) 0x00100100) 36 | #define AW_LIST_LOCATION2 ((void *) 0x00200200) 37 | 38 | struct aw_mem_list_head { 39 | struct aw_mem_list_head *aw_next, *aw_prev; 40 | }; 41 | 42 | #define AW_MEM_LIST_HEAD_INIT(aw_name) { &(aw_name), &(aw_name) } 43 | 44 | #define VE_LIST_HEAD(aw_name) \ 45 | struct aw_mem_list_head aw_name = AW_MEM_LIST_HEAD_INIT(aw_name) 46 | 47 | #define AW_MEM_INIT_LIST_HEAD(aw_ptr) do { \ 48 | (aw_ptr)->aw_next = (aw_ptr); (aw_ptr)->aw_prev = (aw_ptr); \ 49 | } while (0) 50 | 51 | /* 52 | * Insert a new entry between two known consecutive entries. 53 | * 54 | * This is only for internal list manipulation where we know 55 | * the aw_prev/aw_next entries already! 56 | */ 57 | static inline void __aw_list_add(struct aw_mem_list_head *newList, 58 | struct aw_mem_list_head *aw_prev, 59 | struct aw_mem_list_head *aw_next) 60 | { 61 | aw_next->aw_prev = newList; 62 | newList->aw_next = aw_next; 63 | newList->aw_prev = aw_prev; 64 | aw_prev->aw_next = newList; 65 | } 66 | 67 | /** 68 | * list_add - add a new entry 69 | * @new: new entry to be added 70 | * @head: list head to add it after 71 | * 72 | * Insert a new entry after the specified head. 73 | * This is good for implementing stacks. 74 | */ 75 | static inline void aw_mem_list_add(struct aw_mem_list_head *newList, 76 | struct aw_mem_list_head *head) 77 | { 78 | __aw_list_add(newList, head, head->aw_next); 79 | } 80 | 81 | /** 82 | * aw_mem_list_add_tail - add a new entry 83 | * @new: new entry to be added 84 | * @head: list head to add it before 85 | * 86 | * Insert a new entry before the specified head. 87 | * This is useful for implementing queues. 
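 *
 * Illustrative sketch (mirrors the IOMMU buffer path in cedar_ve.c; not part
 * of the original header). Callers serialise list updates themselves:
 *
 *	mutex_lock(&cedar_devp->lock_mem);
 *	aw_mem_list_add_tail(&buf->i_list, &cedar_devp->list);
 *	mutex_unlock(&cedar_devp->lock_mem);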
88 | */ 89 | static inline void aw_mem_list_add_tail(struct aw_mem_list_head *newList, 90 | struct aw_mem_list_head *head) 91 | { 92 | __aw_list_add(newList, head->aw_prev, head); 93 | } 94 | 95 | static inline void __aw_mem_list_del(struct aw_mem_list_head *aw_prev, 96 | struct aw_mem_list_head *aw_next) 97 | { 98 | aw_next->aw_prev = aw_prev; 99 | aw_prev->aw_next = aw_next; 100 | } 101 | 102 | static inline void aw_mem_list_del(struct aw_mem_list_head *entry) 103 | { 104 | __aw_mem_list_del(entry->aw_prev, entry->aw_next); 105 | entry->aw_next = AW_LIST_LOCATION1; 106 | entry->aw_prev = AW_LIST_LOCATION2; 107 | } 108 | 109 | #define aw_mem_list_entry(aw_ptr, type, member) aw_container_of(aw_ptr, type, member) 110 | 111 | #define aw_mem_list_for_each_safe(aw_pos, aw_n, aw_head) \ 112 | for (aw_pos = (aw_head)->aw_next, aw_n = aw_pos->aw_next; aw_pos != (aw_head); \ 113 | aw_pos = aw_n, aw_n = aw_pos->aw_next) 114 | 115 | #define aw_mem_list_for_each_entry(aw_pos, aw_head, member) \ 116 | for (aw_pos = aw_mem_list_entry((aw_head)->aw_next, typeof(*aw_pos), member); \ 117 | aw_prefetch(aw_pos->member.aw_next), &aw_pos->member != (aw_head); \ 118 | aw_pos = aw_mem_list_entry(aw_pos->member.aw_next, typeof(*aw_pos), member)) 119 | 120 | #endif 121 | --------------------------------------------------------------------------------